From: zhz@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Mon, 20 Apr 2015 18:53:43 -0000
Message-Id: <998b5d64832f4f3d876ae2bb312f0116@git.apache.org>
In-Reply-To: <10c98c8bd8ed493f93bf8419d6cd7b71@git.apache.org>
References: <10c98c8bd8ed493f93bf8419d6cd7b71@git.apache.org>
X-Mailer: ASF-Git Admin Mailer
Subject: [36/50] hadoop git commit: HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all ECSchemas loaded in Namenode. (Contributed by Vinayakumar B)

HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all ECSchemas loaded in Namenode.
(Contributed by Vinayakumar B)
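The call chain this patch introduces is DistributedFileSystem -> DFSClient.getECSchemas() -> ClientProtocol.getECSchemas() (the new RPC) -> NameNodeRpcServer -> FSNamesystem, which for now returns only the system default schema (see the TODO for HDFS-7866 in the diff). A minimal client-side sketch of that chain follows; the standalone ListECSchemas class and its main() are illustrative only and are not part of this commit (the same call is made by the new TestECSchemas test at the end of the diff):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class ListECSchemas {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS points at a NameNode built from the HDFS-7285 branch.
        Configuration conf = new Configuration();
        DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf);
        // DFSClient.getECSchemas() issues the getECSchemas RPC added by this patch.
        for (ECSchema schema : fs.getClient().getECSchemas()) {
          System.out.println(schema.getSchemaName());
        }
      }
    }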
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/414c2ba1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/414c2ba1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/414c2ba1

Branch: refs/heads/HDFS-7285
Commit: 414c2ba1d2921c459b7b860e49f5e1c55dffe0c1
Parents: cb71eec
Author: Vinayakumar B
Authored: Fri Apr 10 15:07:32 2015 +0530
Committer: Zhe Zhang
Committed: Mon Apr 20 10:28:38 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/io/erasurecode/ECSchema.java  |  4 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt        |  5 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 11 ++++
 .../hadoop/hdfs/protocol/ClientProtocol.java    | 10 ++++
 ...tNamenodeProtocolServerSideTranslatorPB.java | 19 +++++++
 .../ClientNamenodeProtocolTranslatorPB.java     | 26 ++++++++-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  5 +-
 .../hdfs/server/namenode/FSNamesystem.java      | 17 ++++++
 .../hdfs/server/namenode/NameNodeRpcServer.java |  9 +++-
 .../src/main/proto/ClientNamenodeProtocol.proto |  9 ++++
 .../hadoop-hdfs/src/main/proto/hdfs.proto       |  3 +-
 .../org/apache/hadoop/hdfs/TestECSchemas.java   | 57 ++++++++++++++++++++
 12 files changed, 164 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 8c3310e..32077f6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -123,12 +123,12 @@ public final class ECSchema {
     this.chunkSize = DEFAULT_CHUNK_SIZE;
 
     try {
-      if (options.containsKey(CHUNK_SIZE_KEY)) {
+      if (this.options.containsKey(CHUNK_SIZE_KEY)) {
         this.chunkSize = Integer.parseInt(options.get(CHUNK_SIZE_KEY));
       }
     } catch (NumberFormatException e) {
       throw new IllegalArgumentException("Option value " +
-          options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+          this.options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
           " is found. It should be an integer");
     }


http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 753795a..5250dfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -58,4 +58,7 @@
     HDFS-8104. Make hard-coded values consistent with the system default schema
     first before remove them. (Kai Zheng)
 
-    HDFS-7889. Subclass DFSOutputStream to support writing striping layout files. (Li Bo via Kai Zheng)
\ No newline at end of file
+    HDFS-7889. Subclass DFSOutputStream to support writing striping layout files. (Li Bo via Kai Zheng)
+
+    HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all
+    ECSchemas loaded in Namenode. (vinayakumarb)
\ No newline at end of file


http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 6627124..6a4b3d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -163,6 +163,7 @@ import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -3100,6 +3101,16 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
+  public ECSchema[] getECSchemas() throws IOException {
+    checkOpen();
+    TraceScope scope = Trace.startSpan("getECSchemas", traceSampler);
+    try {
+      return namenode.getECSchemas();
+    } finally {
+      scope.close();
+    }
+  }
+
   public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
     return new DFSInotifyEventInputStream(traceSampler, namenode);
   }


http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 45d92f3..7f5ac49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.retry.AtMostOnce;
 import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.AccessControlException;
@@ -1474,4 +1475,13 @@ public interface ClientProtocol {
    */
   @Idempotent
   public ECInfo getErasureCodingInfo(String src) throws IOException;
+
+  /**
+   * Gets list of ECSchemas loaded in Namenode
+   *
+   * @return Returns the list of ECSchemas loaded at Namenode
+   * @throws IOException
+   */
+  @Idempotent
+  public ECSchema[] getECSchemas() throws IOException;
 }
http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 7569819..79ab939 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -107,6 +107,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDat
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetECSchemasRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetECSchemasResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetErasureCodingInfoRequestProto;
@@ -218,6 +220,7 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
 import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
@@ -1530,4 +1533,20 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       throw new ServiceException(e);
     }
   }
+
+  @Override
+  public GetECSchemasResponseProto getECSchemas(RpcController controller,
+      GetECSchemasRequestProto request) throws ServiceException {
+    try {
+      ECSchema[] ecSchemas = server.getECSchemas();
+      GetECSchemasResponseProto.Builder resBuilder = GetECSchemasResponseProto
+          .newBuilder();
+      for (ECSchema ecSchema : ecSchemas) {
+        resBuilder.addSchemas(PBHelper.convertECSchema(ecSchema));
+      }
+      return resBuilder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
 }


http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 568da68..0211522 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -107,6 +107,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDat
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetECSchemasRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetECSchemasResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetErasureCodingInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetErasureCodingInfoResponseProto;
@@ -165,10 +167,11 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateErasureCodingZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateErasureCodingZoneResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos;
+import org.apache.hadoop.hdfs.protocol.proto.*;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
@@ -180,6 +183,7 @@ import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolTranslator;
@@ -237,6 +241,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
     VOID_GET_STORAGE_POLICIES_REQUEST =
     GetStoragePoliciesRequestProto.newBuilder().build();
 
+  private final static GetECSchemasRequestProto
+    VOID_GET_ECSCHEMAS_REQUEST = GetECSchemasRequestProto
+    .newBuilder().build();
+
   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
     rpcProxy = proxy;
   }
@@ -1550,4 +1558,20 @@ public class ClientNamenodeProtocolTranslatorPB implements
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  @Override
+  public ECSchema[] getECSchemas() throws IOException {
+    try {
+      GetECSchemasResponseProto response = rpcProxy.getECSchemas(null,
+          VOID_GET_ECSCHEMAS_REQUEST);
+      ECSchema[] schemas = new ECSchema[response.getSchemasCount()];
+      int i = 0;
+      for (ECSchemaProto schemaProto : response.getSchemasList()) {
+        schemas[i++] = PBHelper.convertECSchema(schemaProto);
+      }
+      return schemas;
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
 }
http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 9aff943..dd86e9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -3121,8 +3121,6 @@ public class PBHelper {
     for (ECSchemaOptionEntryProto option : optionsList) {
       options.put(option.getKey(), option.getValue());
     }
-    // include chunksize in options.
-    options.put(ECSchema.CHUNK_SIZE_KEY, String.valueOf(schema.getChunkSize()));
     return new ECSchema(schema.getSchemaName(), schema.getCodecName(),
         schema.getDataUnits(), schema.getParityUnits(), options);
   }
@@ -3132,8 +3130,7 @@ public class PBHelper {
         .setSchemaName(schema.getSchemaName())
         .setCodecName(schema.getCodecName())
         .setDataUnits(schema.getNumDataUnits())
-        .setParityUnits(schema.getNumParityUnits())
-        .setChunkSize(schema.getChunkSize());
+        .setParityUnits(schema.getNumParityUnits());
     Set<Entry<String, String>> entrySet = schema.getOptions().entrySet();
     for (Entry<String, String> entry : entrySet) {
       builder.addOptions(ECSchemaOptionEntryProto.newBuilder()


http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 57bf888..097f3b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -8173,6 +8173,23 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return null;
   }
 
+  /**
+   * Get available ECSchemas
+   */
+  ECSchema[] getECSchemas() throws IOException {
+    checkOperation(OperationCategory.READ);
+    waitForLoadingFSImage();
+    readLock();
+    try {
+      checkOperation(OperationCategory.READ);
+      // TODO HDFS-7866 Need to return all schemas maintained by Namenode
+      ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+      return new ECSchema[] { defaultSchema };
+    } finally {
+      readUnlock();
+    }
+  }
+
   void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
                 boolean logRetryCache)
       throws IOException {


http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 712fa31..f398ac9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -142,6 +142,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RetryCache;
@@ -2051,9 +2052,15 @@ class NameNodeRpcServer implements NamenodeProtocols {
     nn.spanReceiverHost.removeSpanReceiver(id);
   }
 
-  @Override // ClientNameNodeProtocol
+  @Override // ClientProtocol
   public ECInfo getErasureCodingInfo(String src) throws IOException {
     checkNNStartup();
     return namesystem.getErasureCodingInfo(src);
   }
+
+  @Override // ClientProtocol
+  public ECSchema[] getECSchemas() throws IOException {
+    checkNNStartup();
+    return namesystem.getECSchemas();
+  }
 }


http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index 9488aed..3389a22 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -729,6 +729,13 @@ message GetErasureCodingInfoResponseProto {
   optional ECInfoProto ECInfo = 1;
 }
 
+message GetECSchemasRequestProto { // void request
+}
+
+message GetECSchemasResponseProto {
+  repeated ECSchemaProto schemas = 1;
+}
+
 service ClientNamenodeProtocol {
   rpc getBlockLocations(GetBlockLocationsRequestProto)
       returns(GetBlockLocationsResponseProto);
@@ -879,4 +886,6 @@ service ClientNamenodeProtocol {
       returns(GetEditsFromTxidResponseProto);
   rpc getErasureCodingInfo(GetErasureCodingInfoRequestProto)
       returns(GetErasureCodingInfoResponseProto);
+  rpc getECSchemas(GetECSchemasRequestProto)
+      returns(GetECSchemasResponseProto);
 }


http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 1314ea0..0507538 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -637,8 +637,7 @@ message ECSchemaProto {
   required string codecName = 2;
   required uint32 dataUnits = 3;
   required uint32 parityUnits = 4;
-  required uint32 chunkSize = 5;
-  repeated ECSchemaOptionEntryProto options = 6;
+  repeated ECSchemaOptionEntryProto options = 5;
 }
 
 /**
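Taken together, the two PBHelper hunks and the hdfs.proto change above mean chunk size is no longer a dedicated field on the wire: ECSchemaProto now carries only the schema name, codec, data/parity unit counts and the options map, and a chunk size survives conversion only if it was supplied through the CHUNK_SIZE_KEY option (otherwise the reconstructed ECSchema falls back to DEFAULT_CHUNK_SIZE, per the ECSchema.java hunk). A hedged round-trip sketch of the conversion helpers touched here; the standalone class is illustrative only and assumes ECSchema value equality, which the assertEquals in the new test below also relies on:

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;
    import org.apache.hadoop.hdfs.server.namenode.ECSchemaManager;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class ECSchemaRoundTrip {
      public static void main(String[] args) {
        ECSchema schema = ECSchemaManager.getSystemDefaultSchema();
        // ECSchema -> ECSchemaProto: name, codec, data/parity units, options only.
        ECSchemaProto proto = PBHelper.convertECSchema(schema);
        // ECSchemaProto -> ECSchema: rebuilt purely from those fields.
        ECSchema roundTripped = PBHelper.convertECSchema(proto);
        System.out.println("Round trip preserves schema: " + schema.equals(roundTripped));
      }
    }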
http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestECSchemas.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestECSchemas.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestECSchemas.java
new file mode 100644
index 0000000..07e1359
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestECSchemas.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.namenode.ECSchemaManager;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestECSchemas {
+  private MiniDFSCluster cluster;
+
+  @Before
+  public void before() throws IOException {
+    cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
+        .build();
+    cluster.waitActive();
+  }
+
+  @After
+  public void after() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testGetECSchemas() throws Exception {
+    ECSchema[] ecSchemas = cluster.getFileSystem().getClient().getECSchemas();
+    // TODO update assertion after HDFS-7866
+    assertNotNull(ecSchemas);
+    assertEquals("Should have only one ecSchema", 1, ecSchemas.length);
+    assertEquals("Returned schemas should have only default schema",
+        ECSchemaManager.getSystemDefaultSchema(), ecSchemas[0]);
+  }
+}
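The new test starts a MiniDFSCluster with zero DataNodes, so only a NameNode is needed to exercise the RPC; it can be run on its own from hadoop-hdfs-project/hadoop-hdfs with the usual Surefire filter, for example "mvn test -Dtest=TestECSchemas" (a typical invocation, not taken from this commit).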