Return-Path: X-Original-To: archive-asf-public-internal@cust-asf2.ponee.io Delivered-To: archive-asf-public-internal@cust-asf2.ponee.io Received: from cust-asf.ponee.io (cust-asf.ponee.io [163.172.22.183]) by cust-asf2.ponee.io (Postfix) with ESMTP id 79C05200D39 for ; Sat, 7 Oct 2017 04:06:11 +0200 (CEST) Received: by cust-asf.ponee.io (Postfix) id 7842F160BDE; Sat, 7 Oct 2017 02:06:11 +0000 (UTC) Delivered-To: archive-asf-public@cust-asf.ponee.io Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by cust-asf.ponee.io (Postfix) with SMTP id 23A871609E1 for ; Sat, 7 Oct 2017 04:06:08 +0200 (CEST) Received: (qmail 82065 invoked by uid 500); 7 Oct 2017 02:06:04 -0000 Mailing-List: contact common-commits-help@hadoop.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Delivered-To: mailing list common-commits@hadoop.apache.org Received: (qmail 79222 invoked by uid 99); 7 Oct 2017 02:06:02 -0000 Received: from git1-us-west.apache.org (HELO git1-us-west.apache.org) (140.211.11.23) by apache.org (qpsmtpd/0.29) with ESMTP; Sat, 07 Oct 2017 02:06:02 +0000 Received: by git1-us-west.apache.org (ASF Mail Server at git1-us-west.apache.org, from userid 33) id 52E89F5D11; Sat, 7 Oct 2017 02:06:00 +0000 (UTC) Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit From: inigoiri@apache.org To: common-commits@hadoop.apache.org Date: Sat, 07 Oct 2017 02:06:01 -0000 Message-Id: In-Reply-To: <0cd2adcb5e6d4e34b8c054b3370736fe@git.apache.org> References: <0cd2adcb5e6d4e34b8c054b3370736fe@git.apache.org> X-Mailer: ASF-Git Admin Mailer Subject: [02/31] hadoop git commit: HDFS-10629. Federation Roter. Contributed by Jason Kace and Inigo Goiri. archived-at: Sat, 07 Oct 2017 02:06:11 -0000 HDFS-10629. Federation Roter. Contributed by Jason Kace and Inigo Goiri. 
(cherry picked from commit 6821e801724ac38e9737538b2164c9ae88792282) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2761bbc9 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2761bbc9 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2761bbc9 Branch: refs/heads/trunk Commit: 2761bbc91a7b0a36c42b1b6569c5ecd4f236281b Parents: 2b08a1f Author: Inigo Authored: Tue Mar 28 14:30:59 2017 -0700 Committer: Inigo Goiri Committed: Fri Oct 6 18:50:44 2017 -0700 ---------------------------------------------------------------------- .../hadoop-hdfs/src/main/bin/hdfs | 5 + .../hadoop-hdfs/src/main/bin/hdfs.cmd | 8 +- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 17 + .../resolver/ActiveNamenodeResolver.java | 117 +++ .../resolver/FederationNamenodeContext.java | 87 +++ .../FederationNamenodeServiceState.java | 46 ++ .../resolver/FederationNamespaceInfo.java | 99 +++ .../resolver/FileSubclusterResolver.java | 75 ++ .../resolver/NamenodePriorityComparator.java | 63 ++ .../resolver/NamenodeStatusReport.java | 195 +++++ .../federation/resolver/PathLocation.java | 122 +++ .../federation/resolver/RemoteLocation.java | 74 ++ .../federation/resolver/package-info.java | 41 + .../federation/router/FederationUtil.java | 117 +++ .../router/RemoteLocationContext.java | 38 + .../hdfs/server/federation/router/Router.java | 263 +++++++ .../federation/router/RouterRpcServer.java | 102 +++ .../server/federation/router/package-info.java | 31 + .../federation/store/StateStoreService.java | 77 ++ .../server/federation/store/package-info.java | 62 ++ .../src/main/resources/hdfs-default.xml | 16 + .../server/federation/FederationTestUtils.java | 233 ++++++ .../hdfs/server/federation/MockResolver.java | 290 +++++++ .../server/federation/RouterConfigBuilder.java | 40 + .../server/federation/RouterDFSCluster.java | 767 +++++++++++++++++++ .../server/federation/router/TestRouter.java | 96 +++ 26 files changed, 3080 
insertions(+), 1 deletion(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index e6405b5..b1f44a4 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -57,6 +57,7 @@ function hadoop_usage hadoop_add_subcommand "oiv" admin "apply the offline fsimage viewer to an fsimage" hadoop_add_subcommand "oiv_legacy" admin "apply the offline fsimage viewer to a legacy fsimage" hadoop_add_subcommand "portmap" daemon "run a portmap service" + hadoop_add_subcommand "router" daemon "run the DFS router" hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode" hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot" hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage policies" @@ -176,6 +177,10 @@ function hdfscmd_case HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" HADOOP_CLASSNAME=org.apache.hadoop.portmap.Portmap ;; + router) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.Router' + ;; secondarynamenode) HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode' http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd index 2181e47..b9853d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd @@ -59,7 +59,7 @@ if "%1" == "--loglevel" ( ) ) - set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto debug + set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto router debug for %%i in ( %hdfscommands% ) do ( if %hdfs-command% == %%i set hdfscommand=true ) @@ -179,6 +179,11 @@ goto :eof set CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin goto :eof +:router + set CLASS=org.apache.hadoop.hdfs.server.federation.router.Router + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS% + goto :eof + :debug set CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin goto :eof @@ -219,6 +224,7 @@ goto :eof @echo secondarynamenode run the DFS secondary namenode @echo namenode run the DFS namenode @echo journalnode run the DFS journalnode + @echo router run the DFS router @echo zkfc run the ZK Failover Controller daemon @echo datanode run a DFS datanode @echo dfsadmin run a DFS admin client http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index f2f8730..92cb0ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -1112,6 +1112,23 @@ public class DFSConfigKeys extends CommonConfigurationKeys { 
"dfs.use.dfs.network.topology"; public static final boolean DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT = true; + // HDFS federation + public static final String FEDERATION_PREFIX = "dfs.federation."; + + // HDFS Router-based federation + public static final String FEDERATION_ROUTER_PREFIX = + "dfs.federation.router."; + + // HDFS Router State Store connection + public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS = + FEDERATION_ROUTER_PREFIX + "file.resolver.client.class"; + public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT = + "org.apache.hadoop.hdfs.server.federation.MockResolver"; + public static final String FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS = + FEDERATION_ROUTER_PREFIX + "namenode.resolver.client.class"; + public static final String FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS_DEFAULT = + "org.apache.hadoop.hdfs.server.federation.MockResolver"; + // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry @Deprecated public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java new file mode 100644 index 0000000..477053d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.resolver; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Locates the most active NN for a given nameservice ID or blockpool ID. This + * interface is used by the {@link org.apache.hadoop.hdfs.server.federation. + * router.RouterRpcServer RouterRpcServer} to: + *
    + *
  • Determine the target NN for a given subcluster. + *
  • List all namespaces discovered/active in the federation. + *
  • Update the currently active NN empirically. + *
+ * The interface is also used by the {@link org.apache.hadoop.hdfs.server. + * federation.router.NamenodeHeartbeatService NamenodeHeartbeatService} to + * register a discovered NN. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface ActiveNamenodeResolver { + + /** + * Report a successful, active NN address for a nameservice or blockPool. + * + * @param ns Nameservice identifier. + * @param successfulAddress The address the successful responded to the + * command. + * @throws IOException If the state store cannot be accessed. + */ + void updateActiveNamenode( + String ns, InetSocketAddress successfulAddress) throws IOException; + + /** + * Returns a prioritized list of the most recent cached registration entries + * for a single nameservice ID. + * Returns an empty list if none are found. Returns entries in preference of: + *
    + *
  • The most recent ACTIVE NN + *
  • The most recent STANDBY NN + *
  • The most recent UNAVAILABLE NN + *
+ * + * @param nameserviceId Nameservice identifier. + * @return Prioritized list of namenode contexts. + * @throws IOException If the state store cannot be accessed. + */ + List + getNamenodesForNameserviceId(String nameserviceId) throws IOException; + + /** + * Returns a prioritized list of the most recent cached registration entries + * for a single block pool ID. + * Returns an empty list if none are found. Returns entries in preference of: + *
    + *
  • The most recent ACTIVE NN + *
  • The most recent STANDBY NN + *
  • The most recent UNAVAILABLE NN + *
+ * + * @param blockPoolId Block pool identifier for the nameservice. + * @return Prioritized list of namenode contexts. + * @throws IOException If the state store cannot be accessed. + */ + List + getNamenodesForBlockPoolId(String blockPoolId) throws IOException; + + /** + * Register a namenode in the State Store. + * + * @param report Namenode status report. + * @return True if the node was registered and successfully committed to the + * data store. + * @throws IOException Throws exception if the namenode could not be + * registered. + */ + boolean registerNamenode(NamenodeStatusReport report) throws IOException; + + /** + * Get a list of all namespaces that are registered and active in the + * federation. + * + * @return List of name spaces in the federation + * @throws Throws exception if the namespace list is not available. + */ + Set getNamespaces() throws IOException; + + /** + * Assign a unique identifier for the parent router service. + * Required to report the status to the namenode resolver. + * + * @param router Unique string identifier for the router. + */ + void setRouterId(String routerId); +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeContext.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeContext.java new file mode 100644 index 0000000..68ef02a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeContext.java @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.resolver; + +/** + * Interface for a discovered NN and its current server endpoints. + */ +public interface FederationNamenodeContext { + + /** + * Get the RPC server address of the namenode. + * + * @return RPC server address in the form of host:port. + */ + String getRpcAddress(); + + /** + * Get the Service RPC server address of the namenode. + * + * @return Service RPC server address in the form of host:port. + */ + String getServiceAddress(); + + /** + * Get the Lifeline RPC server address of the namenode. + * + * @return Lifeline RPC server address in the form of host:port. + */ + String getLifelineAddress(); + + /** + * Get the HTTP server address of the namenode. + * + * @return HTTP address in the form of host:port. + */ + String getWebAddress(); + + /** + * Get the unique key representing the namenode. + * + * @return Combination of the nameservice and the namenode IDs. + */ + String getNamenodeKey(); + + /** + * Identifier for the nameservice/namespace. + * + * @return Namenode nameservice identifier. + */ + String getNameserviceId(); + + /** + * Identifier for the namenode. + * + * @return String + */ + String getNamenodeId(); + + /** + * The current state of the namenode (active, standby, etc). 
+ * + * @return FederationNamenodeServiceState State of the namenode. + */ + FederationNamenodeServiceState getState(); + + /** + * The update date. + * + * @return Long with the update date. + */ + long getDateModified(); +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java new file mode 100644 index 0000000..c773f82 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamenodeServiceState.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.resolver; + +import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; + +/** + * Namenode state in the federation. 
The order of this enum is used to evaluate + * NN priority for RPC calls. + */ +public enum FederationNamenodeServiceState { + ACTIVE, // HAServiceState.ACTIVE or operational. + STANDBY, // HAServiceState.STANDBY. + UNAVAILABLE, // When the namenode cannot be reached. + EXPIRED; // When the last update is too old. + + public static FederationNamenodeServiceState getState(HAServiceState state) { + switch(state) { + case ACTIVE: + return FederationNamenodeServiceState.ACTIVE; + case STANDBY: + return FederationNamenodeServiceState.STANDBY; + case INITIALIZING: + return FederationNamenodeServiceState.UNAVAILABLE; + case STOPPING: + return FederationNamenodeServiceState.UNAVAILABLE; + default: + return FederationNamenodeServiceState.UNAVAILABLE; + } + } +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java new file mode 100644 index 0000000..bbaeca3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java @@ -0,0 +1,99 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.resolver; + +import org.apache.hadoop.hdfs.server.federation.router.RemoteLocationContext; + +/** + * Represents information about a single nameservice/namespace in a federated + * HDFS cluster. + */ +public class FederationNamespaceInfo + implements Comparable, RemoteLocationContext { + + /** Block pool identifier. */ + private String blockPoolId; + /** Cluster identifier. */ + private String clusterId; + /** Nameservice identifier. */ + private String nameserviceId; + + public FederationNamespaceInfo(String bpId, String clId, String nsId) { + this.blockPoolId = bpId; + this.clusterId = clId; + this.nameserviceId = nsId; + } + + /** + * The HDFS nameservice id for this namespace. + * + * @return Nameservice identifier. + */ + public String getNameserviceId() { + return this.nameserviceId; + } + + /** + * The HDFS cluster id for this namespace. + * + * @return Cluster identifier. + */ + public String getClusterId() { + return this.clusterId; + } + + /** + * The HDFS block pool id for this namespace. + * + * @return Block pool identifier. 
+ */ + public String getBlockPoolId() { + return this.blockPoolId; + } + + @Override + public int hashCode() { + return this.nameserviceId.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } else if (obj instanceof FederationNamespaceInfo) { + return this.compareTo((FederationNamespaceInfo) obj) == 0; + } else { + return false; + } + } + + @Override + public int compareTo(FederationNamespaceInfo info) { + return this.nameserviceId.compareTo(info.getNameserviceId()); + } + + @Override + public String toString() { + return this.nameserviceId + "->" + this.blockPoolId + ":" + this.clusterId; + } + + @Override + public String getDest() { + return this.nameserviceId; + } +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java new file mode 100644 index 0000000..af9f493 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.federation.resolver; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Interface to map a file path in the global name space to a specific + * subcluster and path in an HDFS name space. + *

+ * Each path in the global/federated namespace may map to 1-N different HDFS + * locations. Each location specifies a single nameservice and a single HDFS + * path. The behavior is similar to MergeFS and Nfly and allows the merger + * of multiple HDFS locations into a single path. See HADOOP-8298 and + * HADOOP-12077 + *

+ * For example, a directory listing will fetch listings for each destination + * path and combine them into a single set of results. + *

+ * When multiple destinations are available for a path, the destinations are + * prioritized in a consistent manner. This allows the proxy server to + * guess the best/most likely destination and attempt it first. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface FileSubclusterResolver { + + /** + * Get the destinations for a global path. Results are from the mount table + * cache. If multiple destinations are available, the first result is the + * highest priority destination. + * + * @param path Global path. + * @return Location in a destination namespace or null if it does not exist. + * @throws IOException Throws exception if the data is not available. + */ + PathLocation getDestinationForPath(String path) throws IOException; + + /** + * Get a list of mount points for a path. Results are from the mount table + * cache. + * + * @return List of mount points present at this path or zero-length list if + * none are found. + * @throws IOException Throws exception if the data is not available. + */ + List getMountPoints(String path) throws IOException; + + /** + * Get the default namespace for the cluster. + * + * @return Default namespace identifier. 
+ */ + String getDefaultNamespace(); +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodePriorityComparator.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodePriorityComparator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodePriorityComparator.java new file mode 100644 index 0000000..fe82f29 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodePriorityComparator.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.resolver; + +import java.util.Comparator; + +/** + * Compares NNs in the same namespace and prioritizes by their status. The + * priorities are: + *

    + *
  • ACTIVE + *
  • STANDBY + *
  • UNAVAILABLE + *
+ * When two NNs have the same state, the last modification date is the tie + * breaker, newest has priority. Expired NNs are excluded. + */ +public class NamenodePriorityComparator + implements Comparator { + + @Override + public int compare(FederationNamenodeContext o1, + FederationNamenodeContext o2) { + FederationNamenodeServiceState state1 = o1.getState(); + FederationNamenodeServiceState state2 = o2.getState(); + + if (state1 == state2) { + // Both have the same state, use mode dates + return compareModDates(o1, o2); + } else { + // Enum is ordered by priority + return state1.compareTo(state2); + } + } + + /** + * Compare the modification dates. + * + * @param o1 Context 1. + * @param o2 Context 2. + * @return Comparison between dates. + */ + private int compareModDates(FederationNamenodeContext o1, + FederationNamenodeContext o2) { + // Reverse sort, lowest position is highest priority. + return (int) (o2.getDateModified() - o1.getDateModified()); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java new file mode 100644 index 0000000..9259048 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java @@ -0,0 +1,195 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.federation.resolver;

import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;

/**
 * Status of the namenode.
 */
public class NamenodeStatusReport {

  /** Namenode information. */
  private String nameserviceId = "";
  private String namenodeId = "";
  private String clusterId = "";
  private String blockPoolId = "";
  private String rpcAddress = "";
  private String serviceAddress = "";
  private String lifelineAddress = "";
  private String webAddress = "";

  /** Namenode state. */
  private HAServiceState status = HAServiceState.STANDBY;
  private boolean safeMode = false;

  /** If the fields are valid. */
  private boolean registrationValid = false;
  private boolean haStateValid = false;

  /**
   * Build a status report for a Namenode.
   *
   * @param ns Nameservice identifier.
   * @param nn Namenode identifier within the nameservice.
   * @param rpc RPC address.
   * @param service Service RPC address.
   * @param lifeline Lifeline RPC address.
   * @param web Web (HTTP) address.
   */
  public NamenodeStatusReport(String ns, String nn, String rpc, String service,
      String lifeline, String web) {
    this.nameserviceId = ns;
    this.namenodeId = nn;
    this.rpcAddress = rpc;
    this.serviceAddress = service;
    this.lifelineAddress = lifeline;
    this.webAddress = web;
  }

  /**
   * If the registration is valid.
   *
   * @return If the registration is valid.
   */
  public boolean registrationValid() {
    return this.registrationValid;
  }

  /**
   * If the HA state is valid.
   *
   * @return If the HA state is valid.
   */
  public boolean haStateValid() {
    return this.haStateValid;
  }

  /**
   * Get the state of the Namenode being monitored.
   *
   * @return State of the Namenode.
   */
  public FederationNamenodeServiceState getState() {
    if (!registrationValid) {
      return FederationNamenodeServiceState.UNAVAILABLE;
    } else if (haStateValid) {
      return FederationNamenodeServiceState.getState(status);
    } else {
      // Registered but HA state unknown: report it as ACTIVE.
      return FederationNamenodeServiceState.ACTIVE;
    }
  }

  /**
   * Get the name service identifier.
   *
   * @return The name service identifier.
   */
  public String getNameserviceId() {
    return this.nameserviceId;
  }

  /**
   * Get the namenode identifier.
   *
   * @return The namenode identifier.
   */
  public String getNamenodeId() {
    return this.namenodeId;
  }

  /**
   * Get the cluster identifier.
   *
   * @return The cluster identifier.
   */
  public String getClusterId() {
    return this.clusterId;
  }

  /**
   * Get the block pool identifier.
   *
   * @return The block pool identifier.
   */
  public String getBlockPoolId() {
    return this.blockPoolId;
  }

  /**
   * Get the RPC address.
   *
   * @return The RPC address.
   */
  public String getRpcAddress() {
    return this.rpcAddress;
  }

  /**
   * Get the Service RPC address.
   *
   * @return The Service RPC address.
   */
  public String getServiceAddress() {
    return this.serviceAddress;
  }

  /**
   * Get the Lifeline RPC address.
   *
   * @return The Lifeline RPC address.
   */
  public String getLifelineAddress() {
    return this.lifelineAddress;
  }

  /**
   * Get the web address.
   *
   * @return The web address.
   */
  public String getWebAddress() {
    return this.webAddress;
  }

  /**
   * Set the HA service state. Also marks the HA state as valid.
   *
   * @param state The HA service state.
   */
  public void setHAServiceState(HAServiceState state) {
    this.status = state;
    this.haStateValid = true;
  }

  /**
   * Set the namespace information. Also marks the registration as valid.
   *
   * @param info Namespace information.
   */
  public void setNamespaceInfo(NamespaceInfo info) {
    this.clusterId = info.getClusterID();
    this.blockPoolId = info.getBlockPoolID();
    this.registrationValid = true;
  }

  /**
   * Set whether the Namenode is in safe mode.
   *
   * @param safemode If the Namenode is in safe mode.
   */
  public void setSafeMode(boolean safemode) {
    this.safeMode = safemode;
  }

  /**
   * If the Namenode is in safe mode.
   *
   * @return If the Namenode is in safe mode.
   */
  public boolean getSafemode() {
    return this.safeMode;
  }

  @Override
  public String toString() {
    return String.format("%s-%s:%s",
        nameserviceId, namenodeId, serviceAddress);
  }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/PathLocation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/PathLocation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/PathLocation.java
new file mode 100644
index 0000000..d90565c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/PathLocation.java
@@ -0,0 +1,122 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ +package org.apache.hadoop.hdfs.server.federation.resolver; + +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; + +/** + * A map of the properties and target destinations (name space + path) for + * a path in the global/federated namespace. + * This data is generated from the @see MountTable records. + */ +public class PathLocation { + + /** Source path in global namespace. */ + private final String sourcePath; + + /** Remote paths in the target namespaces. */ + private final List destinations; + + /** List of name spaces present. */ + private final Set namespaces; + + + /** + * Create a new PathLocation. + * + * @param source Source path in the global name space. + * @param dest Destinations of the mount table entry. + * @param namespaces Unique identifier representing the combination of + * name spaces present in the destination list. + */ + public PathLocation( + String source, List dest, Set nss) { + this.sourcePath = source; + this.destinations = dest; + this.namespaces = nss; + } + + /** + * Create a path location from another path. + * + * @param other Other path location to copy from. + */ + public PathLocation(PathLocation other) { + this.sourcePath = other.sourcePath; + this.destinations = new LinkedList(other.destinations); + this.namespaces = new HashSet(other.namespaces); + } + + /** + * Get the source path in the global namespace for this path location. + * + * @return The path in the global namespace. + */ + public String getSourcePath() { + return this.sourcePath; + } + + /** + * Get the list of subclusters defined for the destinations. + */ + public Set getNamespaces() { + return Collections.unmodifiableSet(this.namespaces); + } + + @Override + public String toString() { + RemoteLocation loc = getDefaultLocation(); + return loc.getNameserviceId() + "->" + loc.getDest(); + } + + /** + * Check if this location supports multiple clusters/paths. 
+ * + * @return If it has multiple destinations. + */ + public boolean hasMultipleDestinations() { + return this.destinations.size() > 1; + } + + /** + * Get the list of locations found in the mount table. + * The first result is the highest priority path. + * + * @return List of remote locations. + */ + public List getDestinations() { + return Collections.unmodifiableList(this.destinations); + } + + /** + * Get the default or highest priority location. + * + * @return The default location. + */ + public RemoteLocation getDefaultLocation() { + if (destinations.isEmpty() || destinations.get(0).getDest() == null) { + throw new UnsupportedOperationException( + "Unsupported path " + sourcePath + " please check mount table"); + } + return destinations.get(0); + } +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/RemoteLocation.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/RemoteLocation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/RemoteLocation.java new file mode 100644 index 0000000..eef136d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/RemoteLocation.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.resolver; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.hadoop.hdfs.server.federation.router.RemoteLocationContext; + +/** + * A single in a remote namespace consisting of a nameservice ID + * and a HDFS path. + */ +public class RemoteLocation implements RemoteLocationContext { + + /** Identifier of the remote namespace for this location. */ + private String nameserviceId; + /** Path in the remote location. */ + private String path; + + /** + * Create a new remote location. + * + * @param nsId Destination namespace. + * @param pPath Path in the destination namespace. 
+ */ + public RemoteLocation(String nsId, String pPath) { + this.nameserviceId = nsId; + this.path = pPath; + } + + @Override + public String getNameserviceId() { + return this.nameserviceId; + } + + @Override + public String getDest() { + return this.path; + } + + @Override + public String toString() { + return this.nameserviceId + "->" + this.path; + } + + @Override + public int hashCode() { + return new HashCodeBuilder(17, 31) + .append(this.nameserviceId) + .append(this.path) + .toHashCode(); + } + + @Override + public boolean equals(Object obj) { + return (obj != null && + obj.getClass() == this.getClass() && + obj.hashCode() == this.hashCode()); + } +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/package-info.java new file mode 100644 index 0000000..d8be9e3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/package-info.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * The resolver package contains independent data resolvers used in HDFS
 * federation. The data resolvers collect data from the cluster, including from
 * the state store. The resolvers expose APIs used by HDFS federation to collect
 * aggregated, cached data for use in real-time request processing. The
 * resolvers are perf-sensitive and are used in the flow of the
 * {@link RouterRpcServer} request path.
 *

 * <p>
 * The principal resolvers are:
 * <ul>
 * <li>{@link ActiveNamenodeResolver} Real-time interface for locating the
 * most recently active NN for a nameservice.</li>
 * <li>{@link FileSubclusterResolver} Real-time interface for determining the
 * NN and local file path for a given file/folder based on the global
 * namespace path.</li>
 * </ul>
 *
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +package org.apache.hadoop.hdfs.server.federation.resolver; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java new file mode 100644 index 0000000..6e7e865 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.federation.router; + +import java.lang.reflect.Constructor; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; + +/** + * Utilities for managing HDFS federation. + */ +public final class FederationUtil { + + private static final Log LOG = LogFactory.getLog(FederationUtil.class); + + private FederationUtil() { + // Utility Class + } + + /** + * Create an instance of an interface with a constructor using a state store + * constructor. + * + * @param conf Configuration + * @param context Context object to pass to the instance. + * @param contextType Type of the context passed to the constructor. + * @param configurationKeyName Configuration key to retrieve the class to load + * @param defaultClassName Default class to load if the configuration key is + * not set + * @param clazz Class/interface that must be implemented by the instance. + * @return New instance of the specified class that implements the desired + * interface and a single parameter constructor containing a + * StateStore reference. 
+ */ + private static T newInstance(final Configuration conf, + final R context, final Class contextClass, + final String configKeyName, final String defaultClassName, + final Class clazz) { + + String className = conf.get(configKeyName, defaultClassName); + try { + Class instance = conf.getClassByName(className); + if (clazz.isAssignableFrom(instance)) { + if (contextClass == null) { + // Default constructor if no context + @SuppressWarnings("unchecked") + Constructor constructor = + (Constructor) instance.getConstructor(); + return constructor.newInstance(); + } else { + // Constructor with context + @SuppressWarnings("unchecked") + Constructor constructor = (Constructor) instance.getConstructor( + Configuration.class, contextClass); + return constructor.newInstance(conf, context); + } + } else { + throw new RuntimeException("Class " + className + " not instance of " + + clazz.getCanonicalName()); + } + } catch (ReflectiveOperationException e) { + LOG.error("Could not instantiate: " + className, e); + return null; + } + } + + /** + * Creates an instance of a FileSubclusterResolver from the configuration. + * + * @param conf Configuration that defines the file resolver class. + * @param obj Context object passed to class constructor. + * @return FileSubclusterResolver + */ + public static FileSubclusterResolver newFileSubclusterResolver( + Configuration conf, StateStoreService stateStore) { + return newInstance(conf, stateStore, StateStoreService.class, + DFSConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS, + DFSConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT, + FileSubclusterResolver.class); + } + + /** + * Creates an instance of an ActiveNamenodeResolver from the configuration. + * + * @param conf Configuration that defines the namenode resolver class. + * @param obj Context object passed to class constructor. 
+ * @return ActiveNamenodeResolver + */ + public static ActiveNamenodeResolver newActiveNamenodeResolver( + Configuration conf, StateStoreService stateStore) { + return newInstance(conf, stateStore, StateStoreService.class, + DFSConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS, + DFSConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS_DEFAULT, + ActiveNamenodeResolver.class); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java new file mode 100644 index 0000000..da6066b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteLocationContext.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 */
package org.apache.hadoop.hdfs.server.federation.router;

/**
 * Interface for objects that are unique to a namespace.
 * Implementations identify a destination (for example, a path) inside a
 * particular nameservice.
 */
public interface RemoteLocationContext {

  /**
   * Returns an identifier for a unique namespace.
   *
   * @return Namespace identifier.
   */
  String getNameserviceId();

  /**
   * Destination in this location. For example the path in a remote namespace.
   *
   * @return Destination in this location.
   */
  String getDest();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
new file mode 100644
index 0000000..fe0d02a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
@@ -0,0 +1,263 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ +package org.apache.hadoop.hdfs.server.federation.router; + +import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.newActiveNamenodeResolver; +import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.newFileSubclusterResolver; +import static org.apache.hadoop.util.ExitUtil.terminate; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.util.ShutdownHookManager; +import org.apache.hadoop.util.StringUtils; + +/** + * Router that provides a unified view of multiple federated HDFS clusters. It + * has two main roles: (1) federated interface and (2) NameNode heartbeat. + *

 * <p>
 * For the federated interface, the Router receives a client request, checks the
 * State Store for the correct subcluster, and forwards the request to the
 * active Namenode of that subcluster. The reply from the Namenode then flows in
 * the opposite direction. The Routers are stateless and can be behind a load
 * balancer. HDFS clients connect to the router using the same interfaces as are
 * used to communicate with a namenode, namely the ClientProtocol RPC interface
 * and the WebHdfs HTTP interface exposed by the router. {@link RouterRpcServer}
 * {@link RouterHttpServer}
 *

 * For NameNode heartbeat, the Router periodically checks the state of a
 * NameNode (usually on the same server) and reports their high availability
 * (HA) state and load/space status to the State Store. Note that this is an
 * optional role as a Router can be independent of any subcluster.
 * {@link StateStoreService} {@link NamenodeHeartbeatService}
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class Router extends CompositeService {

  private static final Log LOG = LogFactory.getLog(Router.class);


  /** Configuration for the Router. */
  private Configuration conf;

  /** Router address/identifier. */
  private String routerId;

  /** RPC interface to the client. */
  private RouterRpcServer rpcServer;

  /** Interface with the State Store. */
  private StateStoreService stateStore;

  /** Interface to map global name space to HDFS subcluster name spaces. */
  private FileSubclusterResolver subclusterResolver;

  /** Interface to identify the active NN for a nameservice or blockpool ID. */
  private ActiveNamenodeResolver namenodeResolver;


  /** Usage string for help message. */
  private static final String USAGE = "Usage: java Router";

  /** Priority of the Router shutdown hook. */
  public static final int SHUTDOWN_HOOK_PRIORITY = 30;


  /////////////////////////////////////////////////////////
  // Constructor
  /////////////////////////////////////////////////////////

  public Router() {
    super(Router.class.getName());
  }

  /////////////////////////////////////////////////////////
  // Service management
  /////////////////////////////////////////////////////////

  @Override
  protected void serviceInit(Configuration configuration) throws Exception {
    this.conf = configuration;

    // TODO Interface to the State Store
    this.stateStore = null;

    // Resolver to track active NNs
    this.namenodeResolver = newActiveNamenodeResolver(
        this.conf, this.stateStore);
    if (this.namenodeResolver == null) {
      throw new IOException("Cannot find namenode resolver.");
    }

    // Lookup interface to map between the global and subcluster name spaces
    this.subclusterResolver = newFileSubclusterResolver(
        this.conf, this.stateStore);
    if (this.subclusterResolver == null) {
      throw new IOException("Cannot find subcluster resolver");
    }

    super.serviceInit(conf);
  }

  @Override
  protected void serviceStart() throws Exception {

    super.serviceStart();
  }

  @Override
  protected void serviceStop() throws Exception {

    super.serviceStop();
  }

  /**
   * Shutdown the router. Stops the service from a separate thread so a
   * component of this composite service can trigger a shutdown without
   * blocking on its own stop sequence.
   */
  public void shutDown() {
    new Thread() {
      @Override
      public void run() {
        Router.this.stop();
      }
    }.start();
  }

  /**
   * Main run loop for the router.
   *
   * @param argv parameters.
   */
  public static void main(String[] argv) {
    if (DFSUtil.parseHelpArgument(argv, Router.USAGE, System.out, true)) {
      System.exit(0);
    }

    try {
      StringUtils.startupShutdownMessage(Router.class, argv, LOG);

      Router router = new Router();

      ShutdownHookManager.get().addShutdownHook(
          new CompositeServiceShutdownHook(router), SHUTDOWN_HOOK_PRIORITY);

      Configuration conf = new HdfsConfiguration();
      router.init(conf);
      router.start();
    } catch (Throwable e) {
      LOG.error("Failed to start router.", e);
      terminate(1, e);
    }
  }

  /////////////////////////////////////////////////////////
  // RPC Server
  /////////////////////////////////////////////////////////

  /**
   * Create a new Router RPC server to proxy ClientProtocol requests.
   *
   * @return RouterRpcServer
   * @throws IOException If the router RPC server was not started.
   */
  protected RouterRpcServer createRpcServer() throws IOException {
    return new RouterRpcServer(this.conf, this, this.getNamenodeResolver(),
        this.getSubclusterResolver());
  }

  /**
   * Get the Router RPC server.
   *
   * @return Router RPC server.
   */
  public RouterRpcServer getRpcServer() {
    return this.rpcServer;
  }

  /////////////////////////////////////////////////////////
  // Submodule getters
  /////////////////////////////////////////////////////////

  /**
   * Get the State Store service.
   *
   * @return State Store service.
   */
  public StateStoreService getStateStore() {
    return this.stateStore;
  }

  /**
   * Get the subcluster resolver for files.
   *
   * @return Subcluster resolver for files.
   */
  public FileSubclusterResolver getSubclusterResolver() {
    return this.subclusterResolver;
  }

  /**
   * Get the namenode resolver for a subcluster.
   *
   * @return The namenode resolver for a subcluster.
   */
  public ActiveNamenodeResolver getNamenodeResolver() {
    return this.namenodeResolver;
  }

  /////////////////////////////////////////////////////////
  // Router info
  /////////////////////////////////////////////////////////

  /**
   * Unique ID for the router, typically the hostname:port string for the
   * router's RPC server. This ID may be null on router startup before the RPC
   * server has bound to a port.
   *
   * @return Router identifier.
   */
  public String getRouterId() {
    return this.routerId;
  }

  /**
   * Sets a unique ID for this router and propagates it to the State Store
   * and the namenode resolver if they are available.
   *
   * @param id Identifier of the Router.
   */
  public void setRouterId(String id) {
    this.routerId = id;
    if (this.stateStore != null) {
      this.stateStore.setIdentifier(this.routerId);
    }
    if (this.namenodeResolver != null) {
      this.namenodeResolver.setRouterId(this.routerId);
    }
  }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
new file mode 100644
index 0000000..24792bb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -0,0 +1,102 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.
You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.federation.router;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
import org.apache.hadoop.ipc.RPC.Server;
import org.apache.hadoop.service.AbstractService;

import com.google.common.annotations.VisibleForTesting;

/**
 * This class is responsible for handling all of the RPC calls to the router.
 * It is created, started, and stopped by {@link Router}. It implements the
 * {@link ClientProtocol} to mimic a
 * {@link org.apache.hadoop.hdfs.server.namenode.NameNode NameNode} and proxies
 * the requests to the active
 * {@link org.apache.hadoop.hdfs.server.namenode.NameNode NameNode}.
 */
public class RouterRpcServer extends AbstractService {

  /** The RPC server that listens to requests from clients. */
  private final Server rpcServer;

  /**
   * Construct a router RPC server.
   *
   * @param configuration HDFS Configuration.
   * @param router Router service that owns this RPC server.
   * @param nnResolver The NN resolver instance to determine active NNs in HA.
   * @param fileResolver File resolver to resolve file paths to subclusters.
   * @throws IOException If the RPC server could not be created.
   */
  public RouterRpcServer(Configuration configuration, Router router,
      ActiveNamenodeResolver nnResolver, FileSubclusterResolver fileResolver)
      throws IOException {
    super(RouterRpcServer.class.getName());

    // NOTE(review): the server is not created yet; presumably the parameters
    // are kept for the follow-up implementation of this stub.
    this.rpcServer = null;
  }

  /**
   * Allow access to the client RPC server for testing.
   *
   * @return The RPC server.
   */
  @VisibleForTesting
  public Server getServer() {
    return this.rpcServer;
  }

  @Override
  protected void serviceInit(Configuration configuration) throws Exception {
    super.serviceInit(configuration);
  }

  /**
   * Start client and service RPC servers.
   */
  @Override
  protected void serviceStart() throws Exception {
    if (this.rpcServer != null) {
      this.rpcServer.start();
    }
    super.serviceStart();
  }

  @Override
  protected void serviceStop() throws Exception {
    if (this.rpcServer != null) {
      this.rpcServer.stop();
    }
    super.serviceStop();
  }

  /**
   * Wait until the RPC servers have shutdown.
   *
   * @throws InterruptedException If the wait is interrupted.
   */
  void join() throws InterruptedException {
    if (this.rpcServer != null) {
      this.rpcServer.join();
    }
  }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/package-info.java
new file mode 100644
index 0000000..327f39b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/package-info.java
@@ -0,0 +1,31 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * The router package includes the core services for a HDFS federation router. + * The {@link Router} acts as a transparent proxy in front of a cluster of + * multiple NameNodes and nameservices. The {@link RouterRpcServer} exposes the + * NameNode clientProtocol and is the primary contact point for DFS clients in a + * federated cluster. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +package org.apache.hadoop.hdfs.server.federation.router; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java new file mode 100644 index 0000000..866daa3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.federation.store; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.service.CompositeService; + +/** + * A service to initialize a + * {@link org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver + * StateStoreDriver} and maintain the connection to the data store. There + * are multiple state store driver connections supported: + *

    + *
  • File {@link org.apache.hadoop.hdfs.server.federation.store.driver.impl. + * StateStoreFileImpl StateStoreFileImpl} + *
  • ZooKeeper {@link org.apache.hadoop.hdfs.server.federation.store.driver. + * impl.StateStoreZooKeeperImpl StateStoreZooKeeperImpl} + *
+ *

+ * The service also supports the dynamic registration of data interfaces such as + * the following: + *

    + *
  • {@link MembershipStateStore}: state of the Namenodes in the + * federation. + *
  • {@link MountTableStore}: Mount table between to subclusters. + * See {@link org.apache.hadoop.fs.viewfs.ViewFs ViewFs}. + *
  • {@link RouterStateStore}: State of the routers in the federation. + *
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class StateStoreService extends CompositeService { + + /** Identifier for the service. */ + private String identifier; + + // Stub class + public StateStoreService(String name) { + super(name); + } + + /** + * Fetch a unique identifier for this state store instance. Typically it is + * the address of the router. + * + * @return Unique identifier for this store. + */ + public String getIdentifier() { + return this.identifier; + } + + /** + * Set a unique synchronization identifier for this store. + * + * @param id Unique identifier, typically the router's RPC address. + */ + public void setIdentifier(String id) { + this.identifier = id; + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/package-info.java new file mode 100644 index 0000000..949ec7c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/package-info.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * The federation state store tracks persistent values that are shared between + * multiple routers. + *

+ * Data is stored in data records that inherit from a common class. Data records + * are serialized when written to the data store using a modular serialization + * implementation. The default is protobuf serialization. Data is stored as rows + * of records of the same type with each data member in a record representing a + * column. + *

+ * The state store uses a modular data storage + * {@link org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver + * StateStoreDriver} to handle querying, updating and deleting data records. The + * data storage driver is initialized and maintained by the + * {@link org.apache.hadoop.hdfs.server.federation.store. + * StateStoreService StateStoreService}. The state store + * supports fetching all records of a type, filtering by column values or + * fetching a single record by its primary key. + *

+ * The state store contains several API interfaces, one for each data records + * type. + *

+ *

    + *
  • FederationMembershipStateStore: state of all Namenodes in the federation. + * Uses the MembershipState record. + *
  • FederationMountTableStore: Mount table mapping paths in the global + * namespace to individual subcluster paths. Uses the MountTable record. + *
  • RouterStateStore: State of all routers in the federation. Uses the + * RouterState record. + *
+ *

+ * Each API is defined in a separate interface. The implementations of these + * interfaces are responsible for accessing the + * {@link org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver + * StateStoreDriver} to query, update and delete data records. + */ + +@InterfaceAudience.Private +@InterfaceStability.Evolving + +package org.apache.hadoop.hdfs.server.federation.store; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 8a60b80..36a9b2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -4650,4 +4650,20 @@ + + dfs.federation.router.file.resolver.client.class + org.apache.hadoop.hdfs.server.federation.MockResolver + + Class to resolve files to subclusters. + + + + + dfs.federation.router.namenode.resolver.client.class + org.apache.hadoop.hdfs.server.federation.MockResolver + + Class to resolve the namenode for a subcluster. 
+ + + http://git-wip-us.apache.org/repos/asf/hadoop/blob/2761bbc9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java new file mode 100644 index 0000000..8674682 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java @@ -0,0 +1,233 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.federation; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStreamReader; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.Date; +import java.util.List; +import java.util.Random; + +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; +import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState; +import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.security.AccessControlException; + +/** + * Helper utilities for testing HDFS Federation. 
+ */ +public final class FederationTestUtils { + + public final static String NAMESERVICE1 = "ns0"; + public final static String NAMESERVICE2 = "ns1"; + public final static String NAMENODE1 = "nn0"; + public final static String NAMENODE2 = "nn1"; + public final static String NAMENODE3 = "nn2"; + public final static String NAMENODE4 = "nn3"; + public final static String ROUTER1 = "router0"; + public final static String ROUTER2 = "router1"; + public final static String ROUTER3 = "router2"; + public final static String ROUTER4 = "router3"; + public final static long BLOCK_SIZE_BYTES = 134217728; + + private FederationTestUtils() { + // Utility class + } + + public static void verifyException(Object obj, String methodName, + Class exceptionClass, Class[] parameterTypes, + Object[] arguments) { + + Throwable triggeredException = null; + try { + Method m = obj.getClass().getMethod(methodName, parameterTypes); + m.invoke(obj, arguments); + } catch (InvocationTargetException ex) { + triggeredException = ex.getTargetException(); + } catch (Exception e) { + triggeredException = e; + } + if (exceptionClass != null) { + assertNotNull("No exception was triggered, expected exception - " + + exceptionClass.getName(), triggeredException); + assertEquals(exceptionClass, triggeredException.getClass()); + } else { + assertNull("Exception was triggered but no exception was expected", + triggeredException); + } + } + + public static NamenodeStatusReport createNamenodeReport(String ns, String nn, + HAServiceState state) { + Random rand = new Random(); + NamenodeStatusReport report = new NamenodeStatusReport(ns, nn, + "localhost:" + rand.nextInt(10000), "localhost:" + rand.nextInt(10000), + "localhost:" + rand.nextInt(10000), "testwebaddress-" + ns + nn); + if (state == null) { + // Unavailable, no additional info + return report; + } + report.setHAServiceState(state); + report.setNamespaceInfo(new NamespaceInfo(1, "tesclusterid", ns, 0, + "testbuildvesion", "testsoftwareversion")); + 
return report; + } + + public static void waitNamenodeRegistered(ActiveNamenodeResolver resolver, + String nameserviceId, String namenodeId, + FederationNamenodeServiceState finalState) + throws InterruptedException, IllegalStateException, IOException { + + for (int loopCount = 0; loopCount < 20; loopCount++) { + + if (loopCount > 0) { + Thread.sleep(1000); + } + + List namenodes; + namenodes = + resolver.getNamenodesForNameserviceId(nameserviceId); + for (FederationNamenodeContext namenode : namenodes) { + if (namenodeId != null + && !namenode.getNamenodeId().equals(namenodeId)) { + // Keep looking + continue; + } + if (finalState != null && !namenode.getState().equals(finalState)) { + // Wrong state, wait a bit more + break; + } + // Found + return; + } + } + assertTrue("Failed to verify state store registration for state - " + + finalState + " - " + " - " + nameserviceId + " - ", false); + } + + public static boolean verifyDate(Date d1, Date d2, long precision) { + if (Math.abs(d1.getTime() - d2.getTime()) < precision) { + return true; + } + return false; + } + + public static boolean addDirectory(FileSystem context, String path) + throws IOException { + context.mkdirs(new Path(path), new FsPermission("777")); + return verifyFileExists(context, path); + } + + public static FileStatus getFileStatus(FileSystem context, String path) + throws IOException { + return context.getFileStatus(new Path(path)); + } + + public static boolean verifyFileExists(FileSystem context, String path) { + try { + FileStatus status = getFileStatus(context, path); + if (status != null) { + return true; + } + } catch (Exception e) { + return false; + } + + return false; + } + + public static boolean checkForFileInDirectory(FileSystem context, + String testPath, String targetFile) throws AccessControlException, + FileNotFoundException, + UnsupportedFileSystemException, IllegalArgumentException, + IOException { + FileStatus[] fileStatus = context.listStatus(new Path(testPath)); + String 
file = null; + String verifyPath = testPath + "/" + targetFile; + if (testPath.equals("/")) { + verifyPath = testPath + targetFile; + } + + Boolean found = false; + for (int i = 0; i < fileStatus.length; i++) { + FileStatus f = fileStatus[i]; + file = Path.getPathWithoutSchemeAndAuthority(f.getPath()).toString(); + if (file.equals(verifyPath)) { + found = true; + } + } + return found; + } + + public static int countContents(FileSystem context, String testPath) + throws IOException { + FileStatus[] fileStatus = context.listStatus(new Path(testPath)); + return fileStatus.length; + } + + public static void createFile(FileSystem fs, String path, long length) + throws IOException { + FsPermission permissions = new FsPermission("700"); + FSDataOutputStream writeStream = fs.create(new Path(path), permissions, + true, 1000, (short) 1, BLOCK_SIZE_BYTES, null); + for (int i = 0; i < length; i++) { + writeStream.write(i); + } + writeStream.close(); + } + + public static String readFile(FileSystem fs, String path) throws IOException { + // Read the file from the filesystem via the active namenode + Path fileName = new Path(path); + InputStreamReader reader = new InputStreamReader(fs.open(fileName)); + BufferedReader bufferedReader = new BufferedReader(reader); + StringBuilder data = new StringBuilder(); + String line; + + while ((line = bufferedReader.readLine()) != null) { + data.append(line); + } + + bufferedReader.close(); + reader.close(); + return data.toString(); + } + + public static boolean deleteFile(FileSystem fs, String path) + throws IOException { + return fs.delete(new Path(path), true); + } +} --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org For additional commands, e-mail: common-commits-help@hadoop.apache.org