hadoop-common-commits mailing list archives

From hanishakon...@apache.org
Subject [24/50] [abbrv] hadoop git commit: HDFS-13215. RBF: Move Router to its own module. Contributed by Wei Yan
Date Wed, 21 Mar 2018 23:48:00 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
new file mode 100644
index 0000000..d5fb9ba
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.records;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
+import org.apache.hadoop.hdfs.server.federation.router.RouterQuotaUsage;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Test;
+
+/**
+ * Test the Mount Table entry in the State Store.
+ */
+public class TestMountTable {
+
+  private static final String SRC = "/test";
+  private static final String DST_NS_0 = "ns0";
+  private static final String DST_NS_1 = "ns1";
+  private static final String DST_PATH_0 = "/path1";
+  private static final String DST_PATH_1 = "/path/path2";
+  private static final List<RemoteLocation> DST = new LinkedList<>();
+  static {
+    DST.add(new RemoteLocation(DST_NS_0, DST_PATH_0));
+    DST.add(new RemoteLocation(DST_NS_1, DST_PATH_1));
+  }
+  private static final Map<String, String> DST_MAP = new LinkedHashMap<>();
+  static {
+    DST_MAP.put(DST_NS_0, DST_PATH_0);
+    DST_MAP.put(DST_NS_1, DST_PATH_1);
+  }
+
+  private static final long DATE_CREATED = 100;
+  private static final long DATE_MOD = 200;
+
+  private static final long NS_COUNT = 1;
+  private static final long NS_QUOTA = 5;
+  private static final long SS_COUNT = 10;
+  private static final long SS_QUOTA = 100;
+
+  private static final RouterQuotaUsage QUOTA = new RouterQuotaUsage.Builder()
+      .fileAndDirectoryCount(NS_COUNT).quota(NS_QUOTA).spaceConsumed(SS_COUNT)
+      .spaceQuota(SS_QUOTA).build();
+
+  @Test
+  public void testGetterSetter() throws IOException {
+
+    MountTable record = MountTable.newInstance(SRC, DST_MAP);
+
+    validateDestinations(record);
+    assertEquals(SRC, record.getSourcePath());
+    assertEquals(DST, record.getDestinations());
+    assertTrue(DATE_CREATED > 0);
+    assertTrue(DATE_MOD > 0);
+
+    RouterQuotaUsage quota = record.getQuota();
+    assertEquals(0, quota.getFileAndDirectoryCount());
+    assertEquals(HdfsConstants.QUOTA_DONT_SET, quota.getQuota());
+    assertEquals(0, quota.getSpaceConsumed());
+    assertEquals(HdfsConstants.QUOTA_DONT_SET, quota.getSpaceQuota());
+
+    MountTable record2 =
+        MountTable.newInstance(SRC, DST_MAP, DATE_CREATED, DATE_MOD);
+
+    validateDestinations(record2);
+    assertEquals(SRC, record2.getSourcePath());
+    assertEquals(DST, record2.getDestinations());
+    assertEquals(DATE_CREATED, record2.getDateCreated());
+    assertEquals(DATE_MOD, record2.getDateModified());
+    assertFalse(record.isReadOnly());
+    assertEquals(DestinationOrder.HASH, record.getDestOrder());
+  }
+
+  @Test
+  public void testSerialization() throws IOException {
+    testSerialization(DestinationOrder.RANDOM);
+    testSerialization(DestinationOrder.HASH);
+    testSerialization(DestinationOrder.LOCAL);
+  }
+
+  private void testSerialization(final DestinationOrder order)
+      throws IOException {
+
+    MountTable record = MountTable.newInstance(
+        SRC, DST_MAP, DATE_CREATED, DATE_MOD);
+    record.setReadOnly(true);
+    record.setDestOrder(order);
+    record.setQuota(QUOTA);
+
+    StateStoreSerializer serializer = StateStoreSerializer.getSerializer();
+    String serializedString = serializer.serializeString(record);
+    MountTable record2 =
+        serializer.deserialize(serializedString, MountTable.class);
+
+    validateDestinations(record2);
+    assertEquals(SRC, record2.getSourcePath());
+    assertEquals(DST, record2.getDestinations());
+    assertEquals(DATE_CREATED, record2.getDateCreated());
+    assertEquals(DATE_MOD, record2.getDateModified());
+    assertTrue(record2.isReadOnly());
+    assertEquals(order, record2.getDestOrder());
+
+    RouterQuotaUsage quotaGet = record2.getQuota();
+    assertEquals(NS_COUNT, quotaGet.getFileAndDirectoryCount());
+    assertEquals(NS_QUOTA, quotaGet.getQuota());
+    assertEquals(SS_COUNT, quotaGet.getSpaceConsumed());
+    assertEquals(SS_QUOTA, quotaGet.getSpaceQuota());
+  }
+
+  @Test
+  public void testReadOnly() throws IOException {
+
+    Map<String, String> dest = new LinkedHashMap<>();
+    dest.put(DST_NS_0, DST_PATH_0);
+    dest.put(DST_NS_1, DST_PATH_1);
+    MountTable record1 = MountTable.newInstance(SRC, dest);
+    record1.setReadOnly(true);
+
+    validateDestinations(record1);
+    assertEquals(SRC, record1.getSourcePath());
+    assertEquals(DST, record1.getDestinations());
+    assertTrue(DATE_CREATED > 0);
+    assertTrue(DATE_MOD > 0);
+    assertTrue(record1.isReadOnly());
+
+    MountTable record2 = MountTable.newInstance(
+        SRC, DST_MAP, DATE_CREATED, DATE_MOD);
+    record2.setReadOnly(true);
+
+    validateDestinations(record2);
+    assertEquals(SRC, record2.getSourcePath());
+    assertEquals(DST, record2.getDestinations());
+    assertEquals(DATE_CREATED, record2.getDateCreated());
+    assertEquals(DATE_MOD, record2.getDateModified());
+    assertTrue(record2.isReadOnly());
+  }
+
+  @Test
+  public void testOrder() throws IOException {
+    testOrder(DestinationOrder.HASH);
+    testOrder(DestinationOrder.LOCAL);
+    testOrder(DestinationOrder.RANDOM);
+  }
+
+  private void testOrder(final DestinationOrder order)
+      throws IOException {
+
+    MountTable record = MountTable.newInstance(
+        SRC, DST_MAP, DATE_CREATED, DATE_MOD);
+    record.setDestOrder(order);
+
+    validateDestinations(record);
+    assertEquals(SRC, record.getSourcePath());
+    assertEquals(DST, record.getDestinations());
+    assertEquals(DATE_CREATED, record.getDateCreated());
+    assertEquals(DATE_MOD, record.getDateModified());
+    assertEquals(order, record.getDestOrder());
+  }
+
+  private void validateDestinations(MountTable record) {
+
+    assertEquals(SRC, record.getSourcePath());
+    assertEquals(2, record.getDestinations().size());
+
+    RemoteLocation location1 = record.getDestinations().get(0);
+    assertEquals(DST_NS_0, location1.getNameserviceId());
+    assertEquals(DST_PATH_0, location1.getDest());
+
+    RemoteLocation location2 = record.getDestinations().get(1);
+    assertEquals(DST_NS_1, location2.getNameserviceId());
+    assertEquals(DST_PATH_1, location2.getDest());
+  }
+
+  @Test
+  public void testQuota() throws IOException {
+    MountTable record = MountTable.newInstance(SRC, DST_MAP);
+    record.setQuota(QUOTA);
+
+    validateDestinations(record);
+    assertEquals(SRC, record.getSourcePath());
+    assertEquals(DST, record.getDestinations());
+    assertTrue(DATE_CREATED > 0);
+    assertTrue(DATE_MOD > 0);
+
+    RouterQuotaUsage quotaGet = record.getQuota();
+    assertEquals(NS_COUNT, quotaGet.getFileAndDirectoryCount());
+    assertEquals(NS_QUOTA, quotaGet.getQuota());
+    assertEquals(SS_COUNT, quotaGet.getSpaceConsumed());
+    assertEquals(SS_QUOTA, quotaGet.getSpaceQuota());
+  }
+
+  @Test
+  public void testValidation() throws IOException {
+    Map<String, String> destinations = new HashMap<>();
+    destinations.put("ns0", "/testValidate-dest");
+    try {
+      MountTable.newInstance("testValidate", destinations);
+      fail("Mount table entry should be created failed.");
+    } catch (Exception e) {
+      GenericTestUtils.assertExceptionContains(
+          MountTable.ERROR_MSG_MUST_START_WITH_BACK_SLASH, e);
+    }
+
+    destinations.clear();
+    destinations.put("ns0", "testValidate-dest");
+    try {
+      MountTable.newInstance("/testValidate", destinations);
+      fail("Mount table entry should be created failed.");
+    } catch (Exception e) {
+      GenericTestUtils.assertExceptionContains(
+          MountTable.ERROR_MSG_ALL_DEST_MUST_START_WITH_BACK_SLASH, e);
+    }
+
+    destinations.clear();
+    destinations.put("", "/testValidate-dest");
+    try {
+      MountTable.newInstance("/testValidate", destinations);
+      fail("Mount table entry should be created failed.");
+    } catch (Exception e) {
+      GenericTestUtils.assertExceptionContains(
+          MountTable.ERROR_MSG_INVAILD_DEST_NS, e);
+    }
+
+    destinations.clear();
+    destinations.put("ns0", "/testValidate-dest");
+    MountTable record = MountTable.newInstance("/testValidate", destinations);
+    assertNotNull(record);
+
+  }
+}
\ No newline at end of file

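For context, the mount-table API exercised by TestMountTable above follows the pattern below. This is a minimal sketch, not part of the patch, reusing only calls demonstrated in the test (newInstance, setDestOrder, setReadOnly, and the State Store serializer round trip); the destinations mirror the test's constants.

    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
    import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
    import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;

    public class MountTableSketch {
      public static void main(String[] args) throws Exception {
        // Map one federated path to destinations in two subclusters.
        Map<String, String> destinations = new LinkedHashMap<>();
        destinations.put("ns0", "/path1");
        destinations.put("ns1", "/path/path2");

        MountTable entry = MountTable.newInstance("/test", destinations);
        entry.setDestOrder(DestinationOrder.HASH); // choose destination by hash
        entry.setReadOnly(true);

        // Round-trip through the State Store serializer, as the tests do.
        StateStoreSerializer serializer = StateStoreSerializer.getSerializer();
        String serialized = serializer.serializeString(entry);
        MountTable copy = serializer.deserialize(serialized, MountTable.class);
        System.out.println("read-only preserved: " + copy.isReadOnly());
      }
    }
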
http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestRouterState.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestRouterState.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestRouterState.java
new file mode 100644
index 0000000..dfe2bc9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestRouterState.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.records;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.server.federation.router.RouterServiceState;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+import org.junit.Test;
+
+/**
+ * Test the Router State records.
+ */
+public class TestRouterState {
+
+  private static final String ADDRESS = "address";
+  private static final String VERSION = "version";
+  private static final String COMPILE_INFO = "compileInfo";
+  private static final long START_TIME = 100;
+  private static final long DATE_MODIFIED = 200;
+  private static final long DATE_CREATED = 300;
+  private static final long FILE_RESOLVER_VERSION = 500;
+  private static final RouterServiceState STATE = RouterServiceState.RUNNING;
+
+
+  private RouterState generateRecord() throws IOException {
+    RouterState record = RouterState.newInstance(ADDRESS, START_TIME, STATE);
+    record.setVersion(VERSION);
+    record.setCompileInfo(COMPILE_INFO);
+    record.setDateCreated(DATE_CREATED);
+    record.setDateModified(DATE_MODIFIED);
+
+    StateStoreVersion version = StateStoreVersion.newInstance();
+    version.setMountTableVersion(FILE_RESOLVER_VERSION);
+    record.setStateStoreVersion(version);
+    return record;
+  }
+
+  private void validateRecord(RouterState record) throws IOException {
+    assertEquals(ADDRESS, record.getAddress());
+    assertEquals(START_TIME, record.getDateStarted());
+    assertEquals(STATE, record.getStatus());
+    assertEquals(COMPILE_INFO, record.getCompileInfo());
+    assertEquals(VERSION, record.getVersion());
+
+    StateStoreVersion version = record.getStateStoreVersion();
+    assertEquals(FILE_RESOLVER_VERSION, version.getMountTableVersion());
+  }
+
+  @Test
+  public void testGetterSetter() throws IOException {
+    RouterState record = generateRecord();
+    validateRecord(record);
+  }
+
+  @Test
+  public void testSerialization() throws IOException {
+
+    RouterState record = generateRecord();
+
+    StateStoreSerializer serializer = StateStoreSerializer.getSerializer();
+    String serializedString = serializer.serializeString(record);
+    RouterState newRecord =
+        serializer.deserialize(serializedString, RouterState.class);
+
+    validateRecord(newRecord);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/hdfs-site.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/hdfs-site.xml
new file mode 100644
index 0000000..e6dd2f1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/hdfs-site.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <!-- Disable min block size since most tests use tiny blocks -->
+  <property>
+    <name>dfs.namenode.fs-limits.min-block-size</name>
+    <value>0</value>
+  </property>
+
+</configuration>

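The override above matters because the NameNode's default minimum block size is 1 MB, which would reject the tiny blocks most of these tests write. As a hedged sketch, the same override can be applied programmatically in a test, assuming only the stock Configuration and MiniDFSCluster test APIs:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class TinyBlockClusterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Same override as the hdfs-site.xml above: lift the min-block-size floor.
        conf.setLong("dfs.namenode.fs-limits.min-block-size", 0);
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          cluster.waitActive();
        } finally {
          cluster.shutdown();
        }
      }
    }
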
http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/log4j.properties b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/log4j.properties
new file mode 100644
index 0000000..c671ccc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/log4j.properties
@@ -0,0 +1,23 @@
+#
+#   Licensed to the Apache Software Foundation (ASF) under one or more
+#   contributor license agreements.  See the NOTICE file distributed with
+#   this work for additional information regarding copyright ownership.
+#   The ASF licenses this file to You under the Apache License, Version 2.0
+#   (the "License"); you may not use this file except in compliance with
+#   the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=info,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index a99ea75..3b770f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -15,9 +15,6 @@
        <Package name="org.apache.hadoop.hdfs.qjournal.protocol" />
      </Match>
      <Match>
-       <Package name="org.apache.hadoop.hdfs.federation.protocol.proto" />
-     </Match>
-     <Match>
        <Bug pattern="EI_EXPOSE_REP" />
      </Match>
      <Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 909e589..f8b1722 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -263,9 +263,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                 <copy file="${basedir}/src/main/webapps/proto-web.xml"
                       tofile="${project.build.directory}/webapps/nfs3/WEB-INF/web.xml"
                       filtering="true"/>
-                <copy file="${basedir}/src/main/webapps/proto-web.xml"
-                      tofile="${project.build.directory}/webapps/router/WEB-INF/web.xml"
-                      filtering="true"/>
                 <copy toDir="${project.build.directory}/webapps">
                   <fileset dir="${basedir}/src/main/webapps">
                     <exclude name="**/proto-web.xml"/>
@@ -343,8 +340,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                   <include>QJournalProtocol.proto</include>
                   <include>editlog.proto</include>
                   <include>fsimage.proto</include>
-                  <include>FederationProtocol.proto</include>
-                  <include>RouterProtocol.proto</include>
                   <include>AliasMapProtocol.proto</include>
                   <include>InterQJournalProtocol.proto</include>
                 </includes>
@@ -395,7 +390,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             <exclude>src/main/webapps/hdfs/robots.txt</exclude>
             <exclude>src/main/webapps/journal/robots.txt</exclude>
             <exclude>src/main/webapps/secondary/robots.txt</exclude>
-            <exclude>src/main/webapps/router/robots.txt</exclude>
             <exclude>src/contrib/**</exclude>
             <exclude>src/site/resources/images/*</exclude>
             <exclude>src/main/webapps/static/bootstrap-3.0.2/**</exclude>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index af4d5bf..a7ee2bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hdfs;
 
-import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -27,15 +25,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
-import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
-import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
-import org.apache.hadoop.hdfs.server.federation.router.RouterRpcMonitor;
-import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformanceMonitor;
-import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
-import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
-import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
-import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -1193,199 +1182,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.use.dfs.network.topology";
   public static final boolean DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT = true;
 
-  // HDFS federation
-  public static final String FEDERATION_PREFIX = "dfs.federation.";
-
-  // HDFS Router-based federation
-  public static final String FEDERATION_ROUTER_PREFIX =
-      "dfs.federation.router.";
-  public static final String DFS_ROUTER_DEFAULT_NAMESERVICE =
-      FEDERATION_ROUTER_PREFIX + "default.nameserviceId";
-  public static final String DFS_ROUTER_HANDLER_COUNT_KEY =
-      FEDERATION_ROUTER_PREFIX + "handler.count";
-  public static final int DFS_ROUTER_HANDLER_COUNT_DEFAULT = 10;
-  public static final String DFS_ROUTER_READER_QUEUE_SIZE_KEY =
-      FEDERATION_ROUTER_PREFIX + "reader.queue.size";
-  public static final int DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT = 100;
-  public static final String DFS_ROUTER_READER_COUNT_KEY =
-      FEDERATION_ROUTER_PREFIX + "reader.count";
-  public static final int DFS_ROUTER_READER_COUNT_DEFAULT = 1;
-  public static final String DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY =
-      FEDERATION_ROUTER_PREFIX + "handler.queue.size";
-  public static final int DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT = 100;
-  public static final String DFS_ROUTER_RPC_BIND_HOST_KEY =
-      FEDERATION_ROUTER_PREFIX + "rpc-bind-host";
-  public static final int DFS_ROUTER_RPC_PORT_DEFAULT = 8888;
-  public static final String DFS_ROUTER_RPC_ADDRESS_KEY =
-      FEDERATION_ROUTER_PREFIX + "rpc-address";
-  public static final String DFS_ROUTER_RPC_ADDRESS_DEFAULT =
-      "0.0.0.0:" + DFS_ROUTER_RPC_PORT_DEFAULT;
-  public static final String DFS_ROUTER_RPC_ENABLE =
-      FEDERATION_ROUTER_PREFIX + "rpc.enable";
-  public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true;
-
-  public static final String DFS_ROUTER_METRICS_ENABLE =
-      FEDERATION_ROUTER_PREFIX + "metrics.enable";
-  public static final boolean DFS_ROUTER_METRICS_ENABLE_DEFAULT = true;
-  public static final String DFS_ROUTER_METRICS_CLASS =
-      FEDERATION_ROUTER_PREFIX + "metrics.class";
-  public static final Class<? extends RouterRpcMonitor>
-      DFS_ROUTER_METRICS_CLASS_DEFAULT =
-          FederationRPCPerformanceMonitor.class;
-
-  // HDFS Router heartbeat
-  public static final String DFS_ROUTER_HEARTBEAT_ENABLE =
-      FEDERATION_ROUTER_PREFIX + "heartbeat.enable";
-  public static final boolean DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT = true;
-  public static final String DFS_ROUTER_HEARTBEAT_INTERVAL_MS =
-      FEDERATION_ROUTER_PREFIX + "heartbeat.interval";
-  public static final long DFS_ROUTER_HEARTBEAT_INTERVAL_MS_DEFAULT =
-      TimeUnit.SECONDS.toMillis(5);
-  public static final String DFS_ROUTER_MONITOR_NAMENODE =
-      FEDERATION_ROUTER_PREFIX + "monitor.namenode";
-  public static final String DFS_ROUTER_MONITOR_LOCAL_NAMENODE =
-      FEDERATION_ROUTER_PREFIX + "monitor.localnamenode.enable";
-  public static final boolean DFS_ROUTER_MONITOR_LOCAL_NAMENODE_DEFAULT = true;
-  public static final String DFS_ROUTER_HEARTBEAT_STATE_INTERVAL_MS =
-      FEDERATION_ROUTER_PREFIX + "heartbeat-state.interval";
-  public static final long DFS_ROUTER_HEARTBEAT_STATE_INTERVAL_MS_DEFAULT =
-      TimeUnit.SECONDS.toMillis(5);
-
-  // HDFS Router NN client
-  public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE =
-      FEDERATION_ROUTER_PREFIX + "connection.pool-size";
-  public static final int DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE_DEFAULT =
-      64;
-  public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN =
-      FEDERATION_ROUTER_PREFIX + "connection.pool.clean.ms";
-  public static final long DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN_DEFAULT =
-      TimeUnit.MINUTES.toMillis(1);
-  public static final String DFS_ROUTER_NAMENODE_CONNECTION_CLEAN_MS =
-      FEDERATION_ROUTER_PREFIX + "connection.clean.ms";
-  public static final long DFS_ROUTER_NAMENODE_CONNECTION_CLEAN_MS_DEFAULT =
-      TimeUnit.SECONDS.toMillis(10);
-
-  // HDFS Router RPC client
-  public static final String DFS_ROUTER_CLIENT_THREADS_SIZE =
-      FEDERATION_ROUTER_PREFIX + "client.thread-size";
-  public static final int DFS_ROUTER_CLIENT_THREADS_SIZE_DEFAULT = 32;
-  public static final String DFS_ROUTER_CLIENT_MAX_ATTEMPTS =
-      FEDERATION_ROUTER_PREFIX + "client.retry.max.attempts";
-  public static final int DFS_ROUTER_CLIENT_MAX_ATTEMPTS_DEFAULT = 3;
-
-  // HDFS Router State Store connection
-  public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS =
-      FEDERATION_ROUTER_PREFIX + "file.resolver.client.class";
-  public static final Class<? extends FileSubclusterResolver>
-      FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT =
-          MountTableResolver.class;
-  public static final String FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS =
-      FEDERATION_ROUTER_PREFIX + "namenode.resolver.client.class";
-  public static final Class<? extends ActiveNamenodeResolver>
-      FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS_DEFAULT =
-          MembershipNamenodeResolver.class;
-
-  // HDFS Router-based federation State Store
-  public static final String FEDERATION_STORE_PREFIX =
-      FEDERATION_ROUTER_PREFIX + "store.";
-
-  public static final String DFS_ROUTER_STORE_ENABLE =
-      FEDERATION_STORE_PREFIX + "enable";
-  public static final boolean DFS_ROUTER_STORE_ENABLE_DEFAULT = true;
-
-  public static final String FEDERATION_STORE_SERIALIZER_CLASS =
-      DFSConfigKeys.FEDERATION_STORE_PREFIX + "serializer";
-  public static final Class<StateStoreSerializerPBImpl>
-      FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT =
-          StateStoreSerializerPBImpl.class;
-
-  public static final String FEDERATION_STORE_DRIVER_CLASS =
-      FEDERATION_STORE_PREFIX + "driver.class";
-  public static final Class<? extends StateStoreDriver>
-      FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreZooKeeperImpl.class;
-
-  public static final String FEDERATION_STORE_CONNECTION_TEST_MS =
-      FEDERATION_STORE_PREFIX + "connection.test";
-  public static final long FEDERATION_STORE_CONNECTION_TEST_MS_DEFAULT =
-      TimeUnit.MINUTES.toMillis(1);
-
-  public static final String DFS_ROUTER_CACHE_TIME_TO_LIVE_MS =
-      FEDERATION_ROUTER_PREFIX + "cache.ttl";
-  public static final long DFS_ROUTER_CACHE_TIME_TO_LIVE_MS_DEFAULT =
-      TimeUnit.MINUTES.toMillis(1);
-
-  public static final String FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS =
-      FEDERATION_STORE_PREFIX + "membership.expiration";
-  public static final long FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS_DEFAULT =
-      TimeUnit.MINUTES.toMillis(5);
-  public static final String FEDERATION_STORE_ROUTER_EXPIRATION_MS =
-      FEDERATION_STORE_PREFIX + "router.expiration";
-  public static final long FEDERATION_STORE_ROUTER_EXPIRATION_MS_DEFAULT =
-      TimeUnit.MINUTES.toMillis(5);
-
-  // HDFS Router safe mode
-  public static final String DFS_ROUTER_SAFEMODE_ENABLE =
-      FEDERATION_ROUTER_PREFIX + "safemode.enable";
-  public static final boolean DFS_ROUTER_SAFEMODE_ENABLE_DEFAULT = true;
-  public static final String DFS_ROUTER_SAFEMODE_EXTENSION =
-      FEDERATION_ROUTER_PREFIX + "safemode.extension";
-  public static final long DFS_ROUTER_SAFEMODE_EXTENSION_DEFAULT =
-      TimeUnit.SECONDS.toMillis(30);
-  public static final String DFS_ROUTER_SAFEMODE_EXPIRATION =
-      FEDERATION_ROUTER_PREFIX + "safemode.expiration";
-  public static final long DFS_ROUTER_SAFEMODE_EXPIRATION_DEFAULT =
-      3 * DFS_ROUTER_CACHE_TIME_TO_LIVE_MS_DEFAULT;
-
-  // HDFS Router-based federation mount table entries
-  /** Maximum number of cache entries to have. */
-  public static final String FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE =
-      DFSConfigKeys.FEDERATION_ROUTER_PREFIX + "mount-table.max-cache-size";
-  /** Remove cache entries if we have more than 10k. */
-  public static final int FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT = 10000;
-
-  // HDFS Router-based federation admin
-  public static final String DFS_ROUTER_ADMIN_HANDLER_COUNT_KEY =
-      FEDERATION_ROUTER_PREFIX + "admin.handler.count";
-  public static final int DFS_ROUTER_ADMIN_HANDLER_COUNT_DEFAULT = 1;
-  public static final int    DFS_ROUTER_ADMIN_PORT_DEFAULT = 8111;
-  public static final String DFS_ROUTER_ADMIN_ADDRESS_KEY =
-      FEDERATION_ROUTER_PREFIX + "admin-address";
-  public static final String DFS_ROUTER_ADMIN_ADDRESS_DEFAULT =
-      "0.0.0.0:" + DFS_ROUTER_ADMIN_PORT_DEFAULT;
-  public static final String DFS_ROUTER_ADMIN_BIND_HOST_KEY =
-      FEDERATION_ROUTER_PREFIX + "admin-bind-host";
-  public static final String DFS_ROUTER_ADMIN_ENABLE =
-      FEDERATION_ROUTER_PREFIX + "admin.enable";
-  public static final boolean DFS_ROUTER_ADMIN_ENABLE_DEFAULT = true;
-
-  // HDFS Router-based federation web
-  public static final String DFS_ROUTER_HTTP_ENABLE =
-      FEDERATION_ROUTER_PREFIX + "http.enable";
-  public static final boolean DFS_ROUTER_HTTP_ENABLE_DEFAULT = true;
-  public static final String DFS_ROUTER_HTTP_ADDRESS_KEY =
-      FEDERATION_ROUTER_PREFIX + "http-address";
-  public static final int    DFS_ROUTER_HTTP_PORT_DEFAULT = 50071;
-  public static final String DFS_ROUTER_HTTP_BIND_HOST_KEY =
-      FEDERATION_ROUTER_PREFIX + "http-bind-host";
-  public static final String DFS_ROUTER_HTTP_ADDRESS_DEFAULT =
-      "0.0.0.0:" + DFS_ROUTER_HTTP_PORT_DEFAULT;
-  public static final String DFS_ROUTER_HTTPS_ADDRESS_KEY =
-      FEDERATION_ROUTER_PREFIX + "https-address";
-  public static final int    DFS_ROUTER_HTTPS_PORT_DEFAULT = 50072;
-  public static final String DFS_ROUTER_HTTPS_BIND_HOST_KEY =
-      FEDERATION_ROUTER_PREFIX + "https-bind-host";
-  public static final String DFS_ROUTER_HTTPS_ADDRESS_DEFAULT =
-      "0.0.0.0:" + DFS_ROUTER_HTTPS_PORT_DEFAULT;
-
-  // HDFS Router-based federation quota
-  public static final String DFS_ROUTER_QUOTA_ENABLE =
-      FEDERATION_ROUTER_PREFIX + "quota.enable";
-  public static final boolean DFS_ROUTER_QUOTA_ENABLED_DEFAULT = false;
-  public static final String DFS_ROUTER_QUOTA_CACHE_UPATE_INTERVAL =
-      FEDERATION_ROUTER_PREFIX + "quota-cache.update.interval";
-  public static final long DFS_ROUTER_QUOTA_CACHE_UPATE_INTERVAL_DEFAULT =
-      60000;
-
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

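Although the router constants move out of DFSConfigKeys with this change, the underlying settings remain ordinary Configuration properties. A minimal sketch of reading two of them, assuming only the stock Configuration API; the literal key strings and defaults below are taken verbatim from the constants deleted above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class RouterConfSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Keys and defaults match DFS_ROUTER_HANDLER_COUNT_* and
        // DFS_ROUTER_RPC_ADDRESS_* as removed from DFSConfigKeys above.
        int handlers = conf.getInt("dfs.federation.router.handler.count", 10);
        String rpcAddr = conf.get("dfs.federation.router.rpc-address", "0.0.0.0:8888");
        System.out.println("handlers=" + handlers + " rpc=" + rpcAddr);
      }
    }
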
http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java
deleted file mode 100644
index 96fa794..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocolPB;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
-import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.security.token.TokenInfo;
-
-/**
- * Protocol that clients use to communicate with the NameNode.
- * Note: This extends the protocolbuffer service based interface to
- * add annotations required for security.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Stable
-@KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
-@TokenInfo(DelegationTokenSelector.class)
-@ProtocolInfo(protocolName = HdfsConstants.CLIENT_NAMENODE_PROTOCOL_NAME,
-    protocolVersion = 1)
-public interface RouterAdminProtocolPB extends
-    RouterAdminProtocolService.BlockingInterface {
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
deleted file mode 100644
index 159d5c2..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocolPB;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryRequestProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryResponseProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeRequestProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeResponseProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesRequestProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesResponseProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeRequestProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeResponseProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeRequestProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeResponseProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryResponseProto;
-import org.apache.hadoop.hdfs.server.federation.router.RouterAdminServer;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryRequestPBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryResponsePBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnterSafeModeRequestPBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnterSafeModeResponsePBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesRequestPBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesResponsePBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetSafeModeRequestPBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetSafeModeResponsePBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.LeaveSafeModeRequestPBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.LeaveSafeModeResponsePBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryResponsePBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryRequestPBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryResponsePBImpl;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-
-/**
- * This class is used on the server side. Calls come across the wire for the
- * protocol {@link RouterAdminProtocolPB}. This class translates the PB data
- * types to the native data types used inside the HDFS Router as specified in
- * the generic RouterAdminProtocol.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Stable
-public class RouterAdminProtocolServerSideTranslatorPB implements
-    RouterAdminProtocolPB {
-
-  private final RouterAdminServer server;
-
-  /**
-   * Constructor.
-   * @param server The NN server.
-   * @throws IOException
-   */
-  public RouterAdminProtocolServerSideTranslatorPB(RouterAdminServer server)
-      throws IOException {
-    this.server = server;
-  }
-
-  @Override
-  public AddMountTableEntryResponseProto addMountTableEntry(
-      RpcController controller, AddMountTableEntryRequestProto request)
-      throws ServiceException {
-
-    try {
-      AddMountTableEntryRequest req =
-          new AddMountTableEntryRequestPBImpl(request);
-      AddMountTableEntryResponse response = server.addMountTableEntry(req);
-      AddMountTableEntryResponsePBImpl responsePB =
-          (AddMountTableEntryResponsePBImpl)response;
-      return responsePB.getProto();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  /**
-   * Remove an entry from the mount table.
-   */
-  @Override
-  public RemoveMountTableEntryResponseProto removeMountTableEntry(
-      RpcController controller, RemoveMountTableEntryRequestProto request)
-      throws ServiceException {
-    try {
-      RemoveMountTableEntryRequest req =
-          new RemoveMountTableEntryRequestPBImpl(request);
-      RemoveMountTableEntryResponse response =
-          server.removeMountTableEntry(req);
-      RemoveMountTableEntryResponsePBImpl responsePB =
-          (RemoveMountTableEntryResponsePBImpl)response;
-      return responsePB.getProto();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  /**
-   * Get matching mount table entries.
-   */
-  @Override
-  public GetMountTableEntriesResponseProto getMountTableEntries(
-      RpcController controller, GetMountTableEntriesRequestProto request)
-          throws ServiceException {
-    try {
-      GetMountTableEntriesRequest req =
-          new GetMountTableEntriesRequestPBImpl(request);
-      GetMountTableEntriesResponse response = server.getMountTableEntries(req);
-      GetMountTableEntriesResponsePBImpl responsePB =
-          (GetMountTableEntriesResponsePBImpl)response;
-      return responsePB.getProto();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  /**
-   * Update a single mount table entry.
-   */
-  @Override
-  public UpdateMountTableEntryResponseProto updateMountTableEntry(
-      RpcController controller, UpdateMountTableEntryRequestProto request)
-          throws ServiceException {
-    try {
-      UpdateMountTableEntryRequest req =
-          new UpdateMountTableEntryRequestPBImpl(request);
-      UpdateMountTableEntryResponse response =
-          server.updateMountTableEntry(req);
-      UpdateMountTableEntryResponsePBImpl responsePB =
-          (UpdateMountTableEntryResponsePBImpl)response;
-      return responsePB.getProto();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public EnterSafeModeResponseProto enterSafeMode(RpcController controller,
-      EnterSafeModeRequestProto request) throws ServiceException {
-    try {
-      EnterSafeModeRequest req = new EnterSafeModeRequestPBImpl(request);
-      EnterSafeModeResponse response = server.enterSafeMode(req);
-      EnterSafeModeResponsePBImpl responsePB =
-          (EnterSafeModeResponsePBImpl) response;
-      return responsePB.getProto();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public LeaveSafeModeResponseProto leaveSafeMode(RpcController controller,
-      LeaveSafeModeRequestProto request) throws ServiceException {
-    try {
-      LeaveSafeModeRequest req = new LeaveSafeModeRequestPBImpl(request);
-      LeaveSafeModeResponse response = server.leaveSafeMode(req);
-      LeaveSafeModeResponsePBImpl responsePB =
-          (LeaveSafeModeResponsePBImpl) response;
-      return responsePB.getProto();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public GetSafeModeResponseProto getSafeMode(RpcController controller,
-      GetSafeModeRequestProto request) throws ServiceException {
-    try {
-      GetSafeModeRequest req = new GetSafeModeRequestPBImpl(request);
-      GetSafeModeResponse response = server.getSafeMode(req);
-      GetSafeModeResponsePBImpl responsePB =
-          (GetSafeModeResponsePBImpl) response;
-      return responsePB.getProto();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java
deleted file mode 100644
index d6210ce..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java
+++ /dev/null
@@ -1,208 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocolPB;
-
-import java.io.Closeable;
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryRequestProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryResponseProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeRequestProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeResponseProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesRequestProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesResponseProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeRequestProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeResponseProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeRequestProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeResponseProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto;
-import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryResponseProto;
-import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
-import org.apache.hadoop.hdfs.server.federation.router.RouterStateManager;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryRequestPBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryResponsePBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnterSafeModeResponsePBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesRequestPBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesResponsePBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetSafeModeResponsePBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.LeaveSafeModeResponsePBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryResponsePBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryRequestPBImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryResponsePBImpl;
-import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtocolMetaInterface;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RpcClientUtil;
-
-import com.google.protobuf.ServiceException;
-
-/**
- * This class forwards NN's ClientProtocol calls as RPC calls to the NN server
- * while translating from the parameter types used in ClientProtocol to the
- * new PB types.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Stable
-public class RouterAdminProtocolTranslatorPB
-    implements ProtocolMetaInterface, MountTableManager,
-    Closeable, ProtocolTranslator, RouterStateManager {
-  final private RouterAdminProtocolPB rpcProxy;
-
-  public RouterAdminProtocolTranslatorPB(RouterAdminProtocolPB proxy) {
-    rpcProxy = proxy;
-  }
-
-  @Override
-  public void close() {
-    RPC.stopProxy(rpcProxy);
-  }
-
-  @Override
-  public Object getUnderlyingProxyObject() {
-    return rpcProxy;
-  }
-
-  @Override
-  public boolean isMethodSupported(String methodName) throws IOException {
-    return RpcClientUtil.isMethodSupported(rpcProxy,
-        RouterAdminProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
-        RPC.getProtocolVersion(RouterAdminProtocolPB.class), methodName);
-  }
-
-  @Override
-  public AddMountTableEntryResponse addMountTableEntry(
-      AddMountTableEntryRequest request) throws IOException {
-    AddMountTableEntryRequestPBImpl requestPB =
-        (AddMountTableEntryRequestPBImpl)request;
-    AddMountTableEntryRequestProto proto = requestPB.getProto();
-    try {
-      AddMountTableEntryResponseProto response =
-          rpcProxy.addMountTableEntry(null, proto);
-      return new AddMountTableEntryResponsePBImpl(response);
-    } catch (ServiceException e) {
-      throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
-    }
-  }
-
-  @Override
-  public UpdateMountTableEntryResponse updateMountTableEntry(
-      UpdateMountTableEntryRequest request) throws IOException {
-    UpdateMountTableEntryRequestPBImpl requestPB =
-        (UpdateMountTableEntryRequestPBImpl)request;
-    UpdateMountTableEntryRequestProto proto = requestPB.getProto();
-    try {
-      UpdateMountTableEntryResponseProto response =
-          rpcProxy.updateMountTableEntry(null, proto);
-      return new UpdateMountTableEntryResponsePBImpl(response);
-    } catch (ServiceException e) {
-      throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
-    }
-  }
-
-  @Override
-  public RemoveMountTableEntryResponse removeMountTableEntry(
-      RemoveMountTableEntryRequest request) throws IOException {
-    RemoveMountTableEntryRequestPBImpl requestPB =
-        (RemoveMountTableEntryRequestPBImpl)request;
-    RemoveMountTableEntryRequestProto proto = requestPB.getProto();
-    try {
-      RemoveMountTableEntryResponseProto responseProto =
-          rpcProxy.removeMountTableEntry(null, proto);
-      return new RemoveMountTableEntryResponsePBImpl(responseProto);
-    } catch (ServiceException e) {
-      throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
-    }
-  }
-
-  @Override
-  public GetMountTableEntriesResponse getMountTableEntries(
-      GetMountTableEntriesRequest request) throws IOException {
-    GetMountTableEntriesRequestPBImpl requestPB =
-        (GetMountTableEntriesRequestPBImpl)request;
-    GetMountTableEntriesRequestProto proto = requestPB.getProto();
-    try {
-      GetMountTableEntriesResponseProto response =
-          rpcProxy.getMountTableEntries(null, proto);
-      return new GetMountTableEntriesResponsePBImpl(response);
-    } catch (ServiceException e) {
-      throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
-    }
-  }
-
-  @Override
-  public EnterSafeModeResponse enterSafeMode(EnterSafeModeRequest request)
-      throws IOException {
-    EnterSafeModeRequestProto proto =
-        EnterSafeModeRequestProto.newBuilder().build();
-    try {
-      EnterSafeModeResponseProto response =
-          rpcProxy.enterSafeMode(null, proto);
-      return new EnterSafeModeResponsePBImpl(response);
-    } catch (ServiceException e) {
-      throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
-    }
-  }
-
-  @Override
-  public LeaveSafeModeResponse leaveSafeMode(LeaveSafeModeRequest request)
-      throws IOException {
-    LeaveSafeModeRequestProto proto =
-        LeaveSafeModeRequestProto.newBuilder().build();
-    try {
-      LeaveSafeModeResponseProto response =
-          rpcProxy.leaveSafeMode(null, proto);
-      return new LeaveSafeModeResponsePBImpl(response);
-    } catch (ServiceException e) {
-      throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
-    }
-  }
-
-  @Override
-  public GetSafeModeResponse getSafeMode(GetSafeModeRequest request)
-      throws IOException {
-    GetSafeModeRequestProto proto =
-        GetSafeModeRequestProto.newBuilder().build();
-    try {
-      GetSafeModeResponseProto response =
-          rpcProxy.getSafeMode(null, proto);
-      return new GetSafeModeResponsePBImpl(response);
-    } catch (ServiceException e) {
-      throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
-    }
-  }
-}
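
The translator above is the client-side half of the Router admin protocol:
callers obtain a protobuf RPC proxy for RouterAdminProtocolPB and wrap it in
RouterAdminProtocolTranslatorPB to work with the MountTableManager types. A
minimal sketch of that wiring, assuming a Router admin service reachable at
router.example.com:8111 (host and port are placeholders, not values from this
commit; package locations follow this commit's layout):

import java.net.InetSocketAddress;
import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;

public class RouterAdminClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Route RouterAdminProtocolPB calls through the protobuf RPC engine.
    RPC.setProtocolEngine(conf, RouterAdminProtocolPB.class,
        ProtobufRpcEngine.class);

    // Placeholder endpoint; substitute the real Router admin address.
    InetSocketAddress routerAddr =
        new InetSocketAddress("router.example.com", 8111);
    RouterAdminProtocolPB proxy = RPC.getProxy(RouterAdminProtocolPB.class,
        RPC.getProtocolVersion(RouterAdminProtocolPB.class),
        routerAddr, conf);

    // The translator exposes the raw proxy through the MountTableManager API.
    MountTableManager mountTable = new RouterAdminProtocolTranslatorPB(proxy);
    MountTable entry = MountTable.newInstance("/data",
        Collections.singletonMap("ns0", "/data"));  // example mount mapping
    AddMountTableEntryResponse response = mountTable.addMountTableEntry(
        AddMountTableEntryRequest.newInstance(entry));
    System.out.println("Mount entry added: " + response.getStatus());
  }
}

The RouterStateManager calls (enterSafeMode, leaveSafeMode, getSafeMode) are
driven the same way; their request records carry no fields, which is why the
translator builds empty protos regardless of the request argument.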

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
deleted file mode 100644
index 79fb3e4..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.metrics;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * JMX interface for the federation statistics.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface FederationMBean {
-
-  /**
-   * Get information about all the namenodes in the federation, or null on
-   * failure.
-   * @return JSON with all the namenodes.
-   */
-  String getNamenodes();
-
-  /**
-   * Get the latest info for each registered nameservice.
-   * @return JSON with all the nameservices.
-   */
-  String getNameservices();
-
-  /**
-   * Get the mount table for the federated filesystem, or null on failure.
-   * @return JSON with the mount table.
-   */
-  String getMountTable();
-
-  /**
-   * Get the latest state of all routers.
-   * @return JSON with all the known routers, or null on failure.
-   */
-  String getRouters();
-
-  /**
-   * Get the total capacity of the federated cluster.
-   * @return Total capacity of the federated cluster.
-   */
-  long getTotalCapacity();
-
-  /**
-   * Get the used capacity of the federated cluster.
-   * @return Used capacity of the federated cluster.
-   */
-  long getUsedCapacity();
-
-  /**
-   * Get the remaining capacity of the federated cluster.
-   * @return Remaining capacity of the federated cluster.
-   */
-  long getRemainingCapacity();
-
-  /**
-   * Get the total remote storage capacity mounted in the federated cluster.
-   * @return Remote capacity of the federated cluster.
-   */
-  long getProvidedSpace();
-
-  /**
-   * Get the number of nameservices in the federation.
-   * @return Number of nameservices in the federation.
-   */
-  int getNumNameservices();
-
-  /**
-   * Get the number of namenodes.
-   * @return Number of namenodes.
-   */
-  int getNumNamenodes();
-
-  /**
-   * Get the number of expired namenodes.
-   * @return Number of expired namenodes.
-   */
-  int getNumExpiredNamenodes();
-
-  /**
-   * Get the number of live datanodes.
-   * @return Number of live datanodes.
-   */
-  int getNumLiveNodes();
-
-  /**
-   * Get the number of dead datanodes.
-   * @return Number of dead datanodes.
-   */
-  int getNumDeadNodes();
-
-  /**
-   * Get the number of decommissioning datanodes.
-   * @return Number of decommissioning datanodes.
-   */
-  int getNumDecommissioningNodes();
-
-  /**
-   * Get the number of live decommissioned datanodes.
-   * @return Number of live decommissioned datanodes.
-   */
-  int getNumDecomLiveNodes();
-
-  /**
-   * Get the number of dead decommissioned datanodes.
-   * @return Number of dead decommissioned datanodes.
-   */
-  int getNumDecomDeadNodes();
-
-  /**
-   * Get the max, median, min, and standard deviation of DataNode usage.
-   * @return The DataNode usage information, as a JSON string.
-   */
-  String getNodeUsage();
-
-  /**
-   * Get the number of blocks in the federation.
-   * @return Number of blocks in the federation.
-   */
-  long getNumBlocks();
-
-  /**
-   * Get the number of missing blocks in the federation.
-   * @return Number of missing blocks in the federation.
-   */
-  long getNumOfMissingBlocks();
-
-  /**
-   * Get the number of pending replication blocks in the federation.
-   * @return Number of pending replication blocks in the federation.
-   */
-  long getNumOfBlocksPendingReplication();
-
-  /**
-   * Get the number of under replicated blocks in the federation.
-   * @return Number of under replicated blocks in the federation.
-   */
-  long getNumOfBlocksUnderReplicated();
-
-  /**
-   * Get the number of pending deletion blocks in the federation.
-   * @return Number of pending deletion blocks in the federation.
-   */
-  long getNumOfBlocksPendingDeletion();
-
-  /**
-   * Get the number of files in the federation.
-   * @return Number of files in the federation.
-   */
-  long getNumFiles();
-
-  /**
-   * Get the time when the router started.
-   * @return Start date of the router, as a string.
-   */
-  String getRouterStarted();
-
-  /**
-   * Get the version of the router.
-   * @return Version of the router.
-   */
-  String getVersion();
-
-  /**
-   * Get the compilation date of the router.
-   * @return Compilation date of the router.
-   */
-  String getCompiledDate();
-
-  /**
-   * Get the compilation info of the router.
-   * @return Compilation info of the router.
-   */
-  String getCompileInfo();
-
-  /**
-   * Get the host and port of the router.
-   * @return Host and port of the router.
-   */
-  String getHostAndPort();
-
-  /**
-   * Get the identifier of the router.
-   * @return Identifier of the router.
-   */
-  String getRouterId();
-
-  /**
-   * Get the cluster identifier of the federated cluster.
-   * @return Identifier of the cluster.
-   */
-  String getClusterId();
-
-  /**
-   * Get the block pool identifier of the federated cluster.
-   * @return Identifier of the block pool.
-   */
-  String getBlockPoolId();
-
-  /**
-   * Get the current state of the router.
-   *
-   * @return String label for the current router state.
-   */
-  String getRouterStatus();
-}
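
FederationMBean is a standard JMX interface, so its attributes can be read
through the platform MBean server from within the Router JVM. A minimal
sketch follows; the ObjectName uses Hadoop's usual
"Hadoop:service=<service>,name=<record>" pattern and is an assumption to
verify against a running Router (e.g. with a JMX browser):

import java.lang.management.ManagementFactory;

import javax.management.JMX;
import javax.management.MBeanServer;
import javax.management.ObjectName;

import org.apache.hadoop.hdfs.server.federation.metrics.FederationMBean;

public class FederationMBeanReaderSketch {
  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // Assumed registration name following the Hadoop metrics convention;
    // confirm the actual name before relying on it.
    ObjectName name =
        new ObjectName("Hadoop:service=Router,name=FederationState");
    FederationMBean federation =
        JMX.newMBeanProxy(server, name, FederationMBean.class);

    // Read a few of the attributes declared by the interface above.
    System.out.println("Nameservices: " + federation.getNumNameservices());
    System.out.println("Live datanodes: " + federation.getNumLiveNodes());
    System.out.println("Total capacity: " + federation.getTotalCapacity());
  }
}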

