hadoop-common-commits mailing list archives

From w...@apache.org
Subject [01/38] hadoop git commit: HDFS-13215. RBF: Move Router to its own module.
Date Tue, 20 Mar 2018 06:26:18 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 30d9a5db2 -> 4aa34324b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
deleted file mode 100644
index fd29e37..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
+++ /dev/null
@@ -1,613 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.store.driver;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Random;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.federation.metrics.StateStoreMetrics;
-import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;
-import org.apache.hadoop.hdfs.server.federation.router.RouterServiceState;
-import org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils;
-import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
-import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
-import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
-import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
-import org.apache.hadoop.hdfs.server.federation.store.records.Query;
-import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult;
-import org.apache.hadoop.hdfs.server.federation.store.records.RouterState;
-import org.apache.hadoop.hdfs.server.federation.store.records.StateStoreVersion;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Base tests for the driver. The particular implementations will use this to
- * test their functionality.
- */
-public class TestStateStoreDriverBase {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestStateStoreDriverBase.class);
-
-  private static StateStoreService stateStore;
-  private static Configuration conf;
-
-  private static final Random RANDOM = new Random();
-
-
-  /**
-   * Get the State Store driver.
-   * @return State Store driver.
-   */
-  protected StateStoreDriver getStateStoreDriver() {
-    return stateStore.getDriver();
-  }
-
-  @After
-  public void cleanMetrics() {
-    if (stateStore != null) {
-      StateStoreMetrics metrics = stateStore.getMetrics();
-      metrics.reset();
-    }
-  }
-
-  @AfterClass
-  public static void tearDownCluster() {
-    if (stateStore != null) {
-      stateStore.stop();
-    }
-  }
-
-  /**
-   * Get a new State Store using this configuration.
-   *
-   * @param config Configuration for the State Store.
-   * @throws Exception If we cannot get the State Store.
-   */
-  public static void getStateStore(Configuration config) throws Exception {
-    conf = config;
-    stateStore = FederationStateStoreTestUtils.newStateStore(conf);
-  }
-
-  private String generateRandomString() {
-    String randomString = "randomString-" + RANDOM.nextInt();
-    return randomString;
-  }
-
-  private long generateRandomLong() {
-    return RANDOM.nextLong();
-  }
-
-  @SuppressWarnings("rawtypes")
-  private <T extends Enum> T generateRandomEnum(Class<T> enumClass) {
-    int x = RANDOM.nextInt(enumClass.getEnumConstants().length);
-    T data = enumClass.getEnumConstants()[x];
-    return data;
-  }
-
-  @SuppressWarnings("unchecked")
-  private <T extends BaseRecord> T generateFakeRecord(Class<T> recordClass)
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-
-    if (recordClass == MembershipState.class) {
-      return (T) MembershipState.newInstance(generateRandomString(),
-          generateRandomString(), generateRandomString(),
-          generateRandomString(), generateRandomString(),
-          generateRandomString(), generateRandomString(),
-          generateRandomString(), generateRandomString(),
-          generateRandomEnum(FederationNamenodeServiceState.class), false);
-    } else if (recordClass == MountTable.class) {
-      String src = "/" + generateRandomString();
-      Map<String, String> destMap = Collections.singletonMap(
-          generateRandomString(), "/" + generateRandomString());
-      return (T) MountTable.newInstance(src, destMap);
-    } else if (recordClass == RouterState.class) {
-      RouterState routerState = RouterState.newInstance(generateRandomString(),
-          generateRandomLong(), generateRandomEnum(RouterServiceState.class));
-      StateStoreVersion version = generateFakeRecord(StateStoreVersion.class);
-      routerState.setStateStoreVersion(version);
-      return (T) routerState;
-    }
-
-    return null;
-  }
-
-  /**
-   * Validate that two records match.
-   *
-   * @param original Original record.
-   * @param committed Committed record.
-   * @param assertEquals Assert if the records are equal or just return.
-   * @return If the record is successfully validated.
-   */
-  private boolean validateRecord(
-      BaseRecord original, BaseRecord committed, boolean assertEquals) {
-
-    boolean ret = true;
-
-    Map<String, Class<?>> fields = getFields(original);
-    for (String key : fields.keySet()) {
-      if (key.equals("dateModified") ||
-          key.equals("dateCreated") ||
-          key.equals("proto")) {
-        // Fields are updated/set on commit and fetch and may not match
-        // the fields that are initialized in a non-committed object.
-        continue;
-      }
-      Object data1 = getField(original, key);
-      Object data2 = getField(committed, key);
-      if (assertEquals) {
-        assertEquals("Field " + key + " does not match", data1, data2);
-      } else if (!data1.equals(data2)) {
-        ret = false;
-      }
-    }
-
-    long now = stateStore.getDriver().getTime();
-    assertTrue(
-        committed.getDateCreated() <= now && committed.getDateCreated() > 0);
-    assertTrue(committed.getDateModified() >= committed.getDateCreated());
-
-    return ret;
-  }
-
-  public static void removeAll(StateStoreDriver driver) throws IOException {
-    driver.removeAll(MembershipState.class);
-    driver.removeAll(MountTable.class);
-  }
-
-  public <T extends BaseRecord> void testInsert(
-      StateStoreDriver driver, Class<T> recordClass)
-          throws IllegalArgumentException, IllegalAccessException, IOException {
-
-    assertTrue(driver.removeAll(recordClass));
-    QueryResult<T> queryResult0 = driver.get(recordClass);
-    List<T> records0 = queryResult0.getRecords();
-    assertTrue(records0.isEmpty());
-
-    // Insert single
-    BaseRecord record = generateFakeRecord(recordClass);
-    driver.put(record, true, false);
-
-    // Verify
-    QueryResult<T> queryResult1 = driver.get(recordClass);
-    List<T> records1 = queryResult1.getRecords();
-    assertEquals(1, records1.size());
-    T record0 = records1.get(0);
-    validateRecord(record, record0, true);
-
-    // Insert multiple
-    List<T> insertList = new ArrayList<>();
-    for (int i = 0; i < 10; i++) {
-      T newRecord = generateFakeRecord(recordClass);
-      insertList.add(newRecord);
-    }
-    driver.putAll(insertList, true, false);
-
-    // Verify
-    QueryResult<T> queryResult2 = driver.get(recordClass);
-    List<T> records2 = queryResult2.getRecords();
-    assertEquals(11, records2.size());
-  }
-
-  public <T extends BaseRecord> void testFetchErrors(StateStoreDriver driver,
-      Class<T> clazz) throws IllegalAccessException, IOException {
-
-    // Fetch empty list
-    driver.removeAll(clazz);
-    QueryResult<T> result0 = driver.get(clazz);
-    assertNotNull(result0);
-    List<T> records0 = result0.getRecords();
-    assertEquals(records0.size(), 0);
-
-    // Insert single
-    BaseRecord record = generateFakeRecord(clazz);
-    assertTrue(driver.put(record, true, false));
-
-    // Verify
-    QueryResult<T> result1 = driver.get(clazz);
-    List<T> records1 = result1.getRecords();
-    assertEquals(1, records1.size());
-    validateRecord(record, records1.get(0), true);
-
-    // Test fetch single object with a bad query
-    final T fakeRecord = generateFakeRecord(clazz);
-    final Query<T> query = new Query<T>(fakeRecord);
-    T getRecord = driver.get(clazz, query);
-    assertNull(getRecord);
-
-    // Test fetch multiple objects does not exist returns empty list
-    assertEquals(driver.getMultiple(clazz, query).size(), 0);
-  }
-
-  public <T extends BaseRecord> void testPut(
-      StateStoreDriver driver, Class<T> clazz)
-          throws IllegalArgumentException, ReflectiveOperationException,
-          IOException, SecurityException {
-
-    driver.removeAll(clazz);
-    QueryResult<T> records = driver.get(clazz);
-    assertTrue(records.getRecords().isEmpty());
-
-    // Insert multiple
-    List<T> insertList = new ArrayList<>();
-    for (int i = 0; i < 10; i++) {
-      T newRecord = generateFakeRecord(clazz);
-      insertList.add(newRecord);
-    }
-
-    // Verify
-    assertTrue(driver.putAll(insertList, false, true));
-    records = driver.get(clazz);
-    assertEquals(records.getRecords().size(), 10);
-
-    // Generate a new record with the same PK fields as an existing record
-    BaseRecord updatedRecord = generateFakeRecord(clazz);
-    BaseRecord existingRecord = records.getRecords().get(0);
-    Map<String, String> primaryKeys = existingRecord.getPrimaryKeys();
-    for (Entry<String, String> entry : primaryKeys.entrySet()) {
-      String key = entry.getKey();
-      String value = entry.getValue();
-      Class<?> fieldType = getFieldType(existingRecord, key);
-      Object field = fromString(value, fieldType);
-      assertTrue(setField(updatedRecord, key, field));
-    }
-
-    // Attempt an update of an existing entry, but it is not allowed.
-    assertFalse(driver.put(updatedRecord, false, true));
-
-    // Verify no update occurred, all original records are unchanged
-    QueryResult<T> newRecords = driver.get(clazz);
-    assertTrue(newRecords.getRecords().size() == 10);
-    assertEquals("A single entry was improperly updated in the store", 10,
-        countMatchingEntries(records.getRecords(), newRecords.getRecords()));
-
-    // Update the entry (allowing updates)
-    assertTrue(driver.put(updatedRecord, true, false));
-
-    // Verify that one entry no longer matches the original set
-    newRecords = driver.get(clazz);
-    assertEquals(10, newRecords.getRecords().size());
-    assertEquals(
-        "Record of type " + clazz + " not updated in the store", 9,
-        countMatchingEntries(records.getRecords(), newRecords.getRecords()));
-  }
-
-  private int countMatchingEntries(
-      Collection<? extends BaseRecord> committedList,
-      Collection<? extends BaseRecord> matchList) {
-
-    int matchingCount = 0;
-    for (BaseRecord committed : committedList) {
-      for (BaseRecord match : matchList) {
-        try {
-          if (match.getPrimaryKey().equals(committed.getPrimaryKey())) {
-            if (validateRecord(match, committed, false)) {
-              matchingCount++;
-            }
-            break;
-          }
-        } catch (Exception ex) {
-        }
-      }
-    }
-    return matchingCount;
-  }
-
-  public <T extends BaseRecord> void testRemove(
-      StateStoreDriver driver, Class<T> clazz)
-          throws IllegalArgumentException, IllegalAccessException, IOException {
-
-    // Remove all
-    assertTrue(driver.removeAll(clazz));
-    QueryResult<T> records = driver.get(clazz);
-    assertTrue(records.getRecords().isEmpty());
-
-    // Insert multiple
-    List<T> insertList = new ArrayList<>();
-    for (int i = 0; i < 10; i++) {
-      T newRecord = generateFakeRecord(clazz);
-      insertList.add(newRecord);
-    }
-
-    // Verify
-    assertTrue(driver.putAll(insertList, false, true));
-    records = driver.get(clazz);
-    assertEquals(records.getRecords().size(), 10);
-
-    // Remove Single
-    assertTrue(driver.remove(records.getRecords().get(0)));
-
-    // Verify
-    records = driver.get(clazz);
-    assertEquals(records.getRecords().size(), 9);
-
-    // Remove with filter
-    final T firstRecord = records.getRecords().get(0);
-    final Query<T> query0 = new Query<T>(firstRecord);
-    assertTrue(driver.remove(clazz, query0) > 0);
-
-    final T secondRecord = records.getRecords().get(1);
-    final Query<T> query1 = new Query<T>(secondRecord);
-    assertTrue(driver.remove(clazz, query1) > 0);
-
-    // Verify
-    records = driver.get(clazz);
-    assertEquals(records.getRecords().size(), 7);
-
-    // Remove all
-    assertTrue(driver.removeAll(clazz));
-
-    // Verify
-    records = driver.get(clazz);
-    assertTrue(records.getRecords().isEmpty());
-  }
-
-  public void testInsert(StateStoreDriver driver)
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testInsert(driver, MembershipState.class);
-    testInsert(driver, MountTable.class);
-  }
-
-  public void testPut(StateStoreDriver driver)
-      throws IllegalArgumentException, ReflectiveOperationException,
-      IOException, SecurityException {
-    testPut(driver, MembershipState.class);
-    testPut(driver, MountTable.class);
-  }
-
-  public void testRemove(StateStoreDriver driver)
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testRemove(driver, MembershipState.class);
-    testRemove(driver, MountTable.class);
-  }
-
-  public void testFetchErrors(StateStoreDriver driver)
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testFetchErrors(driver, MembershipState.class);
-    testFetchErrors(driver, MountTable.class);
-  }
-
-  public void testMetrics(StateStoreDriver driver)
-      throws IOException, IllegalArgumentException, IllegalAccessException {
-
-    MountTable insertRecord =
-        this.generateFakeRecord(MountTable.class);
-
-    // Put single
-    StateStoreMetrics metrics = stateStore.getMetrics();
-    assertEquals(0, metrics.getWriteOps());
-    driver.put(insertRecord, true, false);
-    assertEquals(1, metrics.getWriteOps());
-
-    // Put multiple
-    metrics.reset();
-    assertEquals(0, metrics.getWriteOps());
-    driver.put(insertRecord, true, false);
-    assertEquals(1, metrics.getWriteOps());
-
-    // Get Single
-    metrics.reset();
-    assertEquals(0, metrics.getReadOps());
-
-    final String querySourcePath = insertRecord.getSourcePath();
-    MountTable partial = MountTable.newInstance();
-    partial.setSourcePath(querySourcePath);
-    final Query<MountTable> query = new Query<>(partial);
-    driver.get(MountTable.class, query);
-    assertEquals(1, metrics.getReadOps());
-
-    // GetAll
-    metrics.reset();
-    assertEquals(0, metrics.getReadOps());
-    driver.get(MountTable.class);
-    assertEquals(1, metrics.getReadOps());
-
-    // GetMultiple
-    metrics.reset();
-    assertEquals(0, metrics.getReadOps());
-    driver.getMultiple(MountTable.class, query);
-    assertEquals(1, metrics.getReadOps());
-
-    // Insert fails
-    metrics.reset();
-    assertEquals(0, metrics.getFailureOps());
-    driver.put(insertRecord, false, true);
-    assertEquals(1, metrics.getFailureOps());
-
-    // Remove single
-    metrics.reset();
-    assertEquals(0, metrics.getRemoveOps());
-    driver.remove(insertRecord);
-    assertEquals(1, metrics.getRemoveOps());
-
-    // Remove multiple
-    metrics.reset();
-    driver.put(insertRecord, true, false);
-    assertEquals(0, metrics.getRemoveOps());
-    driver.remove(MountTable.class, query);
-    assertEquals(1, metrics.getRemoveOps());
-
-    // Remove all
-    metrics.reset();
-    driver.put(insertRecord, true, false);
-    assertEquals(0, metrics.getRemoveOps());
-    driver.removeAll(MountTable.class);
-    assertEquals(1, metrics.getRemoveOps());
-  }
-
-  /**
-   * Sets the value of a field on the object.
-   *
-   * @param fieldName The string name of the field.
-   * @param data The data to pass to the field's setter.
-   *
-   * @return True if successful, false otherwise.
-   */
-  private static boolean setField(
-      BaseRecord record, String fieldName, Object data) {
-
-    Method m = locateSetter(record, fieldName);
-    if (m != null) {
-      try {
-        m.invoke(record, data);
-      } catch (Exception e) {
-        LOG.error("Cannot set field " + fieldName + " on object "
-            + record.getClass().getName() + " to data " + data + " of type "
-            + data.getClass(), e);
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Finds the appropriate setter for a field name.
-   *
-   * @param fieldName The legacy name of the field.
-   * @return The matching setter or null if not found.
-   */
-  private static Method locateSetter(BaseRecord record, String fieldName) {
-    for (Method m : record.getClass().getMethods()) {
-      if (m.getName().equalsIgnoreCase("set" + fieldName)) {
-        return m;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Returns all serializable fields in the object.
-   *
-   * @return Map with the fields.
-   */
-  private static Map<String, Class<?>> getFields(BaseRecord record) {
-    Map<String, Class<?>> getters = new HashMap<>();
-    for (Method m : record.getClass().getDeclaredMethods()) {
-      if (m.getName().startsWith("get")) {
-        try {
-          Class<?> type = m.getReturnType();
-          char[] c = m.getName().substring(3).toCharArray();
-          c[0] = Character.toLowerCase(c[0]);
-          String key = new String(c);
-          getters.put(key, type);
-        } catch (Exception e) {
-          LOG.error("Cannot execute getter " + m.getName()
-              + " on object " + record);
-        }
-      }
-    }
-    return getters;
-  }
-
-  /**
-   * Get the type of a field.
-   *
-   * @param fieldName
-   * @return Field type
-   */
-  private static Class<?> getFieldType(BaseRecord record, String fieldName) {
-    Method m = locateGetter(record, fieldName);
-    return m.getReturnType();
-  }
-
-  /**
-   * Fetches the value for a field name.
-   *
-   * @param fieldName the legacy name of the field.
-   * @return The field data or null if not found.
-   */
-  private static Object getField(BaseRecord record, String fieldName) {
-    Object result = null;
-    Method m = locateGetter(record, fieldName);
-    if (m != null) {
-      try {
-        result = m.invoke(record);
-      } catch (Exception e) {
-        LOG.error("Cannot get field " + fieldName + " on object " + record);
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Finds the appropriate getter for a field name.
-   *
-   * @param fieldName The legacy name of the field.
-   * @return The matching getter or null if not found.
-   */
-  private static Method locateGetter(BaseRecord record, String fieldName) {
-    for (Method m : record.getClass().getMethods()) {
-      if (m.getName().equalsIgnoreCase("get" + fieldName)) {
-        return m;
-      }
-    }
-    return null;
-  }
-
-  /**
- * Expands a data object from the store into a record object. Default store
-   * data type is a String. Override if additional serialization is required.
-   *
-   * @param data Object containing the serialized data. Only string is
-   *          supported.
-   * @param clazz Target object class to hold the deserialized data.
-   * @return An instance of the target data object initialized with the
-   *         deserialized data.
-   */
-  @Deprecated
-  @SuppressWarnings({ "unchecked", "rawtypes" })
-  private static <T> T fromString(String data, Class<T> clazz) {
-
-    if (data.equals("null")) {
-      return null;
-    } else if (clazz == String.class) {
-      return (T) data;
-    } else if (clazz == Long.class || clazz == long.class) {
-      return (T) Long.valueOf(data);
-    } else if (clazz == Integer.class || clazz == int.class) {
-      return (T) Integer.valueOf(data);
-    } else if (clazz == Double.class || clazz == double.class) {
-      return (T) Double.valueOf(data);
-    } else if (clazz == Float.class || clazz == float.class) {
-      return (T) Float.valueOf(data);
-    } else if (clazz == Boolean.class || clazz == boolean.class) {
-      return (T) Boolean.valueOf(data);
-    } else if (clazz.isEnum()) {
-      return (T) Enum.valueOf((Class<Enum>) clazz, data);
-    }
-    return null;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFile.java
deleted file mode 100644
index a8a9020..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFile.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.store.driver;
-
-import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Test the file based implementation of the State Store driver.
- */
-public class TestStateStoreFile extends TestStateStoreDriverBase {
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    Configuration conf = getStateStoreConfiguration(StateStoreFileImpl.class);
-    getStateStore(conf);
-  }
-
-  @Before
-  public void startup() throws IOException {
-    removeAll(getStateStoreDriver());
-  }
-
-  @Test
-  public void testInsert()
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testInsert(getStateStoreDriver());
-  }
-
-  @Test
-  public void testUpdate()
-      throws IllegalArgumentException, ReflectiveOperationException,
-      IOException, SecurityException {
-    testPut(getStateStoreDriver());
-  }
-
-  @Test
-  public void testDelete()
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testRemove(getStateStoreDriver());
-  }
-
-  @Test
-  public void testFetchErrors()
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testFetchErrors(getStateStoreDriver());
-  }
-
-  @Test
-  public void testMetrics()
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testMetrics(getStateStoreDriver());
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFileBase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFileBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFileBase.java
deleted file mode 100644
index 9adfe33..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFileBase.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.store.driver;
-
-import static org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileBaseImpl.isOldTempRecord;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.util.Time;
-import org.junit.Test;
-
-/**
- * Tests for the State Store file based implementation.
- */
-public class TestStateStoreFileBase {
-
-  @Test
-  public void testTempOld() {
-    assertFalse(isOldTempRecord("test.txt"));
-    assertFalse(isOldTempRecord("testfolder/test.txt"));
-
-    long tnow = Time.now();
-    String tmpFile1 = "test." + tnow + ".tmp";
-    assertFalse(isOldTempRecord(tmpFile1));
-
-    long told = Time.now() - TimeUnit.MINUTES.toMillis(1);
-    String tmpFile2 = "test." + told + ".tmp";
-    assertTrue(isOldTempRecord(tmpFile2));
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFileSystem.java
deleted file mode 100644
index 8c4b188..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFileSystem.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.store.driver;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils;
-import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileSystemImpl;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Test the FileSystem (e.g., HDFS) implementation of the State Store driver.
- */
-public class TestStateStoreFileSystem extends TestStateStoreDriverBase {
-
-  private static MiniDFSCluster dfsCluster;
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    Configuration conf = FederationStateStoreTestUtils
-        .getStateStoreConfiguration(StateStoreFileSystemImpl.class);
-    conf.set(StateStoreFileSystemImpl.FEDERATION_STORE_FS_PATH,
-        "/hdfs-federation/");
-
-    // Create HDFS cluster to back the state store
-    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
-    builder.numDataNodes(1);
-    dfsCluster = builder.build();
-    dfsCluster.waitClusterUp();
-    getStateStore(conf);
-  }
-
-  @AfterClass
-  public static void tearDownCluster() {
-    if (dfsCluster != null) {
-      dfsCluster.shutdown();
-    }
-  }
-
-  @Before
-  public void startup() throws IOException {
-    removeAll(getStateStoreDriver());
-  }
-
-  @Test
-  public void testInsert()
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testInsert(getStateStoreDriver());
-  }
-
-  @Test
-  public void testUpdate() throws IllegalArgumentException, IOException,
-      SecurityException, ReflectiveOperationException {
-    testPut(getStateStoreDriver());
-  }
-
-  @Test
-  public void testDelete()
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testRemove(getStateStoreDriver());
-  }
-
-  @Test
-  public void testFetchErrors()
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testFetchErrors(getStateStoreDriver());
-  }
-
-  @Test
-  public void testMetrics()
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testMetrics(getStateStoreDriver());
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreZK.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreZK.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreZK.java
deleted file mode 100644
index 36353ff..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreZK.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.store.driver;
-
-import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration;
-
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.RetryNTimes;
-import org.apache.curator.test.TestingServer;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Test the ZooKeeper implementation of the State Store driver.
- */
-public class TestStateStoreZK extends TestStateStoreDriverBase {
-
-  private static TestingServer curatorTestingServer;
-  private static CuratorFramework curatorFramework;
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    curatorTestingServer = new TestingServer();
-    curatorTestingServer.start();
-    String connectString = curatorTestingServer.getConnectString();
-    curatorFramework = CuratorFrameworkFactory.builder()
-        .connectString(connectString)
-        .retryPolicy(new RetryNTimes(100, 100))
-        .build();
-    curatorFramework.start();
-
-    // Create the ZK State Store
-    Configuration conf =
-        getStateStoreConfiguration(StateStoreZooKeeperImpl.class);
-    conf.set(CommonConfigurationKeys.ZK_ADDRESS, connectString);
-    // Disable auto-repair of connection
-    conf.setLong(DFSConfigKeys.FEDERATION_STORE_CONNECTION_TEST_MS,
-        TimeUnit.HOURS.toMillis(1));
-    getStateStore(conf);
-  }
-
-  @AfterClass
-  public static void tearDownCluster() {
-    curatorFramework.close();
-    try {
-      curatorTestingServer.stop();
-    } catch (IOException e) {
-    }
-  }
-
-  @Before
-  public void startup() throws IOException {
-    removeAll(getStateStoreDriver());
-  }
-
-  @Test
-  public void testInsert()
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testInsert(getStateStoreDriver());
-  }
-
-  @Test
-  public void testUpdate()
-      throws IllegalArgumentException, ReflectiveOperationException,
-      IOException, SecurityException {
-    testPut(getStateStoreDriver());
-  }
-
-  @Test
-  public void testDelete()
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testRemove(getStateStoreDriver());
-  }
-
-  @Test
-  public void testFetchErrors()
-      throws IllegalArgumentException, IllegalAccessException, IOException {
-    testFetchErrors(getStateStoreDriver());
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java
deleted file mode 100644
index d922414..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMembershipState.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.store.records;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;
-import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
-import org.junit.Test;
-
-/**
- * Test the Membership State records.
- */
-public class TestMembershipState {
-
-  private static final String ROUTER = "router";
-  private static final String NAMESERVICE = "nameservice";
-  private static final String NAMENODE = "namenode";
-  private static final String CLUSTER_ID = "cluster";
-  private static final String BLOCKPOOL_ID = "blockpool";
-  private static final String RPC_ADDRESS = "rpcaddress";
-  private static final String SERVICE_ADDRESS = "serviceaddress";
-  private static final String LIFELINE_ADDRESS = "lifelineaddress";
-  private static final String WEB_ADDRESS = "webaddress";
-  private static final boolean SAFE_MODE = false;
-
-  private static final long DATE_CREATED = 100;
-  private static final long DATE_MODIFIED = 200;
-
-  private static final long NUM_BLOCKS = 300;
-  private static final long NUM_FILES = 400;
-  private static final int NUM_DEAD = 500;
-  private static final int NUM_ACTIVE = 600;
-  private static final int NUM_DECOM = 700;
-  private static final int NUM_DECOM_ACTIVE = 800;
-  private static final int NUM_DECOM_DEAD = 900;
-  private static final long NUM_BLOCK_MISSING = 1000;
-
-  private static final long TOTAL_SPACE = 1100;
-  private static final long AVAILABLE_SPACE = 1200;
-
-  private static final FederationNamenodeServiceState STATE =
-      FederationNamenodeServiceState.ACTIVE;
-
-  private MembershipState createRecord() throws IOException {
-
-    MembershipState record = MembershipState.newInstance(
-        ROUTER, NAMESERVICE, NAMENODE, CLUSTER_ID,
-        BLOCKPOOL_ID, RPC_ADDRESS, SERVICE_ADDRESS, LIFELINE_ADDRESS,
-        WEB_ADDRESS, STATE, SAFE_MODE);
-    record.setDateCreated(DATE_CREATED);
-    record.setDateModified(DATE_MODIFIED);
-
-    MembershipStats stats = MembershipStats.newInstance();
-    stats.setNumOfBlocks(NUM_BLOCKS);
-    stats.setNumOfFiles(NUM_FILES);
-    stats.setNumOfActiveDatanodes(NUM_ACTIVE);
-    stats.setNumOfDeadDatanodes(NUM_DEAD);
-    stats.setNumOfDecommissioningDatanodes(NUM_DECOM);
-    stats.setNumOfDecomActiveDatanodes(NUM_DECOM_ACTIVE);
-    stats.setNumOfDecomDeadDatanodes(NUM_DECOM_DEAD);
-    stats.setNumOfBlocksMissing(NUM_BLOCK_MISSING);
-    stats.setTotalSpace(TOTAL_SPACE);
-    stats.setAvailableSpace(AVAILABLE_SPACE);
-    record.setStats(stats);
-    return record;
-  }
-
-  private void validateRecord(MembershipState record) throws IOException {
-
-    assertEquals(ROUTER, record.getRouterId());
-    assertEquals(NAMESERVICE, record.getNameserviceId());
-    assertEquals(CLUSTER_ID, record.getClusterId());
-    assertEquals(BLOCKPOOL_ID, record.getBlockPoolId());
-    assertEquals(RPC_ADDRESS, record.getRpcAddress());
-    assertEquals(WEB_ADDRESS, record.getWebAddress());
-    assertEquals(STATE, record.getState());
-    assertEquals(SAFE_MODE, record.getIsSafeMode());
-    assertEquals(DATE_CREATED, record.getDateCreated());
-    assertEquals(DATE_MODIFIED, record.getDateModified());
-
-    MembershipStats stats = record.getStats();
-    assertEquals(NUM_BLOCKS, stats.getNumOfBlocks());
-    assertEquals(NUM_FILES, stats.getNumOfFiles());
-    assertEquals(NUM_ACTIVE, stats.getNumOfActiveDatanodes());
-    assertEquals(NUM_DEAD, stats.getNumOfDeadDatanodes());
-    assertEquals(NUM_DECOM, stats.getNumOfDecommissioningDatanodes());
-    assertEquals(NUM_DECOM_ACTIVE, stats.getNumOfDecomActiveDatanodes());
-    assertEquals(NUM_DECOM_DEAD, stats.getNumOfDecomDeadDatanodes());
-    assertEquals(TOTAL_SPACE, stats.getTotalSpace());
-    assertEquals(AVAILABLE_SPACE, stats.getAvailableSpace());
-  }
-
-  @Test
-  public void testGetterSetter() throws IOException {
-    MembershipState record = createRecord();
-    validateRecord(record);
-  }
-
-  @Test
-  public void testSerialization() throws IOException {
-
-    MembershipState record = createRecord();
-
-    StateStoreSerializer serializer = StateStoreSerializer.getSerializer();
-    String serializedString = serializer.serializeString(record);
-    MembershipState newRecord =
-        serializer.deserialize(serializedString, MembershipState.class);
-
-    validateRecord(newRecord);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
deleted file mode 100644
index d5fb9ba..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
+++ /dev/null
@@ -1,259 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.store.records;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
-import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
-import org.apache.hadoop.hdfs.server.federation.router.RouterQuotaUsage;
-import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
-
-/**
- * Test the Mount Table entry in the State Store.
- */
-public class TestMountTable {
-
-  private static final String SRC = "/test";
-  private static final String DST_NS_0 = "ns0";
-  private static final String DST_NS_1 = "ns1";
-  private static final String DST_PATH_0 = "/path1";
-  private static final String DST_PATH_1 = "/path/path2";
-  private static final List<RemoteLocation> DST = new LinkedList<>();
-  static {
-    DST.add(new RemoteLocation(DST_NS_0, DST_PATH_0));
-    DST.add(new RemoteLocation(DST_NS_1, DST_PATH_1));
-  }
-  private static final Map<String, String> DST_MAP = new LinkedHashMap<>();
-  static {
-    DST_MAP.put(DST_NS_0, DST_PATH_0);
-    DST_MAP.put(DST_NS_1, DST_PATH_1);
-  }
-
-  private static final long DATE_CREATED = 100;
-  private static final long DATE_MOD = 200;
-
-  private static final long NS_COUNT = 1;
-  private static final long NS_QUOTA = 5;
-  private static final long SS_COUNT = 10;
-  private static final long SS_QUOTA = 100;
-
-  private static final RouterQuotaUsage QUOTA = new RouterQuotaUsage.Builder()
-      .fileAndDirectoryCount(NS_COUNT).quota(NS_QUOTA).spaceConsumed(SS_COUNT)
-      .spaceQuota(SS_QUOTA).build();
-
-  @Test
-  public void testGetterSetter() throws IOException {
-
-    MountTable record = MountTable.newInstance(SRC, DST_MAP);
-
-    validateDestinations(record);
-    assertEquals(SRC, record.getSourcePath());
-    assertEquals(DST, record.getDestinations());
-    assertTrue(DATE_CREATED > 0);
-    assertTrue(DATE_MOD > 0);
-
-    RouterQuotaUsage quota = record.getQuota();
-    assertEquals(0, quota.getFileAndDirectoryCount());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quota.getQuota());
-    assertEquals(0, quota.getSpaceConsumed());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quota.getSpaceQuota());
-
-    MountTable record2 =
-        MountTable.newInstance(SRC, DST_MAP, DATE_CREATED, DATE_MOD);
-
-    validateDestinations(record2);
-    assertEquals(SRC, record2.getSourcePath());
-    assertEquals(DST, record2.getDestinations());
-    assertEquals(DATE_CREATED, record2.getDateCreated());
-    assertEquals(DATE_MOD, record2.getDateModified());
-    assertFalse(record.isReadOnly());
-    assertEquals(DestinationOrder.HASH, record.getDestOrder());
-  }
-
-  @Test
-  public void testSerialization() throws IOException {
-    testSerialization(DestinationOrder.RANDOM);
-    testSerialization(DestinationOrder.HASH);
-    testSerialization(DestinationOrder.LOCAL);
-  }
-
-  private void testSerialization(final DestinationOrder order)
-      throws IOException {
-
-    MountTable record = MountTable.newInstance(
-        SRC, DST_MAP, DATE_CREATED, DATE_MOD);
-    record.setReadOnly(true);
-    record.setDestOrder(order);
-    record.setQuota(QUOTA);
-
-    StateStoreSerializer serializer = StateStoreSerializer.getSerializer();
-    String serializedString = serializer.serializeString(record);
-    MountTable record2 =
-        serializer.deserialize(serializedString, MountTable.class);
-
-    validateDestinations(record2);
-    assertEquals(SRC, record2.getSourcePath());
-    assertEquals(DST, record2.getDestinations());
-    assertEquals(DATE_CREATED, record2.getDateCreated());
-    assertEquals(DATE_MOD, record2.getDateModified());
-    assertTrue(record2.isReadOnly());
-    assertEquals(order, record2.getDestOrder());
-
-    RouterQuotaUsage quotaGet = record2.getQuota();
-    assertEquals(NS_COUNT, quotaGet.getFileAndDirectoryCount());
-    assertEquals(NS_QUOTA, quotaGet.getQuota());
-    assertEquals(SS_COUNT, quotaGet.getSpaceConsumed());
-    assertEquals(SS_QUOTA, quotaGet.getSpaceQuota());
-  }
-
-  @Test
-  public void testReadOnly() throws IOException {
-
-    Map<String, String> dest = new LinkedHashMap<>();
-    dest.put(DST_NS_0, DST_PATH_0);
-    dest.put(DST_NS_1, DST_PATH_1);
-    MountTable record1 = MountTable.newInstance(SRC, dest);
-    record1.setReadOnly(true);
-
-    validateDestinations(record1);
-    assertEquals(SRC, record1.getSourcePath());
-    assertEquals(DST, record1.getDestinations());
-    assertTrue(DATE_CREATED > 0);
-    assertTrue(DATE_MOD > 0);
-    assertTrue(record1.isReadOnly());
-
-    MountTable record2 = MountTable.newInstance(
-        SRC, DST_MAP, DATE_CREATED, DATE_MOD);
-    record2.setReadOnly(true);
-
-    validateDestinations(record2);
-    assertEquals(SRC, record2.getSourcePath());
-    assertEquals(DST, record2.getDestinations());
-    assertEquals(DATE_CREATED, record2.getDateCreated());
-    assertEquals(DATE_MOD, record2.getDateModified());
-    assertTrue(record2.isReadOnly());
-  }
-
-  @Test
-  public void testOrder() throws IOException {
-    testOrder(DestinationOrder.HASH);
-    testOrder(DestinationOrder.LOCAL);
-    testOrder(DestinationOrder.RANDOM);
-  }
-
-  private void testOrder(final DestinationOrder order)
-      throws IOException {
-
-    MountTable record = MountTable.newInstance(
-        SRC, DST_MAP, DATE_CREATED, DATE_MOD);
-    record.setDestOrder(order);
-
-    validateDestinations(record);
-    assertEquals(SRC, record.getSourcePath());
-    assertEquals(DST, record.getDestinations());
-    assertEquals(DATE_CREATED, record.getDateCreated());
-    assertEquals(DATE_MOD, record.getDateModified());
-    assertEquals(order, record.getDestOrder());
-  }
-
-  private void validateDestinations(MountTable record) {
-
-    assertEquals(SRC, record.getSourcePath());
-    assertEquals(2, record.getDestinations().size());
-
-    RemoteLocation location1 = record.getDestinations().get(0);
-    assertEquals(DST_NS_0, location1.getNameserviceId());
-    assertEquals(DST_PATH_0, location1.getDest());
-
-    RemoteLocation location2 = record.getDestinations().get(1);
-    assertEquals(DST_NS_1, location2.getNameserviceId());
-    assertEquals(DST_PATH_1, location2.getDest());
-  }
-
-  @Test
-  public void testQuota() throws IOException {
-    MountTable record = MountTable.newInstance(SRC, DST_MAP);
-    record.setQuota(QUOTA);
-
-    validateDestinations(record);
-    assertEquals(SRC, record.getSourcePath());
-    assertEquals(DST, record.getDestinations());
-    assertTrue(DATE_CREATED > 0);
-    assertTrue(DATE_MOD > 0);
-
-    RouterQuotaUsage quotaGet = record.getQuota();
-    assertEquals(NS_COUNT, quotaGet.getFileAndDirectoryCount());
-    assertEquals(NS_QUOTA, quotaGet.getQuota());
-    assertEquals(SS_COUNT, quotaGet.getSpaceConsumed());
-    assertEquals(SS_QUOTA, quotaGet.getSpaceQuota());
-  }
-
-  @Test
-  public void testValidation() throws IOException {
-    Map<String, String> destinations = new HashMap<>();
-    destinations.put("ns0", "/testValidate-dest");
-    try {
-      MountTable.newInstance("testValidate", destinations);
-      fail("Mount table entry should be created failed.");
-    } catch (Exception e) {
-      GenericTestUtils.assertExceptionContains(
-          MountTable.ERROR_MSG_MUST_START_WITH_BACK_SLASH, e);
-    }
-
-    destinations.clear();
-    destinations.put("ns0", "testValidate-dest");
-    try {
-      MountTable.newInstance("/testValidate", destinations);
-      fail("Mount table entry should be created failed.");
-    } catch (Exception e) {
-      GenericTestUtils.assertExceptionContains(
-          MountTable.ERROR_MSG_ALL_DEST_MUST_START_WITH_BACK_SLASH, e);
-    }
-
-    destinations.clear();
-    destinations.put("", "/testValidate-dest");
-    try {
-      MountTable.newInstance("/testValidate", destinations);
-      fail("Mount table entry should be created failed.");
-    } catch (Exception e) {
-      GenericTestUtils.assertExceptionContains(
-          MountTable.ERROR_MSG_INVAILD_DEST_NS, e);
-    }
-
-    destinations.clear();
-    destinations.put("ns0", "/testValidate-dest");
-    MountTable record = MountTable.newInstance("/testValidate", destinations);
-    assertNotNull(record);
-
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestRouterState.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestRouterState.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestRouterState.java
deleted file mode 100644
index dfe2bc9..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestRouterState.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.store.records;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hdfs.server.federation.router.RouterServiceState;
-import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
-import org.junit.Test;
-
-/**
- * Test the Router State records.
- */
-public class TestRouterState {
-
-  private static final String ADDRESS = "address";
-  private static final String VERSION = "version";
-  private static final String COMPILE_INFO = "compileInfo";
-  private static final long START_TIME = 100;
-  private static final long DATE_MODIFIED = 200;
-  private static final long DATE_CREATED = 300;
-  private static final long FILE_RESOLVER_VERSION = 500;
-  private static final RouterServiceState STATE = RouterServiceState.RUNNING;
-
-
-  private RouterState generateRecord() throws IOException {
-    RouterState record = RouterState.newInstance(ADDRESS, START_TIME, STATE);
-    record.setVersion(VERSION);
-    record.setCompileInfo(COMPILE_INFO);
-    record.setDateCreated(DATE_CREATED);
-    record.setDateModified(DATE_MODIFIED);
-
-    StateStoreVersion version = StateStoreVersion.newInstance();
-    version.setMountTableVersion(FILE_RESOLVER_VERSION);
-    record.setStateStoreVersion(version);
-    return record;
-  }
-
-  private void validateRecord(RouterState record) throws IOException {
-    assertEquals(ADDRESS, record.getAddress());
-    assertEquals(START_TIME, record.getDateStarted());
-    assertEquals(STATE, record.getStatus());
-    assertEquals(COMPILE_INFO, record.getCompileInfo());
-    assertEquals(VERSION, record.getVersion());
-
-    StateStoreVersion version = record.getStateStoreVersion();
-    assertEquals(FILE_RESOLVER_VERSION, version.getMountTableVersion());
-  }
-
-  @Test
-  public void testGetterSetter() throws IOException {
-    RouterState record = generateRecord();
-    validateRecord(record);
-  }
-
-  @Test
-  public void testSerialization() throws IOException {
-
-    RouterState record = generateRecord();
-
-    StateStoreSerializer serializer = StateStoreSerializer.getSerializer();
-    String serializedString = serializer.serializeString(record);
-    RouterState newRecord =
-        serializer.deserialize(serializedString, RouterState.class);
-
-    validateRecord(newRecord);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml
index 7f3bcca..18c2d17 100644
--- a/hadoop-hdfs-project/pom.xml
+++ b/hadoop-hdfs-project/pom.xml
@@ -37,6 +37,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <module>hadoop-hdfs-httpfs</module>
     <module>hadoop-hdfs/src/contrib/bkjournal</module>
     <module>hadoop-hdfs-nfs</module>
+    <module>hadoop-hdfs-rbf</module>
   </modules>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index f781004..c16924b 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -206,6 +206,11 @@
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdfs-rbf</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdfs</artifactId>
         <version>${project.version}</version>
         <type>test-jar</type>
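
[Editor's note: a minimal sketch, not part of this commit, of how a module under the hadoop-project parent could consume the new artifact; the version is inherited from the dependencyManagement entry added above.]

    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdfs-rbf</artifactId>
    </dependency>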

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-project/src/site/site.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 232aa78..f925d2e 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -100,7 +100,7 @@
       <item name="Memory Storage Support" href="hadoop-project-dist/hadoop-hdfs/MemoryStorage.html"/>
       <item name="Upgrade Domain" href="hadoop-project-dist/hadoop-hdfs/HdfsUpgradeDomain.html"/>
       <item name="DataNode Admin" href="hadoop-project-dist/hadoop-hdfs/HdfsDataNodeAdminGuide.html"/>
-      <item name="Router Federation" href="hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.html"/>
+      <item name="Router Federation" href="hadoop-project-dist/hadoop-hdfs-rbf/HDFSRouterFederation.html"/>
     </menu>
 
     <menu name="MapReduce" inherit="top">
@@ -186,6 +186,7 @@
     <menu name="Configuration" inherit="top">
       <item name="core-default.xml" href="hadoop-project-dist/hadoop-common/core-default.xml"/>
       <item name="hdfs-default.xml" href="hadoop-project-dist/hadoop-hdfs/hdfs-default.xml"/>
+      <item name="hdfs-rbf-default.xml" href="hadoop-project-dist/hadoop-hdfs-rbf/hdfs-rbf-default.xml"/>
       <item name="mapred-default.xml" href="hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml"/>
       <item name="yarn-default.xml" href="hadoop-yarn/hadoop-yarn-common/yarn-default.xml"/>
       <item name="Deprecated Properties" href="hadoop-project-dist/hadoop-common/DeprecatedProperties.html"/>



