hadoop-common-commits mailing list archives

From: min...@apache.org
Subject: [1/2] hadoop git commit: HDFS-9005. Provide configuration support for upgrade domain.
Date: Tue, 02 May 2017 13:53:49 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 c9bf21b0f -> c4c553321


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4c55332/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java
new file mode 100644
index 0000000..cd5ae95
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.util;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.HostFileManager;
+
+import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+
+import static org.junit.Assert.assertTrue;
+
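+/**
+ * Test utility that maintains datanode hosts files for a MiniDFSCluster:
+ * either the legacy include/exclude pair or a single combined JSON file,
+ * depending on which HostConfigManager implementation is configured.
+ */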
+public class HostsFileWriter {
+  private FileSystem localFileSys;
+  private Path fullDir;
+  private Path excludeFile;
+  private Path includeFile;
+  private Path combinedFile;
+  private boolean isLegacyHostsFile = false;
+
+  public void initialize(Configuration conf, String dir) throws IOException {
+    localFileSys = FileSystem.getLocal(conf);
+    Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
+    this.fullDir = new Path(workingDir, dir);
+    assertTrue(localFileSys.mkdirs(this.fullDir));
+
+    if (conf.getClass(
+        DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
+            HostFileManager.class, HostConfigManager.class).equals(
+                HostFileManager.class)) {
+      isLegacyHostsFile = true;
+    }
+    if (isLegacyHostsFile) {
+      excludeFile = new Path(fullDir, "exclude");
+      includeFile = new Path(fullDir, "include");
+      DFSTestUtil.writeFile(localFileSys, excludeFile, "");
+      DFSTestUtil.writeFile(localFileSys, includeFile, "");
+      conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
+      conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
+    } else {
+      combinedFile = new Path(fullDir, "all");
+      conf.set(DFSConfigKeys.DFS_HOSTS, combinedFile.toString());
+    }
+  }
+
+  public void initExcludeHost(String hostNameAndPort) throws IOException {
+    if (isLegacyHostsFile) {
+      DFSTestUtil.writeFile(localFileSys, excludeFile, hostNameAndPort);
+    } else {
+      DatanodeAdminProperties dn = new DatanodeAdminProperties();
+      String[] hostAndPort = hostNameAndPort.split(":");
+      dn.setHostName(hostAndPort[0]);
+      dn.setPort(Integer.parseInt(hostAndPort[1]));
+      dn.setAdminState(AdminStates.DECOMMISSIONED);
+      HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
+      allDNs.add(dn);
+      CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
+    }
+  }
+
+  public void initIncludeHosts(String[] hostNameAndPorts) throws IOException {
+    StringBuilder includeHosts = new StringBuilder();
+    if (isLegacyHostsFile) {
+      for (String hostNameAndPort : hostNameAndPorts) {
+        includeHosts.append(hostNameAndPort).append("\n");
+      }
+      DFSTestUtil.writeFile(localFileSys, includeFile,
+          includeHosts.toString());
+    } else {
+      HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
+      for (String hostNameAndPort : hostNameAndPorts) {
+        String[] hostAndPort = hostNameAndPort.split(":");
+        DatanodeAdminProperties dn = new DatanodeAdminProperties();
+        dn.setHostName(hostAndPort[0]);
+        dn.setPort(Integer.parseInt(hostAndPort[1]));
+        allDNs.add(dn);
+      }
+      CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
+    }
+  }
+
+  public void initIncludeHosts(DatanodeAdminProperties[] datanodes)
+      throws IOException {
+    CombinedHostsFileWriter.writeFile(combinedFile.toString(),
+        new HashSet<>(Arrays.asList(datanodes)));
+  }
+
+  public void cleanup() throws IOException {
+    if (localFileSys.exists(fullDir)) {
+      FileUtils.deleteQuietly(new File(fullDir.toUri().getPath()));
+    }
+  }
+}
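
For reference, a minimal sketch of how a test might drive the HostsFileWriter helper above. With a default Configuration the helper takes the legacy branch and maintains separate include and exclude files; configuring the combined hosts provider (see the sketch after dfs.hosts.json below) makes the same calls produce one JSON file instead. The directory name and host:port values are illustrative:

    Configuration conf = new HdfsConfiguration();
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    // Creates the working directory and registers empty hosts files in conf.
    hostsFileWriter.initialize(conf, "temp/decommission");
    // Two datanodes in the include list; one of them also excluded.
    hostsFileWriter.initIncludeHosts(
        new String[] {"127.0.0.1:1234", "127.0.0.1:5678"});
    hostsFileWriter.initExcludeHost("127.0.0.1:1234");
    // ... start a MiniDFSCluster with conf and assert on admin states ...
    hostsFileWriter.cleanup();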

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4c55332/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
new file mode 100644
index 0000000..c3946e4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.io.File;
+import java.io.FileWriter;
+
+import java.util.Set;
+
+import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
+import org.junit.Before;
+import org.junit.After;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/*
+ * Test for JSON based HostsFileReader
+ */
+public class TestCombinedHostsFileReader {
+
+  // Temporary files go under the test.build.data directory (default: /tmp).
+  static final String HOSTS_TEST_DIR = new File(System.getProperty(
+      "test.build.data", "/tmp")).getAbsolutePath();
+  File NEW_FILE = new File(HOSTS_TEST_DIR, "dfs.hosts.new.json");
+
+  static final String TEST_CACHE_DATA_DIR =
+      System.getProperty("test.cache.data", "build/test/cache");
+  File EXISTING_FILE = new File(TEST_CACHE_DATA_DIR, "dfs.hosts.json");
+
+  @Before
+  public void setUp() throws Exception {
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    // Delete test file after running tests
+    NEW_FILE.delete();
+  }
+
+  /*
+   * Load the existing test json file
+   */
+  @Test
+  public void testLoadExistingJsonFile() throws Exception {
+    Set<DatanodeAdminProperties> all =
+        CombinedHostsFileReader.readFile(EXISTING_FILE.getAbsolutePath());
+    assertEquals(5, all.size());
+  }
+
+  /*
+   * Test empty json config file
+   */
+  @Test
+  public void testEmptyCombinedHostsFileReader() throws Exception {
+    FileWriter hosts = new FileWriter(NEW_FILE);
+    hosts.write("");
+    hosts.close();
+    Set<DatanodeAdminProperties> all =
+        CombinedHostsFileReader.readFile(NEW_FILE.getAbsolutePath());
+    assertEquals(0, all.size());
+  }
+}
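
The reader under test pairs with the CombinedHostsFileWriter exercised by HostsFileWriter above. A round-trip sketch, relying only on the readFile/writeFile signatures visible in this patch; the setUpgradeDomain setter is an assumption mirroring the "upgradeDomain" key in dfs.hosts.json:

    File jsonFile = new File(HOSTS_TEST_DIR, "dfs.hosts.roundtrip.json");
    DatanodeAdminProperties dn = new DatanodeAdminProperties();
    dn.setHostName("host1");
    dn.setPort(8090);
    dn.setUpgradeDomain("ud0");  // assumed setter, mirroring the JSON key
    Set<DatanodeAdminProperties> written = new HashSet<>();
    written.add(dn);
    // The writer emits one JSON object per line, one line per datanode.
    CombinedHostsFileWriter.writeFile(jsonFile.getAbsolutePath(), written);
    Set<DatanodeAdminProperties> read =
        CombinedHostsFileReader.readFile(jsonFile.getAbsolutePath());
    assertEquals(1, read.size());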

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4c55332/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json
new file mode 100644
index 0000000..64fca48
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json
@@ -0,0 +1,5 @@
+{"hostName": "host1"}
+{"hostName": "host2", "upgradeDomain": "ud0"}
+{"hostName": "host3", "adminState": "DECOMMISSIONED"}
+{"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"}
+{"hostName": "host5", "port": 8090}



