From: todd@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Subject: svn commit: r1355584 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/test/java/org/apache/hadoop/hdfs/ src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/test/java/org/apache/hadoop/hdfs/server/namenode/
Date: Fri, 29 Jun 2012 22:24:31 -0000

Author: todd
Date: Fri Jun 29 22:24:29 2012
New Revision: 1355584

URL: http://svn.apache.org/viewvc?rev=1355584&view=rev
Log:
HDFS-3446. HostsFileReader silently ignores bad includes/excludes. Contributed by Matthew Jacobs.

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1355584&r1=1355583&r2=1355584&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Jun 29 22:24:29 2012
@@ -171,6 +171,9 @@ Branch-2 ( Unreleased changes )
 
   INCOMPATIBLE CHANGES
 
+    HDFS-3446. HostsFileReader silently ignores bad includes/excludes
+    (Matthew Jacobs via todd)
+
   NEW FEATURES
 
    HDFS-744. Support hsync in HDFS. (Lars Hofhansl via szetszwo)
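
(The HostsFileReader fix itself lives in the shared hosts-file reading code
and is not part of this test-only diff. As a rough sketch of the stricter
behavior the tests below now depend on -- class and method names here are
illustrative, not the actual Hadoop API:

    import java.io.BufferedReader;
    import java.io.File;
    import java.io.FileNotFoundException;
    import java.io.FileReader;
    import java.io.IOException;
    import java.util.HashSet;
    import java.util.Set;

    class StrictHostsFileReader {
      // Read whitespace-separated hostnames; '#' starts a comment.
      static Set<String> readHostsFile(String filename) throws IOException {
        File file = new File(filename);
        if (!file.exists()) {
          // Previously a missing file was silently treated as an empty
          // host list; the fix is to fail fast so a typo in dfs.hosts or
          // dfs.hosts.exclude surfaces at NameNode startup instead of
          // being ignored.
          throw new FileNotFoundException(filename + " does not exist");
        }
        Set<String> hosts = new HashSet<String>();
        BufferedReader reader = new BufferedReader(new FileReader(file));
        try {
          String line;
          while ((line = reader.readLine()) != null) {
            for (String token : line.split("\\s+")) {
              if (token.startsWith("#")) {
                break;               // rest of the line is a comment
              }
              if (!token.isEmpty()) {
                hosts.add(token);
              }
            }
          }
        } finally {
          reader.close();
        }
        return hosts;
      }
    }

The consequence for the tests: any test that points dfs.hosts or
dfs.hosts.exclude at a path must now actually create that file, even if it
is empty.)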
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java?rev=1355584&r1=1355583&r2=1355584&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java Fri Jun 29 22:24:29 2012
@@ -78,6 +78,7 @@ public class TestDecommission {
     // Setup conf
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
+    conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
@@ -85,6 +86,7 @@ public class TestDecommission {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, NAMENODE_REPLICATION_INTERVAL);
 
+    writeConfigFile(hostsFile, null);
     writeConfigFile(excludeFile, null);
   }
 
@@ -508,7 +510,6 @@ public class TestDecommission {
   public void testHostsFile(int numNameNodes) throws IOException,
       InterruptedException {
-    conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
     int numDatanodes = 1;
     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(numNameNodes))

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java?rev=1355584&r1=1355583&r2=1355584&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java Fri Jun 29 22:24:29 2012
@@ -389,9 +389,12 @@ public class TestBlocksWithNotEnoughRack
     Path workingDir = localFileSys.getWorkingDirectory();
     Path dir = new Path(workingDir, "build/test/data/temp/decommission");
     Path excludeFile = new Path(dir, "exclude");
+    Path includeFile = new Path(dir, "include");
     assertTrue(localFileSys.mkdirs(dir));
     DFSTestUtil.writeFile(localFileSys, excludeFile, "");
+    DFSTestUtil.writeFile(localFileSys, includeFile, "");
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
+    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
 
     // Two blocks and four racks
     String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
@@ -439,8 +442,11 @@ public class TestBlocksWithNotEnoughRack
     Path workingDir = localFileSys.getWorkingDirectory();
     Path dir = new Path(workingDir, "build/test/data/temp/decommission");
     Path excludeFile = new Path(dir, "exclude");
+    Path includeFile = new Path(dir, "include");
     assertTrue(localFileSys.mkdirs(dir));
     DFSTestUtil.writeFile(localFileSys, excludeFile, "");
+    DFSTestUtil.writeFile(localFileSys, includeFile, "");
+    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
 
     // All hosts are on two racks, only one host on /rack2
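
(The same setup pattern recurs in every test this patch touches: materialize
the include and exclude files on disk, even when empty, before wiring them
into the configuration and starting the cluster. A condensed sketch of that
pattern -- the helper class is illustrative, not part of the patch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSTestUtil;

    class HostsFileTestSetup {
      // Create empty include/exclude files and point the conf at them,
      // so NameNode startup, which now validates both paths, succeeds.
      static void setupHostsFiles(Configuration conf, FileSystem localFileSys,
          Path dir) throws IOException {
        Path includeFile = new Path(dir, "include");
        Path excludeFile = new Path(dir, "exclude");
        localFileSys.mkdirs(dir);
        DFSTestUtil.writeFile(localFileSys, includeFile, "");
        DFSTestUtil.writeFile(localFileSys, excludeFile, "");
        conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
        conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
      }
    }

An empty include file means "no host restrictions", so existing test behavior
is unchanged; only the file's existence is newly required.)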
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1355584&r1=1355583&r2=1355584&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Fri Jun 29 22:24:29 2012
@@ -119,11 +119,15 @@ public class NNThroughputBenchmark {
         "${hadoop.tmp.dir}/dfs/hosts/exclude");
     File excludeFile = new File(config.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, "exclude"));
-    if(! excludeFile.exists()) {
+    if(!excludeFile.exists()) {
       if(!excludeFile.getParentFile().mkdirs())
         throw new IOException("NNThroughputBenchmark: cannot mkdir " + excludeFile);
     }
     new FileOutputStream(excludeFile).close();
+    // set include file
+    config.set(DFSConfigKeys.DFS_HOSTS, "${hadoop.tmp.dir}/dfs/hosts/include");
+    File includeFile = new File(config.get(DFSConfigKeys.DFS_HOSTS, "include"));
+    new FileOutputStream(includeFile).close();
     // Start the NameNode
     String[] argv = new String[] {};
     nameNode = NameNode.createNameNode(argv, config);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java?rev=1355584&r1=1355583&r2=1355584&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java Fri Jun 29 22:24:29 2012
@@ -74,14 +74,16 @@ public class TestDecommissioningStatus {
     assertTrue(localFileSys.mkdirs(dir));
     excludeFile = new Path(dir, "exclude");
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
-    conf
-        .setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
+    Path includeFile = new Path(dir, "include");
+    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
 
     writeConfigFile(localFileSys, excludeFile, null);
+    writeConfigFile(localFileSys, includeFile, null);
 
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
     cluster.waitActive();
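
(One subtlety in the NNThroughputBenchmark hunk above: the include path is
stored with the ${hadoop.tmp.dir} variable unexpanded, and Configuration.get()
substitutes it on read, so the File constructed from it lands inside the
hosts directory already created for the exclude file. A small standalone
illustration -- property values here are examples:

    import org.apache.hadoop.conf.Configuration;

    public class VarExpansionDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("hadoop.tmp.dir", "/tmp/hadoop-demo");  // example value
        conf.set("dfs.hosts", "${hadoop.tmp.dir}/dfs/hosts/include");
        // Configuration.get() expands ${...} references against other
        // properties (and system properties), so this prints the
        // resolved path: /tmp/hadoop-demo/dfs/hosts/include
        System.out.println(conf.get("dfs.hosts"));
      }
    }
)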
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1355584&r1=1355583&r2=1355584&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Fri Jun 29 22:24:29 2012
@@ -507,7 +507,7 @@ public class TestStartup extends TestCas
 
     // Setup conf
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
-    writeConfigFile(localFileSys, excludeFile, null);
+    writeConfigFile(localFileSys, excludeFile, null);
     conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
     // write into hosts file
     ArrayList<String> list = new ArrayList<String>();
@@ -553,15 +553,15 @@ public class TestStartup extends TestCas
       localFileSys.delete(name, true);
     }
 
+    FSDataOutputStream stm = localFileSys.create(name);
     if (nodes != null) {
-      FSDataOutputStream stm = localFileSys.create(name);
       for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
         String node = it.next();
         stm.writeBytes(node);
         stm.writeBytes("\n");
       }
-      stm.close();
     }
+    stm.close();
   }
 
   private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
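
(The writeConfigFile change above is the behavioral core of the TestStartup
update: previously writeConfigFile(localFileSys, name, null) deleted any old
file and wrote nothing, leaving no file for the now-strict reader to find.
Hoisting create() and close() out of the null check guarantees an empty file
always exists. Restated as a standalone helper, equivalent to the refactored
method with a try/finally added for robustness:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class ConfigFileWriter {
      // A null node list still produces an empty file on disk, so a
      // configured-but-empty hosts file always exists at startup.
      static void writeConfigFile(FileSystem fs, Path name, List<String> nodes)
          throws IOException {
        if (fs.exists(name)) {
          fs.delete(name, true);
        }
        FSDataOutputStream stm = fs.create(name);  // created even when nodes == null
        try {
          if (nodes != null) {
            for (String node : nodes) {
              stm.writeBytes(node);
              stm.writeBytes("\n");
            }
          }
        } finally {
          stm.close();
        }
      }
    }
)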