To: core-commits@hadoop.apache.org
From: cdouglas@apache.org
Subject: svn commit: r706371 - in /hadoop/core/trunk: CHANGES.txt src/test/org/apache/hadoop/hdfs/TestFileCreation.java src/test/org/apache/hadoop/hdfs/TestFileCreationClient.java
Date: Mon, 20 Oct 2008 19:06:40 -0000

Author: cdouglas
Date: Mon Oct 20 12:06:40 2008
New Revision: 706371

URL: http://svn.apache.org/viewvc?rev=706371&view=rev
Log:
HADOOP-4464. Separate out TestFileCreationClient from TestFileCreation.
Contributed by Tsz Wo (Nicholas), SZE.

Added:
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreationClient.java
Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=706371&r1=706370&r2=706371&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Oct 20 12:06:40 2008
@@ -983,6 +983,9 @@
     HADOOP-4457. Fixes an input split logging problem introduced by
     HADOOP-3245. (Amareshwari Sriramadasu via ddas)
 
+    HADOOP-4464. Separate out TestFileCreationClient from TestFileCreation.
+    (Tsz Wo (Nicholas), SZE via cdouglas)
+
 Release 0.18.2 - Unreleased
 
   BUG FIXES

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java?rev=706371&r1=706370&r2=706371&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java Mon Oct 20 12:06:40 2008
@@ -703,103 +703,4 @@
     System.out.println("testLeaseExpireHardLimit successful");
   }
-
-  /** Test lease recovery triggered by the DFSClient. */
-  public void testClientTriggeredLeaseRecovery() throws Exception {
-    final int REPLICATION = 3;
-    Configuration conf = new Configuration();
-    conf.setInt("dfs.datanode.handler.count", 1);
-    conf.setInt("dfs.replication", REPLICATION);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION, true, null);
-
-    try {
-      final FileSystem fs = cluster.getFileSystem();
-      final Path dir = new Path("/wrwelkj");
-
-      SlowWriter[] slowwriters = new SlowWriter[10];
-      for(int i = 0; i < slowwriters.length; i++) {
-        slowwriters[i] = new SlowWriter(fs, new Path(dir, "file" + i));
-      }
-
-      try {
-        for(int i = 0; i < slowwriters.length; i++) {
-          slowwriters[i].start();
-        }
-
-        Thread.sleep(1000); // let writers get started
-
-        //stop a datanode; this should trigger lease recovery.
-        cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION));
-
-        //let the slow writers write for a few more seconds
-        System.out.println("Wait a few seconds");
-        Thread.sleep(5000);
-      }
-      finally {
-        for(int i = 0; i < slowwriters.length; i++) {
-          if (slowwriters[i] != null) {
-            slowwriters[i].running = false;
-            slowwriters[i].interrupt();
-          }
-        }
-        for(int i = 0; i < slowwriters.length; i++) {
-          if (slowwriters[i] != null) {
-            slowwriters[i].join();
-          }
-        }
-      }
-
-      //Verify the file
-      System.out.println("Verify the file");
-      for(int i = 0; i < slowwriters.length; i++) {
-        System.out.println(slowwriters[i].filepath + ": length="
-            + fs.getFileStatus(slowwriters[i].filepath).getLen());
-        FSDataInputStream in = null;
-        try {
-          in = fs.open(slowwriters[i].filepath);
-          for(int j = 0, x; (x = in.read()) != -1; j++) {
-            assertEquals(j, x);
-          }
-        }
-        finally {
-          IOUtils.closeStream(in);
-        }
-      }
-    } finally {
-      if (cluster != null) {cluster.shutdown();}
-    }
-  }
-
-  static class SlowWriter extends Thread {
-    final FileSystem fs;
-    final Path filepath;
-    boolean running = true;
-
-    SlowWriter(FileSystem fs, Path filepath) {
-      super(SlowWriter.class.getSimpleName() + ":" + filepath);
-      this.fs = fs;
-      this.filepath = filepath;
-    }
-
-    public void run() {
-      FSDataOutputStream out = null;
-      int i = 0;
-      try {
-        out = fs.create(filepath);
-        for(; running; i++) {
-          System.out.println(getName() + " writes " + i);
-          out.write(i);
-          out.sync();
-          sleep(100);
-        }
-      }
-      catch(Exception e) {
-        System.out.println(getName() + " dies: e=" + e);
-      }
-      finally {
-        System.out.println(getName() + ": i=" + i);
-        IOUtils.closeStream(out);
-      }
-    }
-  }
 }

Added: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreationClient.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreationClient.java?rev=706371&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreationClient.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreationClient.java Mon Oct 20 12:06:40 2008
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.log4j.Level;
+
+/**
+ * This class tests that a file need not be closed before its
+ * data can be read by another client.
+ */
+public class TestFileCreationClient extends junit.framework.TestCase {
+  static final String DIR = "/" + TestFileCreationClient.class.getSimpleName() + "/";
+
+  {
+    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  /** Test lease recovery triggered by the DFSClient. */
+  public void testClientTriggeredLeaseRecovery() throws Exception {
+    final int REPLICATION = 3;
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.datanode.handler.count", 1);
+    conf.setInt("dfs.replication", REPLICATION);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION, true, null);
+
+    try {
+      final FileSystem fs = cluster.getFileSystem();
+      final Path dir = new Path("/wrwelkj");
+
+      SlowWriter[] slowwriters = new SlowWriter[10];
+      for(int i = 0; i < slowwriters.length; i++) {
+        slowwriters[i] = new SlowWriter(fs, new Path(dir, "file" + i));
+      }
+
+      try {
+        for(int i = 0; i < slowwriters.length; i++) {
+          slowwriters[i].start();
+        }
+
+        Thread.sleep(1000); // let writers get started
+
+        //stop a datanode; this should trigger lease recovery.
+        cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION));
+
+        //let the slow writers write for a few more seconds
+        System.out.println("Wait a few seconds");
+        Thread.sleep(5000);
+      }
+      finally {
+        for(int i = 0; i < slowwriters.length; i++) {
+          if (slowwriters[i] != null) {
+            slowwriters[i].running = false;
+            slowwriters[i].interrupt();
+          }
+        }
+        for(int i = 0; i < slowwriters.length; i++) {
+          if (slowwriters[i] != null) {
+            slowwriters[i].join();
+          }
+        }
+      }
+
+      //Verify the file
+      System.out.println("Verify the file");
+      for(int i = 0; i < slowwriters.length; i++) {
+        System.out.println(slowwriters[i].filepath + ": length="
+            + fs.getFileStatus(slowwriters[i].filepath).getLen());
+        FSDataInputStream in = null;
+        try {
+          in = fs.open(slowwriters[i].filepath);
+          for(int j = 0, x; (x = in.read()) != -1; j++) {
+            assertEquals(j, x);
+          }
+        }
+        finally {
+          IOUtils.closeStream(in);
+        }
+      }
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+
+  static class SlowWriter extends Thread {
+    final FileSystem fs;
+    final Path filepath;
+    boolean running = true;
+
+    SlowWriter(FileSystem fs, Path filepath) {
+      super(SlowWriter.class.getSimpleName() + ":" + filepath);
+      this.fs = fs;
+      this.filepath = filepath;
+    }
+
+    public void run() {
+      FSDataOutputStream out = null;
+      int i = 0;
+      try {
+        out = fs.create(filepath);
+        for(; running; i++) {
+          System.out.println(getName() + " writes " + i);
+          out.write(i);
+          out.sync();
+          sleep(100);
+        }
+      }
+      catch(Exception e) {
+        System.out.println(getName() + " dies: e=" + e);
+      }
+      finally {
+        System.out.println(getName() + ": i=" + i);
+        IOUtils.closeStream(out);
+      }
+    }
+  }
+}
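
[Editor's note, not part of r706371] For readers skimming the archive, the stand-alone sketch below shows in isolation the create/write/sync/open sequence that TestFileCreationClient and its SlowWriter helper revolve around. It reuses only APIs that appear in the diff above (MiniDFSCluster, FileSystem.create/open, FSDataOutputStream.sync, IOUtils.closeStream); the class name ReadWithoutCloseSketch and the /sketch path are made up for illustration, and the sketch deliberately asserts nothing about how many synced bytes a concurrent reader sees, since that visibility depends on the HDFS release's sync semantics.

// Hypothetical sketch, assuming the Hadoop test classpath of this era,
// where MiniDFSCluster and AppendTestUtil live in org.apache.hadoop.hdfs.
package org.apache.hadoop.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class ReadWithoutCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Same constructor the test uses: 3 datanodes, format the cluster, default racks.
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    FSDataOutputStream out = null;
    FSDataInputStream in = null;
    try {
      FileSystem fs = cluster.getFileSystem();
      Path p = new Path("/sketch/file0");

      // Write a handful of bytes and sync them, but leave the stream open,
      // as SlowWriter does on every iteration of its loop.
      out = fs.create(p);
      for (int i = 0; i < 10; i++) {
        out.write(i);
      }
      out.sync();

      // Open the same path with a separate input stream while the writer
      // still holds it open, and report whatever happens to be readable.
      in = fs.open(p);
      int count = 0;
      while (in.read() != -1) {
        count++;
      }
      System.out.println("bytes visible before close: " + count);
    } finally {
      IOUtils.closeStream(in);
      IOUtils.closeStream(out);
      cluster.shutdown();
    }
  }
}

In the committed test the same pattern is driven by ten SlowWriter threads while one datanode is stopped underneath them, which is what exercises the client-triggered lease recovery path.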