From: szetszwo@apache.org
To: hdfs-commits@hadoop.apache.org
Subject: svn commit: r888919 - in /hadoop/hdfs/branches/branch-0.21: CHANGES.txt src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java
Date: Wed, 09 Dec 2009 18:50:54 -0000

Author: szetszwo
Date: Wed Dec  9 18:50:53 2009
New Revision: 888919

URL: http://svn.apache.org/viewvc?rev=888919&view=rev
Log:
HDFS-813. Enable the append test in TestReadWhileWriting.

Modified:
    hadoop/hdfs/branches/branch-0.21/CHANGES.txt
    hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java

Modified: hadoop/hdfs/branches/branch-0.21/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/CHANGES.txt?rev=888919&r1=888918&r2=888919&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/CHANGES.txt (original)
+++ hadoop/hdfs/branches/branch-0.21/CHANGES.txt Wed Dec  9 18:50:53 2009
@@ -313,6 +313,8 @@
     HDFS-804. New unit tests for concurrent lease recovery. (cos)
 
+    HDFS-813. Enable the append test in TestReadWhileWriting. (szetszwo)
+
   BUG FIXES
    HDFS-76. Better error message to users when commands fail because of

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java?rev=888919&r1=888918&r2=888919&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java Wed Dec  9 18:50:53 2009
@@ -17,23 +17,24 @@
  */
 package org.apache.hadoop.hdfs;
 
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;
 
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
 /** Test reading from hdfs while a file is being written. */
 public class TestReadWhileWriting {
   {
@@ -44,6 +45,7 @@
   private static final String DIR = "/"
       + TestReadWhileWriting.class.getSimpleName() + "/";
   private static final int BLOCK_SIZE = 8192;
+  private static final long LEASE_LIMIT = 500;
 
   /** Test reading while writing. */
   @Test
@@ -51,13 +53,13 @@
     final Configuration conf = new HdfsConfiguration();
     //enable append
     conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
+    conf.setLong("dfs.heartbeat.interval", 1);
 
     // create cluster
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
     try {
-      //change the lease soft limit to 1 second.
-      final long leaseSoftLimit = 1000;
-      cluster.setLeasePeriod(leaseSoftLimit, FSConstants.LEASE_HARDLIMIT_PERIOD);
+      //change the lease limits.
+      cluster.setLeasePeriod(LEASE_LIMIT, LEASE_LIMIT);
 
       //wait for the cluster
       cluster.waitActive();
@@ -82,25 +84,41 @@
       // of data can be read successfully.
       checkFile(p, half, conf);
 
-      /* TODO: enable the following when append is done.
       //c. On M1, append another half block of data.  Close file on M1.
       {
-        //sleep to make sure the lease is expired the soft limit.
-        Thread.sleep(2*leaseSoftLimit);
-
-        FSDataOutputStream out = fs.append(p);
+        //sleep to let the lease expire.
+        Thread.sleep(2*LEASE_LIMIT);
+
+        final DistributedFileSystem dfs = (DistributedFileSystem)FileSystem.newInstance(conf);
+        final FSDataOutputStream out = append(dfs, p);
         write(out, 0, half);
         out.close();
       }
 
       //d. On M2, open file and read 1 block of data from it.  Close file.
       checkFile(p, 2*half, conf);
-      */
     } finally {
       cluster.shutdown();
     }
   }
 
+  /** Try opening a file for append. */
+  private static FSDataOutputStream append(FileSystem fs, Path p) throws Exception {
+    for(int i = 0; i < 10; i++) {
+      try {
+        return fs.append(p);
+      } catch(RemoteException re) {
+        if (re.getClassName().equals(RecoveryInProgressException.class.getName())) {
+          AppendTestUtil.LOG.info("Will sleep and retry, i=" + i + ", p=" + p, re);
+          Thread.sleep(1000);
+        }
+        else
+          throw re;
+      }
+    }
+    throw new IOException("Cannot append to " + p);
+  }
+
   static private int userCount = 0;
   //check the file
   static void checkFile(Path p, int expectedsize, Configuration conf
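The new append() helper above retries because, right after the old writer's lease
expires, the NameNode may still be recovering that lease; during this window an
append() call fails with RecoveryInProgressException, which arrives at the client
wrapped in a RemoteException. Below is a minimal standalone sketch of the same
back-off-and-retry pattern; the class name, attempt count, and sleep interval are
illustrative choices for this sketch, not values mandated by HDFS or this commit.

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.ipc.RemoteException;

public class AppendRetrySketch {
  // Illustrative bounds; the test above uses 10 attempts and a 1-second sleep.
  private static final int MAX_ATTEMPTS = 10;
  private static final long SLEEP_MS = 1000;

  /** Open p for append, backing off while lease recovery is in progress. */
  static FSDataOutputStream appendWithRetry(FileSystem fs, Path p)
      throws IOException, InterruptedException {
    for(int i = 0; i < MAX_ATTEMPTS; i++) {
      try {
        return fs.append(p);
      } catch(RemoteException re) {
        // RemoteException carries the server-side exception class as a string,
        // so compare class names rather than using instanceof.
        if (RecoveryInProgressException.class.getName().equals(re.getClassName())) {
          Thread.sleep(SLEEP_MS);  // recovery still running; back off and retry
        } else {
          throw re;                // any other failure is fatal
        }
      }
    }
    throw new IOException("Cannot append to " + p + " after " + MAX_ATTEMPTS + " attempts");
  }
}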