Return-Path: X-Original-To: apmail-hadoop-hdfs-commits-archive@minotaur.apache.org Delivered-To: apmail-hadoop-hdfs-commits-archive@minotaur.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id ABD4A1238 for ; Tue, 26 Apr 2011 19:50:22 +0000 (UTC) Received: (qmail 20622 invoked by uid 500); 26 Apr 2011 19:50:22 -0000 Delivered-To: apmail-hadoop-hdfs-commits-archive@hadoop.apache.org Received: (qmail 20578 invoked by uid 500); 26 Apr 2011 19:50:22 -0000 Mailing-List: contact hdfs-commits-help@hadoop.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: hdfs-dev@hadoop.apache.org Delivered-To: mailing list hdfs-commits@hadoop.apache.org Received: (qmail 20568 invoked by uid 99); 26 Apr 2011 19:50:22 -0000 Received: from nike.apache.org (HELO nike.apache.org) (192.87.106.230) by apache.org (qpsmtpd/0.29) with ESMTP; Tue, 26 Apr 2011 19:50:22 +0000 X-ASF-Spam-Status: No, hits=-2000.0 required=5.0 tests=ALL_TRUSTED X-Spam-Check-By: apache.org Received: from [140.211.11.4] (HELO eris.apache.org) (140.211.11.4) by apache.org (qpsmtpd/0.29) with ESMTP; Tue, 26 Apr 2011 19:50:19 +0000 Received: by eris.apache.org (Postfix, from userid 65534) id 30EE42388994; Tue, 26 Apr 2011 19:49:58 +0000 (UTC) Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: svn commit: r1096875 - in /hadoop/hdfs/trunk/src: java/org/apache/hadoop/hdfs/DFSClient.java test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java Date: Tue, 26 Apr 2011 19:49:58 -0000 To: hdfs-commits@hadoop.apache.org From: jitendra@apache.org X-Mailer: svnmailer-1.0.8 Message-Id: <20110426194958.30EE42388994@eris.apache.org> X-Virus-Checked: Checked by ClamAV on apache.org Author: jitendra Date: Tue Apr 26 19:49:57 2011 New Revision: 1096875 URL: http://svn.apache.org/viewvc?rev=1096875&view=rev Log: HDFS-1843. Discover file not found early for file append. 
Contributed by Bharath Mundlapudi. Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1096875&r1=1096874&r2=1096875&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java (original) +++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java Tue Apr 26 19:49:57 2011 @@ -755,10 +755,14 @@ public class DFSClient implements FSCons * * @see ClientProtocol#append(String, String) */ - OutputStream append(String src, int buffersize, Progressable progress) + OutputStream append(String src, int buffersize, Progressable progress) throws IOException { checkOpen(); HdfsFileStatus stat = getFileInfo(src); + if (stat == null) { // No file found + throw new FileNotFoundException("failed to append to non-existent file " + + src + " on client " + clientName); + } OutputStream result = callAppend(stat, src, buffersize, progress); leasechecker.put(src, result); return result; Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java?rev=1096875&r1=1096874&r2=1096875&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java Tue Apr 26 19:49:57 2011 @@ -17,12 +17,15 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.*; + import java.io.File; import java.io.IOException; +import java.io.FileNotFoundException; import java.net.InetSocketAddress; import 
java.util.List; -import junit.framework.TestCase; +import org.junit.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; @@ -41,7 +44,7 @@ import org.apache.hadoop.hdfs.server.dat * This class tests the building blocks that are needed to * support HDFS appends. */ -public class TestFileAppend extends TestCase { +public class TestFileAppend{ boolean simulatedStorage = false; private static byte[] fileContents = null; @@ -101,6 +104,7 @@ public class TestFileAppend extends Test * Test that copy on write for blocks works correctly * @throws IOException an exception might be thrown */ + @Test public void testCopyOnWrite() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { @@ -171,6 +175,7 @@ public class TestFileAppend extends Test * Test a simple flush on a simple HDFS file. * @throws IOException an exception might be thrown */ + @Test public void testSimpleFlush() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { @@ -226,6 +231,7 @@ public class TestFileAppend extends Test * Test that file data can be flushed. 
* @throws IOException an exception might be thrown */ + @Test public void testComplexFlush() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { @@ -268,4 +274,26 @@ public class TestFileAppend extends Test cluster.shutdown(); } } + + /** + * FileNotFoundException is expected for appending to a non-existing file + * + * @throws FileNotFoundException as the result + */ + @Test(expected = FileNotFoundException.class) + public void testFileNotFound() throws IOException { + Configuration conf = new HdfsConfiguration(); + if (simulatedStorage) { + conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); + } + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); + FileSystem fs = cluster.getFileSystem(); + try { + Path file1 = new Path("/nonexistingfile.dat"); + fs.append(file1); + } finally { + fs.close(); + cluster.shutdown(); + } + } }