From: cos@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Subject: svn commit: r881014 - in /hadoop/hdfs/trunk: CHANGES.txt src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java
Date: Mon, 16 Nov 2009 22:26:43 -0000
Message-Id: <20091116222643.ABFDB2388A19@eris.apache.org>

Author: cos
Date: Mon Nov 16 22:26:43 2009
New Revision: 881014

URL: http://svn.apache.org/viewvc?rev=881014&view=rev
Log:
HDFS-741. TestHFlush test doesn't seek() past previously written part of the file. Contributed by Konstantin Boudnik

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=881014&r1=881013&r2=881014&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Mon Nov 16 22:26:43 2009
@@ -58,6 +58,9 @@
 
   HDFS-774. Intermittent race condition in TestFiPipelines (cos)
 
+  HDFS-741. TestHFlush test doesn't seek() past previously written part of
+  the file (cos, szetszwo)
+
 Release 0.21.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java?rev=881014&r1=881013&r2=881014&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java Mon Nov 16 22:26:43 2009
@@ -18,18 +18,17 @@
 package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataInputStream;
 import static org.junit.Assert.assertEquals;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import static org.junit.Assert.assertTrue;
 import org.junit.Test;
 
 import java.io.IOException;
 
 /** Class contains a set of tests to verify the correctness of
- * newly introduced {@link DFSClient#hflush()} method */
+ * newly introduced {@link FSDataOutputStream#hflush()} method */
 public class TestHFlush {
   private final String fName = "hflushtest.dat";
 
@@ -115,9 +114,12 @@
         System.arraycopy(fileContent, tenth * i, expected, 0, tenth);
         // Open the same file for read. Need to create new reader after every write operation(!)
         is = fileSystem.open(path);
-        is.read(toRead, tenth * i, tenth);
+        is.seek(tenth * i);
+        int readBytes = is.read(toRead, 0, tenth);
+        System.out.println("Has read " + readBytes);
+        assertTrue("Should've get more bytes", (readBytes > 0) && (readBytes <= tenth));
         is.close();
-        checkData(toRead, 0, expected, "Partial verification");
+        checkData(toRead, 0, readBytes, expected, "Partial verification");
       }
       System.out.println("Writing " + (tenth * SECTIONS) + " to " + (tenth * SECTIONS + rounding) + " section to file " + fileName);
       stm.write(fileContent, tenth * SECTIONS, rounding);
@@ -125,10 +127,6 @@
       assertEquals("File size doesn't match ", AppendTestUtil.FILE_SIZE,
        fileSystem.getFileStatus(path).getLen());
       AppendTestUtil.checkFullFile(fileSystem, path, fileContent.length, fileContent, "hflush()");
-
-    } catch (IOException ioe) {
-      ioe.printStackTrace();
-      throw ioe;
     } catch (Exception e) {
       e.printStackTrace();
    } finally {
@@ -136,9 +134,9 @@
       cluster.shutdown();
     }
   }
-  static void checkData(final byte[] actual, int from,
-                        final byte[] expected, String message) {
-    for (int idx = 0; idx < actual.length; idx++) {
+  static void checkData(final byte[] actual, int from, int len,
+                        final byte[] expected, String message) {
+    for (int idx = 0; idx < len; idx++) {
       assertEquals(message+" byte "+(from+idx)+" differs. expected "+
                    expected[from+idx]+" actual "+actual[idx],
                    expected[from+idx], actual[idx]);
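
The change is easy to misread, so it is worth spelling out. The old test read
each freshly hflush()ed section with is.read(toRead, tenth * i, tenth); the
offset argument of read(buf, off, len) is a destination offset inside the
buffer, not a file position, so every iteration still read from position 0 of
the stream while landing the bytes further and further into toRead. The fix
seek()s the newly opened reader to the start of the current section, reads
into offset 0, and, because a single read() may legitimately return fewer
bytes than requested, verifies only the readBytes bytes actually returned
(hence the new len parameter on checkData).

Below is a minimal standalone sketch of that read-after-hflush pattern. The
class name, file path, sizes, and main() harness are illustrative stand-ins
(the real test runs against a MiniDFSCluster), but the seek()-then-read()
sequence mirrors the committed fix.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.util.Random;

public class HFlushReadSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Assumes the default file system points at a running HDFS instance.
    FileSystem fs = FileSystem.get(conf);
    // Illustrative path and sizes; not taken from the commit.
    Path path = new Path("/tmp/hflushsketch.dat");
    final int SECTIONS = 10;
    final int tenth = 1024;

    byte[] fileContent = new byte[tenth * SECTIONS];
    new Random(0).nextBytes(fileContent);

    FSDataOutputStream stm = fs.create(path);
    try {
      for (int i = 0; i < SECTIONS; i++) {
        // Write one more section and make it visible to new readers.
        stm.write(fileContent, tenth * i, tenth);
        stm.hflush();

        // A fresh reader is needed after every write; seek it past the
        // previously written part before reading the new section.
        FSDataInputStream is = fs.open(path);
        try {
          is.seek(tenth * i);
          byte[] toRead = new byte[tenth];
          int readBytes = is.read(toRead, 0, tenth);
          if (readBytes <= 0) {
            throw new IOException("expected data after hflush()");
          }
          // read() may return fewer than 'tenth' bytes, so only the
          // first readBytes bytes can be verified.
          for (int idx = 0; idx < readBytes; idx++) {
            if (toRead[idx] != fileContent[tenth * i + idx]) {
              throw new IOException("byte " + (tenth * i + idx) + " differs");
            }
          }
        } finally {
          is.close();
        }
      }
    } finally {
      stm.close();
    }
  }
}

Note that hflush() guarantees the flushed bytes are visible to new readers,
not that they have been synced to disk, which is why the test opens a fresh
reader for every partial verification and still checks the full file after
the stream is closed.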