hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1131124 - in /hadoop/hdfs/trunk: CHANGES.txt src/java/org/apache/hadoop/hdfs/DFSInputStream.java src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java
Date: Fri, 03 Jun 2011 18:04:42 GMT
Author: szetszwo
Date: Fri Jun  3 18:04:42 2011
New Revision: 1131124

URL: http://svn.apache.org/viewvc?rev=1131124&view=rev
Log:
HDFS-1907. Fix position read for reading still-being-written file in DFSInputStream.  Contributed by John George
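
For context (this sketch is not part of the commit), the failure mode is a positioned read that reaches into the last, still-being-written block of an open file. A minimal way to set up that scenario through the public FileSystem API might look like the following; the path, buffer sizes, and class name are illustrative only:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PreadWhileWriting {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path p = new Path("/tmp/preadWhileWriting");   // illustrative path

        // The writer keeps the file open; hflush makes the bytes readable
        // even though the last block is not yet finalized.
        FSDataOutputStream out = fs.create(p);
        out.write(new byte[4096]);
        out.hflush();

        // Positioned read into data that only the still-being-written block
        // covers; this is the kind of read the fix is about.
        FSDataInputStream in = fs.open(p);
        byte[] buf = new byte[4096];
        int n = in.read(0L, buf, 0, buf.length);
        System.out.println("read " + n + " bytes");

        in.close();
        out.close();
        fs.close();
      }
    }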

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1131124&r1=1131123&r2=1131124&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Jun  3 18:04:42 2011
@@ -678,6 +678,9 @@ Trunk (unreleased changes)
     HDFS-1995. Federation: Minor bug fixes and modification cluster web UI.
     (Tanping Wang via suresh)
 
+    HDFS-1907. Fix position read for reading still-being-written file in
+    DFSInputStream.  (John George via szetszwo)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1131124&r1=1131123&r2=1131124&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java Fri Jun  3 18:04:42 2011
@@ -306,11 +306,22 @@ public class DFSInputStream extends FSIn
       blocks = getFinalizedBlockRange(offset, length);
     }
     else {
-      if (length + offset > locatedBlocks.getFileLength()) {
+      final boolean readPastEnd = offset + length > locatedBlocks.getFileLength();
+      /* if requested length is greater than current file length
+       * then, it could possibly be from the current block being
+       * written to. First get the finalized block range and then
+       * if necessary, get the length of last block being written
+       * to.
+       */
+      if (readPastEnd)
         length = locatedBlocks.getFileLength() - offset;
-      }
+
       blocks = getFinalizedBlockRange(offset, length);
-      blocks.add(locatedBlocks.getLastLocatedBlock());
+      /* requested length is greater than what finalized blocks 
+       * have.
+       */
+      if (readPastEnd)
+        blocks.add(locatedBlocks.getLastLocatedBlock());
     }
     return blocks;
   }
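
Read as a whole, the patched else-branch now appends locatedBlocks.getLastLocatedBlock() only when the requested range extends past the finalized file length, rather than unconditionally. A compilable paraphrase of that decision, with hypothetical stand-ins (Lookup, finalizedBlockRange, fileLength, lastLocatedBlock) in place of the DFSInputStream internals:

    import java.util.ArrayList;
    import java.util.List;

    class BlockRangeSketch {
      /** Hypothetical stand-in for the parts of LocatedBlocks used by the branch above. */
      interface Lookup<B> {
        long fileLength();                                      // finalized length
        List<B> finalizedBlockRange(long offset, long length);  // finalized blocks in range
        B lastLocatedBlock();                                    // block still being written
      }

      static <B> List<B> blockRange(long offset, long length, Lookup<B> lb) {
        final boolean readPastEnd = offset + length > lb.fileLength();
        if (readPastEnd) {
          // Clamp the request to the finalized portion of the file.
          length = lb.fileLength() - offset;
        }
        List<B> blocks = new ArrayList<B>(lb.finalizedBlockRange(offset, length));
        if (readPastEnd) {
          // Only a request that reaches past the finalized length needs the
          // block that is still being written to.
          blocks.add(lb.lastLocatedBlock());
        }
        return blocks;
      }
    }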

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java?rev=1131124&r1=1131123&r2=1131124&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java Fri Jun  3 18:04:42 2011
@@ -106,12 +106,23 @@ public class TestWriteRead {
     positionReadOption = false;
     String fname = filenameOption;
     
+    positionReadOption = false;   // sequential read
     // need to run long enough to fail: takes 25 to 35 sec on Mac
     int stat = testWriteAndRead(fname, WR_NTIMES, WR_CHUNK_SIZE);
     LOG.info("Summary status from test1: status= " + stat);
-    Assert.assertTrue(stat == 0);
+    Assert.assertEquals(0, stat);
   }
-      
+
+  /** Junit Test position read while writing. */
+  @Test
+  public void TestWriteReadPos() throws IOException {
+    String fname = filenameOption;
+    positionReadOption = true;   // position read
+    int stat = testWriteAndRead(fname, WR_NTIMES, WR_CHUNK_SIZE);
+    Assert.assertEquals(0, stat);
+  }
+
+   
   // equivalent of TestWriteRead1
   private int clusterTestWriteRead1() throws IOException {
     int stat = testWriteAndRead(filenameOption, loopOption, chunkSizeOption);
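
The new TestWriteReadPos case reuses testWriteAndRead() with positionReadOption set, so the shared read path exercises positioned reads instead of sequential ones. A minimal illustration of the two call styles on FSDataInputStream (the method and variable names below are illustrative, not the test's):

    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataInputStream;

    class ReadStyleSketch {
      /** Read up to buf.length bytes, either at an explicit offset or sequentially. */
      static int readChunk(FSDataInputStream in, boolean positionRead,
                           long pos, byte[] buf) throws IOException {
        if (positionRead) {
          // Positioned read: takes an explicit file offset and does not move
          // the stream's current position.
          return in.read(pos, buf, 0, buf.length);
        }
        // Sequential read: starts at and advances the stream's current position.
        return in.read(buf, 0, buf.length);
      }
    }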


