hadoop-common-commits mailing list archives

From junping_du@apache.org
Subject hadoop git commit: HDFS-9220. Reading small file (< 512 bytes) that is open for append fails due to incorrect checksum. Contributed by Jing Zhao. (cherry picked from commit c7c36cbd6218f46c33d7fb2f60cd52cb29e6d720) (cherry picked from commit 4cf7f8441a288cefd44f126d60dae1998239892a)
Date Wed, 03 Feb 2016 01:00:29 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 fc01ec866 -> d8416aec3


HDFS-9220. Reading small file (< 512 bytes) that is open for append fails due to incorrect checksum. Contributed by Jing Zhao.
(cherry picked from commit c7c36cbd6218f46c33d7fb2f60cd52cb29e6d720)
(cherry picked from commit 4cf7f8441a288cefd44f126d60dae1998239892a)

Conflicts:

	hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8416aec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8416aec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8416aec

Branch: refs/heads/branch-2.6
Commit: d8416aec3cde83ecea8b8b7b2c83d3a643e9f08c
Parents: fc01ec8
Author: Kihwal Lee <kihwal@apache.org>
Authored: Thu Oct 15 11:26:14 2015 -0500
Committer: Junping Du <junping_du@apache.org>
Committed: Tue Feb 2 17:10:32 2016 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/server/datanode/BlockReceiver.java     |  5 +--
 .../org/apache/hadoop/hdfs/TestFileAppend2.java | 43 ++++++++++++++++++++
 3 files changed, 48 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
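
For context on the failure mode, the sketch below condenses the regression test
added at the end of this commit into a standalone reproduction: write under 512
bytes (one checksum chunk), append while keeping the file open, then read it
back. The class name, path, and byte counts are illustrative, and a
single-DataNode MiniDFSCluster with the default 512-byte chunk is assumed;
before this fix the final read could fail with a checksum error.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class Hdfs9220Repro {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
          cluster.waitActive();
          DistributedFileSystem fs = cluster.getFileSystem();
          Path p = new Path("/repro");           // illustrative path
          byte[] buf = new byte[512];

          FSDataOutputStream out = fs.create(p); // first write: a partial chunk
          out.write(buf, 0, 200);
          out.close();

          out = fs.append(p);                    // append, total still < 512 bytes
          out.write(buf, 0, 300);
          out.hflush();                          // flush but keep the file open

          // Before this fix, reading the partial chunk here could fail
          // checksum verification, because the DataNode had rewritten the
          // partial-chunk checksum incorrectly during the append.
          FSDataInputStream in = fs.open(p);
          in.read(0, buf, 0, 500);
          in.close();
          out.close();
        } finally {
          cluster.shutdown();
        }
      }
    }
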


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8416aec/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 66f76ad..52ab6cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -49,6 +49,9 @@ Release 2.6.4 - UNRELEASED
     HDFS-8722. Optimize datanode writes for small writes and flushes. 
     (Kihwal Lee)
 
+    HDFS-9220. Reading small file (< 512 bytes) that is open for append fails
+    due to incorrect checksum (Jing Zhao via kihwal)
+
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8416aec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 43f766e..e702774 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -742,11 +742,10 @@ class BlockReceiver implements Closeable {
             final int offset = checksumBuf.arrayOffset() +
                 checksumBuf.position() + skip;
             final int end = offset + checksumLen - skip;
-            // If offset > end, there is no more checksum to write.
+            // If offset >= end, there is no more checksum to write.
             // I.e. a partial chunk checksum rewrite happened and there is no
             // more to write after that.
-            if (offset > end) {
-              assert crcBytes != null;
+            if (offset >= end && doCrcRecalc) {
               lastCrc = crcBytes;
             } else {
               final int remainingBytes = checksumLen - skip;
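
To make the boundary concrete, here is the arithmetic for the case the old
strict inequality missed. The values are illustrative (a single 4-byte CRC for
one partial chunk whose checksum was just recalculated), not taken from the
patch:

    // Illustrative values: one 4-byte CRC in the packet, all of it already
    // covered by the partial-chunk checksum rewrite.
    int checksumLen = 4;                                  // checksum bytes in this packet
    int skip = 4;                                         // bytes covered by the rewrite
    int offset = /* arrayOffset + position */ 0 + skip;   // = 4
    int end = offset + checksumLen - skip;                // = 4 + 4 - 4 = 4
    // offset == end: nothing is left to write. The old test (offset > end)
    // was false here, so the code fell into the copy branch with zero
    // remaining bytes instead of adopting the recalculated crcBytes as
    // lastCrc; the new test (offset >= end && doCrcRecalc) catches exactly
    // this case.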

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8416aec/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
index eecd23b..672fb63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
@@ -28,6 +28,7 @@ import java.util.Arrays;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -390,4 +391,46 @@ public class TestFileAppend2 {
     //
     assertTrue("testComplexAppend Worker encountered exceptions.", globalStatus);
   }
+
+  /**
+   * Make sure when the block length after appending is less than 512 bytes, the
+   * checksum re-calculation and overwrite are performed correctly.
+   */
+  @Test
+  public void testAppendLessThanChecksumChunk() throws Exception {
+    final byte[] buf = new byte[1024];
+    final MiniDFSCluster cluster = new MiniDFSCluster
+        .Builder(new HdfsConfiguration()).numDataNodes(1).build();
+    cluster.waitActive();
+    DistributedFileSystem fs = cluster.getFileSystem();
+    FSDataOutputStream out = null;
+    FSDataInputStream in = null;
+    try {
+      final int len1 = 200;
+      final int len2 = 300;
+      final Path p = new Path("/foo");
+
+      out = fs.create(p);
+      out.write(buf, 0, len1);
+      out.close();
+
+      out = fs.append(p);
+      out.write(buf, 0, len2);
+      // flush but leave open
+      out.hflush();
+
+      // read data to verify the replica's content and checksum are correct
+      in = fs.open(p);
+      final int length = in.read(0, buf, 0, len1 + len2);
+      assertTrue(length > 0);
+    } finally {
+      if (in != null) {
+        in.close();
+      }
+      if (out != null) {
+        out.close();
+      }
+      cluster.shutdown();
+    }
+  }
 }
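
For anyone verifying the backport, one way to run just the new test, assuming
the module's standard Maven/Surefire setup:

    cd hadoop-hdfs-project/hadoop-hdfs
    mvn test -Dtest=TestFileAppend2#testAppendLessThanChecksumChunk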

