hadoop-common-commits mailing list archives

From zhang...@apache.org
Subject [31/50] [abbrv] hadoop git commit: HDFS-9220. Reading small file (< 512 bytes) that is open for append fails due to incorrect checksum. Contributed by Jing Zhao.
Date Sat, 17 Oct 2015 03:43:32 GMT
HDFS-9220. Reading small file (< 512 bytes) that is open for append fails due to incorrect
checksum. Contributed by Jing Zhao.
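As a rough illustration of the failure mode (not part of the commit itself): a file shorter than one 512-byte checksum chunk is created, reopened for append, flushed, and then read while still open for append. The sketch below shows that client-side sequence and mirrors the new TestFileAppend2 case further down; the path and write sizes are illustrative assumptions.

// A minimal client-side sketch of the scenario this fix guards against;
// the path "/tmp/small-append" and the 200/300-byte writes are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SmallAppendReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path p = new Path("/tmp/small-append");
    byte[] buf = new byte[512];

    // Write fewer than 512 bytes, so the block is a single partial chunk.
    try (FSDataOutputStream create = fs.create(p)) {
      create.write(buf, 0, 200);
    }

    // Append a little more and hflush, keeping the file open for append.
    FSDataOutputStream out = fs.append(p);
    out.write(buf, 0, 300);
    out.hflush();

    // Before this fix, reading here could fail with a checksum error because
    // the partial-chunk checksum stored on the DataNode was incorrect.
    try (FSDataInputStream in = fs.open(p)) {
      in.read(0, buf, 0, 500);
    }
    out.close();
  }
}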


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7c36cbd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7c36cbd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7c36cbd

Branch: refs/heads/HDFS-7966
Commit: c7c36cbd6218f46c33d7fb2f60cd52cb29e6d720
Parents: dc45a7a
Author: Kihwal Lee <kihwal@apache.org>
Authored: Thu Oct 15 11:24:14 2015 -0500
Committer: Kihwal Lee <kihwal@apache.org>
Committed: Thu Oct 15 11:24:14 2015 -0500

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/server/datanode/BlockReceiver.java     |  5 ++-
 .../org/apache/hadoop/hdfs/TestFileAppend2.java | 37 ++++++++++++++++++++
 3 files changed, 42 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7c36cbd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 674b090..c7a2423 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2129,6 +2129,9 @@ Release 2.7.2 - UNRELEASED
     HDFS-8676. Delayed rolling upgrade finalization can cause heartbeat
     expiration. (Walter Su via kihwal)
 
+    HDFS-9220. Reading small file (< 512 bytes) that is open for append fails
+    due to incorrect checksum (Jing Zhao via kihwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7c36cbd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 4c40e83..99cdbea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -753,11 +753,10 @@ class BlockReceiver implements Closeable {
             final int offset = checksumBuf.arrayOffset() +
                 checksumBuf.position() + skip;
             final int end = offset + checksumLen - skip;
-            // If offset > end, there is no more checksum to write.
+            // If offset >= end, there is no more checksum to write.
             // I.e. a partial chunk checksum rewrite happened and there is no
             // more to write after that.
-            if (offset > end) {
-              assert crcBytes != null;
+            if (offset >= end && doCrcRecalc) {
               lastCrc = crcBytes;
             } else {
               final int remainingBytes = checksumLen - skip;

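For readers following the hunk above: end - offset works out to checksumLen - skip, so when the block is shorter than one 512-byte chunk and the entire partial-chunk checksum has been recalculated, skip equals checksumLen and offset equals end. The old test (offset > end) missed exactly that boundary case. The snippet below is a standalone sketch of that arithmetic with assumed concrete values, not DataNode code.

// A standalone sketch of the boundary arithmetic in the hunk above; the
// variable names mirror BlockReceiver, the concrete values are assumed.
public class ChecksumBoundarySketch {
  public static void main(String[] args) {
    final int checksumLen = 4;  // e.g. one 4-byte CRC for a single <512-byte chunk
    final int skip = 4;         // all checksum bytes already covered by the
                                // recalculated partial-chunk checksum
    final int arrayOffset = 0;
    final int position = 0;

    final int offset = arrayOffset + position + skip;  // 4
    final int end = offset + checksumLen - skip;       // 4

    // end - offset == checksumLen - skip, so a block shorter than one chunk
    // gives offset == end: the old test (offset > end) is false here, while
    // the corrected test (offset >= end) recognizes that there is nothing
    // left to write and keeps the recalculated CRC.
    System.out.println("offset=" + offset + ", end=" + end);
    System.out.println("offset > end  -> " + (offset > end));   // false
    System.out.println("offset >= end -> " + (offset >= end));  // true
  }
}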
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7c36cbd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
index 8a95027..3c72db3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
@@ -30,6 +30,7 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -549,4 +550,40 @@ public class TestFileAppend2 {
   public void testComplexAppend2() throws IOException {
     testComplexAppend(true);
   }
+
+  /**
+   * Make sure when the block length after appending is less than 512 bytes, the
+   * checksum re-calculation and overwrite are performed correctly.
+   */
+  @Test
+  public void testAppendLessThanChecksumChunk() throws Exception {
+    final byte[] buf = new byte[1024];
+    final MiniDFSCluster cluster = new MiniDFSCluster
+        .Builder(new HdfsConfiguration()).numDataNodes(1).build();
+    cluster.waitActive();
+
+    try (DistributedFileSystem fs = cluster.getFileSystem()) {
+      final int len1 = 200;
+      final int len2 = 300;
+      final Path p = new Path("/foo");
+
+      FSDataOutputStream out = fs.create(p);
+      out.write(buf, 0, len1);
+      out.close();
+
+      out = fs.append(p);
+      out.write(buf, 0, len2);
+      // flush but leave open
+      out.hflush();
+
+      // read data to verify the replica's content and checksum are correct
+      FSDataInputStream in = fs.open(p);
+      final int length = in.read(0, buf, 0, len1 + len2);
+      assertTrue(length > 0);
+      in.close();
+      out.close();
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }

