hadoop-common-commits mailing list archives

From: w...@apache.org
Subject: hadoop git commit: HDFS-9705. Refine the behaviour of getFileChecksum when length = 0. Contributed by SammiChen and Kai Zheng.
Date: Wed, 29 Mar 2017 04:41:16 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 554bc8342 -> 82b4a9c3d


HDFS-9705. Refine the behaviour of getFileChecksum when length = 0. Contributed by SammiChen and Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82b4a9c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82b4a9c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82b4a9c3

Branch: refs/heads/branch-2
Commit: 82b4a9c3d058bcf54051d3075ca14f30988f5cb4
Parents: 554bc83
Author: Andrew Wang <wang@apache.org>
Authored: Tue Mar 28 21:40:53 2017 -0700
Committer: Andrew Wang <wang@apache.org>
Committed: Tue Mar 28 21:40:53 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  6 +--
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 40 +++++++++++++++-----
 .../apache/hadoop/hdfs/TestFSOutputSummer.java  |  8 +++-
 3 files changed, 40 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
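
In caller terms, the change makes a checksum request with length = 0 behave like checksumming a zero-byte file instead of returning null. A minimal caller-side sketch of the new contract (paths and class name are hypothetical; FileSystem#getFileChecksum(Path) and #getFileChecksum(Path, long) are the public entry points exercised by the tests below):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ZeroLengthChecksumSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());

        // Hypothetical paths: /tmp/empty is a 0-byte file, /tmp/data is non-empty.
        FileChecksum emptyFile  = fs.getFileChecksum(new Path("/tmp/empty"));
        FileChecksum zeroLength = fs.getFileChecksum(new Path("/tmp/data"), 0);

        // After this change both are the magic zero checksum:
        // MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51
        System.out.println(emptyFile.equals(zeroLength)); // expected: true
      }
    }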


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82b4a9c3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index af311f3..9e8cba7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1950,12 +1950,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       // If there is no block allocated for the file,
       // return one with the magic entry that matches what previous
       // hdfs versions return.
-      if (locatedblocks.size() == 0) {
+      if (locatedblocks.size() == 0 || length == 0) {
         return new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5);
       }
-
-      // we should never get here since the validity was checked
-      // when getCrcType() was called above.
+      // We will get here if the above condition is not met.
       return null;
     }
   }
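
The magic entry is an MD5MD5CRC32GzipFileChecksum built with bytesPerCRC = 0 and crcPerBlock = 0, which is where the "MD5-of-0MD5-of-0CRC32" algorithm name comes from. A hedged sketch of its shape (the constructor and the MD5Hash hex constructor are existing public APIs; the digest value is taken as given from the test below rather than derived here):

    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
    import org.apache.hadoop.io.MD5Hash;

    public class MagicChecksumSketch {
      public static void main(String[] args) {
        // bytesPerCRC = 0 and crcPerBlock = 0 produce the "MD5-of-0MD5-of-0" prefix.
        MD5Hash fileMD5 = new MD5Hash("70bc8f4b72a86921468bf8e8441dce51");
        FileChecksum magic = new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5);

        // Prints: MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51
        System.out.println(magic.getAlgorithmName() + ":" + fileMD5);
      }
    }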

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82b4a9c3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 9932f9c..98bffd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -921,13 +921,6 @@ public class TestDistributedFileSystem {
         out.close();
       }
 
-      // verify the magic val for zero byte files
-      {
-        final FileChecksum zeroChecksum = hdfs.getFileChecksum(zeroByteFile);
-        assertEquals(zeroChecksum.toString(),
-            "MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51");
-      }
-
       //write another file
       final Path bar = new Path(dir, "bar" + n);
       {
@@ -936,8 +929,37 @@ public class TestDistributedFileSystem {
         out.write(data);
         out.close();
       }
-  
-      { //verify checksum
+
+      {
+        // verify the magic val for zero byte file
+        final FileChecksum zeroChecksum = hdfs.getFileChecksum(zeroByteFile);
+        final String magicValue =
+            "MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51";
+        assertEquals(magicValue, zeroChecksum.toString());
+
+        // verify that a 0 request length on a non-empty file matches
+        // the zero byte file checksum
+        final FileChecksum checksumWith0 = hdfs.getFileChecksum(bar, 0);
+        assertEquals(zeroChecksum, checksumWith0);
+
+        // verify non-existent file
+        try {
+          hdfs.getFileChecksum(new Path(dir, "none-existent"), 8);
+          fail();
+        } catch (Exception ioe) {
+          FileSystem.LOG.info("GOOD: getting an exception", ioe);
+        }
+
+        // verify non-existent file and 0 request length
+        try {
+          hdfs.getFileChecksum(new Path(dir, "none-existent"), 0);
+          fail();
+        } catch (Exception ioe) {
+          FileSystem.LOG.info("GOOD: getting an exception", ioe);
+        }
+
+        // verify checksums
         final FileChecksum barcs = hdfs.getFileChecksum(bar);
         final int barhashcode = barcs.hashCode();
         assertEquals(hdfsfoocs.hashCode(), barhashcode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82b4a9c3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
index 9dcd449..ebbf2ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -27,6 +28,7 @@ import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.junit.Test;
@@ -104,7 +106,11 @@ public class TestFSOutputSummer {
     checkAndEraseData(actual, 0, expected, "Read Sanity Test");
     stm.close();
     // do a sanity check. Get the file checksum
-    fileSys.getFileChecksum(name);
+    FileChecksum fileChecksum = fileSys.getFileChecksum(name);
+    if (fileSys.getConf().get(
+        DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY).toString().equals("NULL")) {
+      assertNull(fileChecksum);
+    }
   }
 
   private void cleanupFile(Path name) throws IOException {
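
The TestFSOutputSummer change above pins down the one case that still returns null: files written with dfs.checksum.type set to NULL. A minimal sketch of that configuration (class name and path are hypothetical, and it assumes the file was written under the same configuration, as in the test; DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY is the real key):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class NullChecksumTypeSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // With the NULL checksum type no block CRCs exist, so
        // DFSClient#getFileChecksum falls through to the "return null" branch.
        conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
        FileSystem fs = FileSystem.get(conf);

        FileChecksum checksum =
            fs.getFileChecksum(new Path("/tmp/data")); // hypothetical path
        System.out.println(checksum); // expected: null
      }
    }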

