hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From s..@apache.org
Subject git commit: HDFS-7263. Snapshot read can reveal future bytes for appended files. Contributed by Tao Luo.
Date Thu, 30 Oct 2014 03:22:28 GMT
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2a6be65c8 -> 0126cf16b


HDFS-7263. Snapshot read can reveal future bytes for appended files. Contributed by Tao Luo.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0126cf16
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0126cf16
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0126cf16

Branch: refs/heads/trunk
Commit: 0126cf16b73843da2e504b6a03fee8bd93a404d5
Parents: 2a6be65
Author: Tao Luo <tao.luo@wandisco.com>
Authored: Wed Oct 29 20:15:01 2014 -0700
Committer: Konstantin V Shvachko <shv@apache.org>
Committed: Wed Oct 29 20:21:12 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  3 +-
 .../snapshot/TestSnapshotFileLength.java        | 42 +++++++++++++++-----
 3 files changed, 36 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0126cf16/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 11f757a..90c4b3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -367,6 +367,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-7235. DataNode#transferBlock should report blocks that don't exist
     using reportBadBlock (yzhang via cmccabe)
 
+    HDFS-7263. Snapshot read can reveal future bytes for appended files.
+    (Tao Luo via shv)
 
 Release 2.6.0 - UNRELEASED
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0126cf16/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index e83f067..008cc54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -804,7 +804,8 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
           }
           int realLen = (int) Math.min(len, (blockEnd - pos + 1L));
           if (locatedBlocks.isLastBlockComplete()) {
-            realLen = (int) Math.min(realLen, locatedBlocks.getFileLength());
+            realLen = (int) Math.min(realLen,
+                locatedBlocks.getFileLength() - pos);
           }
           int result = readBuffer(strategy, off, realLen, corruptedBlockMap);
           

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0126cf16/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
index 32534f0..98aafc1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
@@ -21,7 +21,10 @@ import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
 
 
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hdfs.AppendTestUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -55,6 +58,8 @@ public class TestSnapshotFileLength {
 
   @Before
   public void setUp() throws Exception {
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCKSIZE);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCKSIZE);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
                                               .build();
     cluster.waitActive();
@@ -81,40 +86,57 @@ public class TestSnapshotFileLength {
 
     int bytesRead;
     byte[] buffer = new byte[BLOCKSIZE * 8];
+    int origLen = BLOCKSIZE + 1;
+    int toAppend = BLOCKSIZE;
     FSDataInputStream fis = null;
     FileStatus fileStatus = null;
 
     // Create and write a file.
     Path file1 = new Path(sub, file1Name);
-    DFSTestUtil.createFile(hdfs, file1, 0, REPLICATION, SEED);
-    DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, 0, BLOCKSIZE, REPLICATION, SEED);
+    DFSTestUtil.appendFile(hdfs, file1, origLen);
 
     // Create a snapshot on the parent directory.
     hdfs.allowSnapshot(sub);
     hdfs.createSnapshot(sub, snapshot1);
 
-    // Write more data to the file.
-    DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);
+    Path file1snap1
+        = SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
+
+    // Append to the file.
+    FSDataOutputStream out = hdfs.append(file1);
+    try {
+      AppendTestUtil.write(out, 0, toAppend);
+      // Test reading from snapshot of file that is open for append
+      byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs, file1snap1);
+      assertThat("Wrong data size in snapshot.",
+          dataFromSnapshot.length, is(origLen));
+    } finally {
+      out.close();
+    }
 
     // Make sure we can read the entire file via its non-snapshot path.
     fileStatus = hdfs.getFileStatus(file1);
-    assertThat(fileStatus.getLen(), is((long) BLOCKSIZE * 2));
+    assertThat(fileStatus.getLen(), is((long) origLen + toAppend));
     fis = hdfs.open(file1);
     bytesRead = fis.read(0, buffer, 0, buffer.length);
-    assertThat(bytesRead, is(BLOCKSIZE * 2));
+    assertThat(bytesRead, is(origLen + toAppend));
     fis.close();
 
     // Try to open the file via its snapshot path.
-    Path file1snap1 =
-        SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
     fis = hdfs.open(file1snap1);
     fileStatus = hdfs.getFileStatus(file1snap1);
-    assertThat(fileStatus.getLen(), is((long) BLOCKSIZE));
+    assertThat(fileStatus.getLen(), is((long) origLen));
 
     // Make sure we can only read up to the snapshot length.
     bytesRead = fis.read(0, buffer, 0, buffer.length);
-    assertThat(bytesRead, is(BLOCKSIZE));
+    assertThat(bytesRead, is(origLen));
     fis.close();
+
+    byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs,
+        file1snap1);
+    assertThat("Wrong data size in snapshot.",
+        dataFromSnapshot.length, is(origLen));
   }
 
   /**


Mime
View raw message