hadoop-common-commits mailing list archives

From: x...@apache.org
Subject: hadoop git commit: HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by Xiaoyu Yao.
Date: Wed, 07 Mar 2018 03:51:17 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 dbd69cb2c -> 4f3c4e451


HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by Xiaoyu Yao.

(cherry picked from commit 8faf0b50d435039f69ea35f592856ca04d378809)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f3c4e45
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f3c4e45
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f3c4e45

Branch: refs/heads/branch-2
Commit: 4f3c4e4514d3ab7f2788bd87c690a77e9d543aa5
Parents: dbd69cb
Author: Xiaoyu Yao <xyao@apache.org>
Authored: Thu Feb 8 08:59:48 2018 -0800
Committer: Xiaoyu Yao <xyao@apache.org>
Committed: Tue Mar 6 18:49:13 2018 -0800

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirConcatOp.java     |   4 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 126 +++++++++++++++++++
 2 files changed, 129 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f3c4e45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index c51b178..14df725 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -246,7 +246,9 @@ class FSDirConcatOp {
     for (INodeFile nodeToRemove : srcList) {
       if(nodeToRemove != null) {
         nodeToRemove.clearBlocks();
-        nodeToRemove.getParent().removeChild(nodeToRemove);
+        // Ensure the nodeToRemove is cleared from snapshot diff list
+        nodeToRemove.getParent().removeChild(nodeToRemove,
+            targetIIP.getLatestSnapshotId());
         fsd.getINodeMap().remove(nodeToRemove);
         count++;
       }

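For context, a minimal client-side sketch of the sequence this change protects: concat source files under a snapshottable directory, then ask for a snapshot diff report. With the fix, the removed source inodes are also cleared from the parent's snapshot diff list, so the report no longer carries stale entries for them. This sketch is not part of the commit; it assumes a DistributedFileSystem handle (e.g. from a MiniDFSCluster), and the class and method names are hypothetical.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

class ConcatSnapshotDiffSketch {
  static void concatThenDiff(DistributedFileSystem hdfs) throws Exception {
    Path dir = new Path("/st");
    hdfs.mkdirs(dir);
    hdfs.allowSnapshot(dir);

    // Target file exists before the snapshot; sources are created afterwards.
    Path dest = new Path(dir, "dest.txt");
    hdfs.createNewFile(dest);
    hdfs.createSnapshot(dir, "base");

    Path[] srcs = new Path[3];
    for (int i = 0; i < srcs.length; i++) {
      srcs[i] = new Path(dir, i + ".txt");
      DFSTestUtil.createFile(hdfs, srcs[i], 1024L, (short) 1, 0L);
    }

    // concat removes the source inodes; without the latest snapshot id passed to
    // removeChild, stale entries could linger in the directory's snapshot diff list.
    hdfs.concat(dest, srcs);

    // Empty "to" snapshot name means the current state of the directory.
    SnapshotDiffReport report = hdfs.getSnapshotDiffReport(dir, "base", "");
    // Expect a MODIFY entry for dest.txt and no entries for the removed sources.
    System.out.println(report);
  }
}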
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f3c4e45/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index ca53788..8bd7967 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -26,18 +26,22 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -61,11 +65,15 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests snapshot deletion.
  */
 public class TestSnapshotDeletion {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestSnapshotDeletion.class);
   protected static final long seed = 0;
   protected static final short REPLICATION = 3;
   protected static final short REPLICATION_1 = 2;
@@ -1232,4 +1240,122 @@ public class TestSnapshotDeletion {
     // make sure bar has been cleaned from inodeMap
     Assert.assertNull(fsdir.getInode(fileId));
   }
+
+  @Test
+  public void testSnapshotWithConcatException() throws Exception {
+    final Path st = new Path("/st");
+    hdfs.mkdirs(st);
+    hdfs.allowSnapshot(st);
+
+    Path[] files = new Path[3];
+    for (int i = 0; i < 3; i++) {
+      files[i] = new Path(st, i+ ".txt");
+    }
+
+    Path dest = new Path(st, "dest.txt");
+    hdfs.createNewFile(dest);
+    hdfs.createSnapshot(st, "ss");
+
+    for (int j = 0; j < 3; j++) {
+      FileSystem fs = cluster.getFileSystem();
+      DFSTestUtil.createFile(fs, files[j], false, 1024,
+          1024, 512, (short) 1, RandomUtils.nextLong(1, 512), true);
+    }
+
+    hdfs.createSnapshot(st, "s0");
+
+    // Verify the SnapshotException is thrown as expected for HDFS-4529
+    exception.expect(RemoteException.class);
+    String error = "Concat: the source file /st/0.txt is in snapshot";
+    exception.expectMessage(error);
+    hdfs.concat(dest, files);
+
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    cluster.restartNameNodes();
+  }
+
+  @Test
+  public void testSnapshotDeleteWithConcat() throws Exception {
+    final Path st = new Path("/st");
+    hdfs.mkdirs(st);
+    hdfs.allowSnapshot(st);
+
+    Path[] files = new Path[3];
+    for (int i = 0; i < 3; i++) {
+      files[i] = new Path(st, i+ ".txt");
+    }
+
+    Path dest = new Path(st, "dest.txt");
+    hdfs.createNewFile(dest);
+    hdfs.createSnapshot(st, "ss");
+
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 3; j++) {
+        FileSystem fs = cluster.getFileSystem();
+        DFSTestUtil.createFile(fs, files[j], false, 1024,
+            1024, 512, (short) 1, RandomUtils.nextLong(1, 512), true);
+      }
+
+      hdfs.concat(dest, files);
+
+      hdfs.createSnapshot(st, "s" + i);
+    }
+
+
+    hdfs.deleteSnapshot(st, "s1");
+
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    cluster.restartNameNodes();
+  }
+
+  @Test
+  public void testSnapshotDiffReportWithConcat() throws Exception {
+    final Path st = new Path("/st");
+    hdfs.mkdirs(st);
+    hdfs.allowSnapshot(st);
+
+    Path[] files = new Path[3];
+    for (int i = 0; i < 3; i++) {
+      files[i] = new Path(st, i+ ".txt");
+    }
+
+    Path dest = new Path(st, "dest.txt");
+    hdfs.createNewFile(dest);
+    hdfs.createSnapshot(st, "ss");
+
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 3; j++) {
+        FileSystem fs = cluster.getFileSystem();
+        DFSTestUtil.createFile(fs, files[j], false, 1024,
+            1024, 512, (short) 1, RandomUtils.nextLong(1, 512), true);
+      }
+
+      hdfs.concat(dest, files);
+
+      hdfs.createSnapshot(st, "s" + i);
+
+      SnapshotDiffReport sdr = hdfs.getSnapshotDiffReport(st, "s" + i, "ss");
+      LOG.info("Snapshot Diff s{} to ss : {}", i, sdr);
+      Assert.assertEquals(sdr.getDiffList().size(), 1);
+      Assert.assertTrue(sdr.getDiffList().get(0).getType() ==
+          SnapshotDiffReport.DiffType.MODIFY);
+      Assert.assertTrue(new Path(st, DFSUtilClient.bytes2String(
+          sdr.getDiffList().get(0).getSourcePath())).equals(dest));
+    }
+
+    hdfs.deleteSnapshot(st, "s1");
+
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    cluster.restartNameNodes();
+  }
+
 }



