From: jing9@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Subject: svn commit: r1487643 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
Date: Wed, 29 May 2013 20:54:54 -0000
Message-Id: <20130529205454.4B53923889F1@eris.apache.org>

Author: jing9
Date: Wed May 29 20:54:53 2013
New Revision: 1487643

URL: http://svn.apache.org/r1487643
Log:
HDFS-4842. Identify the correct prior snapshot when deleting a snapshot under
a renamed subtree. Contributed by Jing Zhao.

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1487643&r1=1487642&r2=1487643&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed May 29 20:54:53 2013
@@ -265,7 +265,7 @@ Trunk (Unreleased)
     HDFS-4687. TestDelegationTokenForProxyUser#testWebHdfsDoAs is flaky with
     JDK7. (Andrew Wang via atm)
 
-  BREAKDOWN OF HDFS-2802 HDFS SNAPSHOT SUBTASKS
+  BREAKDOWN OF HDFS-2802 HDFS SNAPSHOT SUBTASKS AND RELATED JIRAS
 
     HDFS-4076. Support snapshot of single files. (szetszwo)
 
@@ -604,6 +604,9 @@ Trunk (Unreleased)
     HDFS-4809. When a QuotaExceededException is thrown during rename, the
     quota usage should be subtracted back. (Jing Zhao via szetszwo)
 
+    HDFS-4842. Identify the correct prior snapshot when deleting a
+    snapshot under a renamed subtree. (jing9)
+
 Release 2.0.5-beta - UNRELEASED
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java?rev=1487643&r1=1487642&r2=1487643&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java Wed May 29 20:54:53 2013
@@ -493,6 +493,11 @@ public abstract class INodeReference ext
       if (prior == null) {
         prior = getPriorSnapshot(this);
       }
+
+      if (prior != null
+          && Snapshot.ID_COMPARATOR.compare(snapshot, prior) <= 0) {
+        return Quota.Counts.newInstance();
+      }
 
       Quota.Counts counts = getReferredINode().cleanSubtree(snapshot, prior,
           collectedBlocks, removedINodes);
@@ -596,7 +601,11 @@ public abstract class INodeReference ext
       if (prior == null) {
         prior = getPriorSnapshot(this);
       }
-      if (snapshot != null && snapshot.equals(prior)) {
+      // if prior is not null, and prior is not before the to-be-deleted
+      // snapshot, we can quit here and leave the snapshot deletion work to
+      // the src tree of the rename
+      if (snapshot != null && prior != null
+          && Snapshot.ID_COMPARATOR.compare(snapshot, prior) <= 0) {
         return Quota.Counts.newInstance();
       }
       return getReferredINode().cleanSubtree(snapshot, prior,

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java?rev=1487643&r1=1487642&r2=1487643&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java Wed May 29 20:54:53 2013
@@ -1915,4 +1915,166 @@ public class TestRenameWithSnapshots {
     INodeFile barNode = (INodeFile) fsdir.getINode4Write(bar3.toString());
     assertSame(fsdir.getINode4Write(dir3.toString()), barNode.getParent());
   }
+
+  /**
+   * Rename a directory and delete a snapshot under the same snapshottable directory.
+   */
+  @Test
+  public void testRenameDirAndDeleteSnapshot_6() throws Exception {
+    final Path test = new Path("/test");
+    final Path dir1 = new Path(test, "dir1");
+    final Path dir2 = new Path(test, "dir2");
+    hdfs.mkdirs(dir1);
+    hdfs.mkdirs(dir2);
+
+    final Path foo = new Path(dir2, "foo");
+    final Path bar = new Path(foo, "bar");
+    final Path file = new Path(bar, "file");
+    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
+
+    // take a snapshot on /test
+    SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
+
+    // delete /test/dir2/foo/bar/file after snapshot s0, so that there is a
+    // snapshot copy recorded in bar
+    hdfs.delete(file, true);
+
+    // rename foo from dir2 to dir1
+    final Path newfoo = new Path(dir1, foo.getName());
+    hdfs.rename(foo, newfoo);
+
+    final Path foo_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
+        "dir2/foo");
+    assertTrue("the snapshot path " + foo_s0 + " should exist",
+        hdfs.exists(foo_s0));
+
+    // delete snapshot s0. The deletion will first go down through dir1, and
+    // find foo in the created list of dir1. Then it will use null as the prior
+    // snapshot and continue the snapshot deletion process in the subtree of
+    // foo. We need to make sure the snapshot s0 can be deleted cleanly in the
+    // foo subtree.
+    hdfs.deleteSnapshot(test, "s0");
+    // check the internal state after deleting s0
+    assertFalse("after deleting s0, " + foo_s0 + " should not exist",
+        hdfs.exists(foo_s0));
+    INodeDirectoryWithSnapshot dir2Node = (INodeDirectoryWithSnapshot) fsdir
+        .getINode4Write(dir2.toString());
+    assertTrue("the diff list of " + dir2
+        + " should be empty after deleting s0", dir2Node.getDiffs().asList()
+        .isEmpty());
+
+    assertTrue(hdfs.exists(newfoo));
+    INode fooRefNode = fsdir.getINode4Write(newfoo.toString());
+    assertTrue(fooRefNode instanceof INodeReference.DstReference);
+    INodeDirectory fooNode = fooRefNode.asDirectory();
+    // fooNode should still be an INodeDirectoryWithSnapshot since we call
+    // recordModification before the rename
+    assertTrue(fooNode instanceof INodeDirectoryWithSnapshot);
+    assertTrue(((INodeDirectoryWithSnapshot) fooNode).getDiffs().asList()
+        .isEmpty());
+    INodeDirectory barNode = fooNode.getChildrenList(null).get(0).asDirectory();
+    // bar should also be an INodeDirectoryWithSnapshot, and both its diff
+    // list and its children list should be empty
+    assertTrue(((INodeDirectoryWithSnapshot) barNode).getDiffs().asList()
+        .isEmpty());
+    assertTrue(barNode.getChildrenList(null).isEmpty());
+
+    restartClusterAndCheckImage();
+  }
+
+  /**
+   * Unit test for HDFS-4842.
+   */
+  @Test
+  public void testRenameDirAndDeleteSnapshot_7() throws Exception {
+    fsn.getSnapshotManager().setAllowNestedSnapshots(true);
+    final Path test = new Path("/test");
+    final Path dir1 = new Path(test, "dir1");
+    final Path dir2 = new Path(test, "dir2");
+    hdfs.mkdirs(dir1);
+    hdfs.mkdirs(dir2);
+
+    final Path foo = new Path(dir2, "foo");
+    final Path bar = new Path(foo, "bar");
+    final Path file = new Path(bar, "file");
+    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
+
+    // take snapshots s0 and s1 on /test
+    SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
+    SnapshotTestHelper.createSnapshot(hdfs, test, "s1");
+    // delete file so we have a snapshot copy for s1 in bar
+    hdfs.delete(file, true);
+
+    // create another snapshot on dir2
+    SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
+
+    // rename foo from dir2 to dir1
+    final Path newfoo = new Path(dir1, foo.getName());
+    hdfs.rename(foo, newfoo);
+
+    // delete snapshot s1
+    hdfs.deleteSnapshot(test, "s1");
+
+    // make sure the snapshot copy of file in s1 is merged to s0. For
+    // HDFS-4842, we need to make sure that we do not wrongly use s2 as the
+    // prior snapshot of s1.
+    final Path file_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2",
+        "foo/bar/file");
+    assertFalse(hdfs.exists(file_s2));
+    final Path file_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
+        "dir2/foo/bar/file");
+    assertTrue(hdfs.exists(file_s0));
+
+    // check dir1: foo should be in the created list of s0
+    INodeDirectoryWithSnapshot dir1Node = (INodeDirectoryWithSnapshot) fsdir
+        .getINode4Write(dir1.toString());
+    List<DirectoryDiff> dir1DiffList = dir1Node.getDiffs().asList();
+    assertEquals(1, dir1DiffList.size());
+    List<INode> dList = dir1DiffList.get(0).getChildrenDiff()
+        .getList(ListType.DELETED);
+    assertTrue(dList.isEmpty());
+    List<INode> cList = dir1DiffList.get(0).getChildrenDiff()
+        .getList(ListType.CREATED);
+    assertEquals(1, cList.size());
+    INode cNode = cList.get(0);
+    INode fooNode = fsdir.getINode4Write(newfoo.toString());
+    assertSame(cNode, fooNode);
+
+    // check foo and its subtree
+    final Path newbar = new Path(newfoo, bar.getName());
+    INodeDirectoryWithSnapshot barNode = (INodeDirectoryWithSnapshot) fsdir
+        .getINode4Write(newbar.toString());
+    assertSame(fooNode.asDirectory(), barNode.getParent());
+    // bar should only have a snapshot diff for s0
+    List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
+    assertEquals(1, barDiffList.size());
+    DirectoryDiff diff = barDiffList.get(0);
+    assertEquals("s0", Snapshot.getSnapshotName(diff.snapshot));
+    // and file should be stored in the deleted list of this snapshot diff
+    assertEquals("file", diff.getChildrenDiff().getList(ListType.DELETED)
+        .get(0).getLocalName());
+
+    // check dir2: a WithName instance for foo should be in the deleted list
+    // of the snapshot diff for s2
+    INodeDirectoryWithSnapshot dir2Node = (INodeDirectoryWithSnapshot) fsdir
+        .getINode4Write(dir2.toString());
+    List<DirectoryDiff> dir2DiffList = dir2Node.getDiffs().asList();
+    // dir2Node should contain 2 snapshot diffs: one for s2, and the other was
+    // originally for s1 (created when dir2 was transformed into a snapshottable
+    // dir) and currently corresponds to s0
+    assertEquals(2, dir2DiffList.size());
+    dList = dir2DiffList.get(1).getChildrenDiff().getList(ListType.DELETED);
+    assertEquals(1, dList.size());
+    cList = dir2DiffList.get(0).getChildrenDiff().getList(ListType.CREATED);
+    assertTrue(cList.isEmpty());
+    final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2",
+        foo.getName());
+    INodeReference.WithName fooNode_s2 =
+        (INodeReference.WithName) fsdir.getINode(foo_s2.toString());
+    assertSame(dList.get(0), fooNode_s2);
+    assertSame(fooNode.asReference().getReferredINode(),
+        fooNode_s2.getReferredINode());
+
+    restartClusterAndCheckImage();
+  }
 }
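
The guard added to INodeReference above boils down to a single ordering check between the to-be-deleted snapshot and the prior snapshot. The self-contained sketch below, kept deliberately outside the HDFS code base, illustrates that check with the s0/s1/s2 scenario exercised by testRenameDirAndDeleteSnapshot_7. SnapshotStub, ID_COMPARATOR, and shouldSkipCleaning are hypothetical stand-ins for Snapshot, Snapshot.ID_COMPARATOR, and the new early return; they are not HDFS APIs.

import java.util.Comparator;

/**
 * Standalone sketch of the ordering check introduced in this commit: a rename
 * target reference only cleans a to-be-deleted snapshot when that snapshot is
 * strictly newer than the prior snapshot; otherwise the deletion is left to
 * the rename-source tree. All names here are hypothetical stand-ins.
 */
public class PriorSnapshotCheckSketch {

  /** Minimal stand-in for a snapshot: ordered by a monotonically growing id. */
  static final class SnapshotStub {
    final int id;
    final String name;
    SnapshotStub(int id, String name) {
      this.id = id;
      this.name = name;
    }
  }

  /**
   * Orders snapshots by id, in the spirit of Snapshot.ID_COMPARATOR
   * (the real comparator also handles null arguments, omitted here).
   */
  static final Comparator<SnapshotStub> ID_COMPARATOR =
      Comparator.comparingInt(s -> s.id);

  /**
   * Returns true when the reference should skip cleaning, i.e. when the
   * to-be-deleted snapshot is not newer than the prior snapshot, mirroring
   * the "compare(snapshot, prior) <= 0" guard in the patch.
   */
  static boolean shouldSkipCleaning(SnapshotStub snapshot, SnapshotStub prior) {
    return snapshot != null && prior != null
        && ID_COMPARATOR.compare(snapshot, prior) <= 0;
  }

  public static void main(String[] args) {
    SnapshotStub s0 = new SnapshotStub(0, "s0");
    SnapshotStub s1 = new SnapshotStub(1, "s1");
    SnapshotStub s2 = new SnapshotStub(2, "s2");

    // Deleting s1 with s0 correctly identified as the prior snapshot:
    // the check passes and the subtree cleaning proceeds here.
    System.out.println(shouldSkipCleaning(s1, s0));   // false

    // With a later snapshot such as s2 supplied as the prior of s1,
    // the guard is triggered and the reference bails out early.
    System.out.println(shouldSkipCleaning(s1, s2));   // true
  }
}

Running the class prints false and then true, which is exactly the distinction the guard encodes: proceed when the prior snapshot is older than the one being deleted, and otherwise return an empty Quota.Counts and let the rename-source side do the work.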