From: szetszwo@apache.org
To: hdfs-commits@hadoop.apache.org
Subject: svn commit: r1405688 - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/ src/test/java/org/apache/hadoop/hdfs/se...
Date: Mon, 05 Nov 2012 01:22:55 -0000
Message-Id: <20121105012255.6F8E223889D5@eris.apache.org>

Author: szetszwo
Date: Mon Nov 5 01:22:54 2012
New Revision: 1405688

URL: http://svn.apache.org/viewvc?rev=1405688&view=rev
Log:
HDFS-4147. When there is a snapshot in a subtree, deletion of the subtree
should fail.  Contributed by Jing Zhao

Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt?rev=1405688&r1=1405687&r2=1405688&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt Mon Nov 5 01:22:54 2012
@@ -53,3 +53,6 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4149. Implement the disallowSnapshot(..) in FSNamesystem and add
   resetSnapshottable(..) to SnapshotManager.  (szetszwo)
+
+  HDFS-4147. When there is a snapshot in a subtree, deletion of the subtree
+  should fail.  (Jing Zhao via szetszwo)
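In client terms, the change means a recursive delete now fails fast whenever the target subtree contains a snapshottable directory that already has snapshots. Below is a minimal sketch of that behavior, not part of this commit: the SnapshotDeleteExample class is invented for illustration, it assumes a build of this branch with MiniDFSCluster on the test classpath, and it follows the allowSnapshot(..)/createSnapshot(..) calls and argument order used by the TestSnapshot cases in this revision.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class SnapshotDeleteExample {
      public static void main(String[] args) throws Exception {
        // Spin up a one-datanode test cluster (assumes the HDFS test jars).
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
        try {
          DistributedFileSystem hdfs =
              (DistributedFileSystem) cluster.getFileSystem();
          Path dir = new Path("/TestSnapshot");
          Path sub1 = new Path(dir, "sub1");
          hdfs.mkdirs(sub1);

          // Mark sub1 snapshottable and take one snapshot under it.
          hdfs.allowSnapshot(sub1.toString());
          hdfs.createSnapshot("s1", sub1.toString());

          try {
            hdfs.delete(dir, true);  // dir's subtree contains a snapshotted dir
          } catch (Exception e) {
            // Expected after this change: the NameNode rejects the deletion
            // ("... is snapshottable and already has snapshots").
            System.out.println("delete failed as expected: " + e.getMessage());
          }
        } finally {
          cluster.shutdown();
        }
      }
    }

Before this revision the delete would simply have removed the subtree; afterwards the IOException raised in FSDirectory.delete(..) surfaces to the client as a RemoteException naming the offending snapshottable directory.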
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1405688&r1=1405687&r2=1405688&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Mon Nov 5 01:22:54 2012
@@ -997,7 +997,7 @@ public class FSDirectory implements Clos
    * @return true on successful deletion; else false
    */
   boolean delete(String src, List<Block> collectedBlocks)
-      throws UnresolvedLinkException {
+      throws IOException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
     }
@@ -1006,7 +1006,22 @@ public class FSDirectory implements Clos
     int filesRemoved;
     writeLock();
     try {
-      filesRemoved = unprotectedDelete(src, collectedBlocks, now);
+      INode[] inodes = rootDir.getExistingPathINodes(normalizePath(src), false);
+      if (checkPathINodes(inodes, src) == 0) {
+        filesRemoved = 0;
+      } else {
+        // Before removing the node, first check if the targetNode is a
+        // snapshottable dir with snapshots, or if any of its descendants is
+        // a snapshottable dir with snapshots
+        INode targetNode = inodes[inodes.length-1];
+        INode snapshotNode = hasSnapshot(targetNode);
+        if (snapshotNode != null) {
+          throw new IOException("The directory " + targetNode.getFullPathName()
+              + " cannot be deleted since " + snapshotNode.getFullPathName()
+              + " is snapshottable and already has snapshots");
+        }
+        filesRemoved = unprotectedDelete(inodes, collectedBlocks, now);
+      }
     } finally {
       writeUnlock();
     }
@@ -1020,6 +1035,23 @@ public class FSDirectory implements Clos
     return true;
   }
 
+  private int checkPathINodes(INode[] inodes, String src) {
+    if (inodes == null || inodes.length == 0
+        || inodes[inodes.length - 1] == null) {
+      if(NameNode.stateChangeLog.isDebugEnabled()) {
+        NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
+            + "failed to remove " + src + " because it does not exist");
+      }
+      return 0;
+    } else if (inodes.length == 1) { // src is the root
+      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
+          + "failed to remove " + src
+          + " because the root is not allowed to be deleted");
+      return 0;
+    }
+    return inodes.length;
+  }
+
   /**
    * @return true if the path is a non-empty directory; otherwise, return false.
    */
@@ -1050,7 +1082,14 @@ public class FSDirectory implements Clos
       throws UnresolvedLinkException {
     assert hasWriteLock();
     List<Block> collectedBlocks = new ArrayList<Block>();
-    int filesRemoved = unprotectedDelete(src, collectedBlocks, mtime);
+    int filesRemoved = 0;
+
+    INode[] inodes = rootDir.getExistingPathINodes(normalizePath(src), false);
+    if (checkPathINodes(inodes, src) == 0) {
+      filesRemoved = 0;
+    } else {
+      filesRemoved = unprotectedDelete(inodes, collectedBlocks, mtime);
+    }
     if (filesRemoved > 0) {
       getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
     }
@@ -1059,32 +1098,16 @@ public class FSDirectory implements Clos
   /**
   * Delete a path from the name space
   * Update the count at each ancestor directory with quota
-   * @param src a string representation of a path to an inode
+   * @param inodes the INode array resolved from the path
   * @param collectedBlocks blocks collected from the deleted path
   * @param mtime the time the inode is removed
   * @return the number of inodes deleted; 0 if no inodes are deleted.
   */
-  int unprotectedDelete(String src, List<Block> collectedBlocks,
-      long mtime) throws UnresolvedLinkException {
+  int unprotectedDelete(INode[] inodes, List<Block> collectedBlocks,
+      long mtime) {
     assert hasWriteLock();
-    src = normalizePath(src);
-
-    INode[] inodes = rootDir.getExistingPathINodes(src, false);
+
     INode targetNode = inodes[inodes.length-1];
-
-    if (targetNode == null) { // non-existent src
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
-            +"failed to remove "+src+" because it does not exist");
-      }
-      return 0;
-    }
-    if (inodes.length == 1) { // src is the root
-      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " +
-          "failed to remove " + src +
-          " because the root is not allowed to be deleted");
-      return 0;
-    }
+
     int pos = inodes.length - 1;
     // Remove the node from the namespace
     targetNode = removeChild(inodes, pos);
@@ -1096,10 +1119,34 @@ public class FSDirectory implements Clos
     int filesRemoved = targetNode.collectSubtreeBlocksAndClear(collectedBlocks);
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
-          +src+" is removed");
+          + targetNode.getFullPathName() + " is removed");
     }
     return filesRemoved;
   }
+
+  /**
+   * Check if the given INode (or one of its descendants) is snapshottable and
+   * already has snapshots.
+   *
+   * @param target The given INode
+   * @return The INode which is snapshottable and already has snapshots.
+   */
+  private static INode hasSnapshot(INode target) {
+    if (target instanceof INodeDirectory) {
+      INodeDirectory targetDir = (INodeDirectory) target;
+      if (targetDir.isSnapshottable()
+          && ((INodeDirectorySnapshottable) targetDir).getNumSnapshots() > 0) {
+        return target;
+      }
+      for (INode child : targetDir.getChildren()) {
+        INode snapshotDir = hasSnapshot(child);
+        if (snapshotDir != null) {
+          return snapshotDir;
+        }
+      }
+    }
+    return null;
+  }
 
   /**
   * Replaces the specified INode.
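The heart of the FSDirectory change is the recursive hasSnapshot(..) walk: resolve the deletion target, search its subtree depth-first, and abort the delete if any directory in it is snapshottable with at least one snapshot. The standalone model below restates that guard outside HDFS; the Dir class and its fields are invented for illustration, and it models directories only (the real walk skips files via the instanceof INodeDirectory check).

    import java.util.ArrayList;
    import java.util.List;

    class Dir {
      final String name;
      final List<Dir> children = new ArrayList<Dir>();
      boolean snapshottable;   // stands in for INodeDirectory.isSnapshottable()
      int numSnapshots;        // stands in for getNumSnapshots()

      Dir(String name) { this.name = name; }

      /** Depth-first search: first dir that is snapshottable with snapshots, or null. */
      static Dir hasSnapshot(Dir target) {
        if (target.snapshottable && target.numSnapshots > 0) {
          return target;
        }
        for (Dir child : target.children) {
          Dir offending = hasSnapshot(child);
          if (offending != null) {
            return offending;
          }
        }
        return null;
      }

      public static void main(String[] args) {
        Dir a = new Dir("/a");
        Dir b = new Dir("/a/b");
        Dir c = new Dir("/a/b/c");
        a.children.add(b);
        b.children.add(c);
        c.snapshottable = true;
        c.numSnapshots = 1;
        Dir offending = hasSnapshot(a);
        System.out.println(offending == null
            ? "subtree deletable" : "cannot delete: " + offending.name);
      }
    }

Run as-is it prints "cannot delete: /a/b/c": deleting /a is refused because a descendant still holds a snapshot, mirroring the IOException message thrown in delete(..) above.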
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java?rev=1405688&r1=1405687&r2=1405688&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java Mon Nov 5 01:22:54 2012
@@ -75,7 +75,7 @@ public class INodeDirectorySnapshottable
     setSnapshotQuota(snapshotQuota);
   }
 
-  int getNumSnapshots() {
+  public int getNumSnapshots() {
     return snapshots.size();
   }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java?rev=1405688&r1=1405687&r2=1405688&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java Mon Nov 5 01:22:54 2012
@@ -33,9 +33,12 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.ipc.RemoteException;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
 /**
  * This class tests snapshot functionality. One or multiple snapshots are
@@ -50,7 +53,8 @@ public class TestSnapshot {
 
   private final Path dir = new Path("/TestSnapshot");
   private final Path sub1 = new Path(dir, "sub1");
-
+  private final Path subsub1 = new Path(sub1, "subsub1");
+
   protected Configuration conf;
   protected MiniDFSCluster cluster;
   protected FSNamesystem fsn;
@@ -61,6 +65,9 @@ public class TestSnapshot {
   * records a snapshot root.
   */
  protected static ArrayList<Path> snapshotList = new ArrayList<Path>();
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
 
   @Before
   public void setUp() throws Exception {
@@ -187,6 +194,66 @@ public class TestSnapshot {
   }
 
   /**
+   * Creating snapshots for a directory that is not snapshottable must fail.
+   *
+   * TODO: Listing/Deleting snapshots for a directory that is not snapshottable
+   * should also fail.
+   */
+  @Test
+  public void testSnapshottableDirectory() throws Exception {
+    Path file0 = new Path(sub1, "file0");
+    Path file1 = new Path(sub1, "file1");
+    DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+
+    exception.expect(RemoteException.class);
+    hdfs.createSnapshot("s1", sub1.toString());
+  }
+
+  /**
+   * Deleting snapshottable directory with snapshots must fail.
+   */
+  @Test
+  public void testDeleteDirectoryWithSnapshot() throws Exception {
+    Path file0 = new Path(sub1, "file0");
+    Path file1 = new Path(sub1, "file1");
+    DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+
+    // Allow snapshot for sub1, and create snapshot for it
+    hdfs.allowSnapshot(sub1.toString());
+    hdfs.createSnapshot("s1", sub1.toString());
+
+    // Deleting a snapshottable dir with snapshots should fail
+    exception.expect(RemoteException.class);
+    hdfs.delete(sub1, true);
+  }
+
+  /**
+   * Deleting a directory with a snapshottable descendant with snapshots must
+   * fail.
+   */
+  @Test
+  public void testDeleteDirectoryWithSnapshot2() throws Exception {
+    Path file0 = new Path(sub1, "file0");
+    Path file1 = new Path(sub1, "file1");
+    DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+
+    Path subfile1 = new Path(subsub1, "file0");
+    Path subfile2 = new Path(subsub1, "file1");
+    DFSTestUtil.createFile(hdfs, subfile1, BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, subfile2, BLOCKSIZE, REPLICATION, seed);
+
+    // Allow snapshot for subsub1, and create snapshot for it
+    hdfs.allowSnapshot(subsub1.toString());
+    hdfs.createSnapshot("s1", subsub1.toString());
+
+    // Deleting dir while its descendant subsub1 has snapshots should fail
+    exception.expect(RemoteException.class);
+    hdfs.delete(dir, true);
+  }
+
   /**
   * Base class to present changes applied to current file/dir. A modification
   * can be file creation, deletion, or other modifications such as appending on
   * an existing file. Three abstract methods need to be implemented by