From: jing9@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Mailing-List: contact hdfs-commits-help@hadoop.apache.org; run by ezmlm
Date: Fri, 12 Jul 2013 00:27:50 -0000
Subject: svn commit: r1502403 - in /hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/ src/test/java/org/apache/hadoop/hdfs/ser...
Message-Id: <20130712002750.B5DAF2388994@eris.apache.org>
X-Mailer: svnmailer-1.0.9

Author: jing9
Date: Fri Jul 12 00:27:50 2013
New Revision: 1502403

URL: http://svn.apache.org/r1502403
Log:
HDFS-4978. Merge r1502402 from branch-2.

Modified:
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1502403&r1=1502402&r2=1502403&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Jul 12 00:27:50 2013
@@ -958,6 +958,8 @@ Release 2.1.0-beta - 2013-07-02
     HDFS-4797. BlockScanInfo does not override equals(..) and hashCode()
     consistently. (szetszwo)
 
+    HDFS-4978. Make disallowSnapshot idempotent. (jing9)
+
 Release 2.0.5-alpha - 06/06/2013
 
   INCOMPATIBLE CHANGES
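As an illustrative, non-authoritative sketch (not part of r1502403) of the client-visible effect recorded in the CHANGES.txt entry above: after HDFS-4978, repeating allowSnapshot/disallowSnapshot on the same directory is expected to succeed as a no-op rather than throw a SnapshotException. The MiniDFSCluster setup, class name, and the /dir path below are assumptions made only for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DisallowSnapshotIdempotencySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Single-datanode test cluster; illustrative setup only.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      DistributedFileSystem dfs = cluster.getFileSystem();
      Path dir = new Path("/dir");    // illustrative path
      dfs.mkdirs(dir);

      dfs.allowSnapshot(dir);         // make the directory snapshottable
      dfs.allowSnapshot(dir);         // repeated call: still succeeds

      dfs.disallowSnapshot(dir);      // back to non-snapshottable
      dfs.disallowSnapshot(dir);      // repeated call: no SnapshotException after HDFS-4978
    } finally {
      cluster.shutdown();
    }
  }
}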
Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1502403&r1=1502402&r2=1502403&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Fri Jul 12 00:27:50 2013
@@ -1041,6 +1041,7 @@ public interface ClientProtocol {
    * @param snapshotRoot the directory to be snapped
    * @throws IOException on error
    */
+  @Idempotent
   public void allowSnapshot(String snapshotRoot)
       throws IOException;
 
@@ -1049,6 +1050,7 @@ public interface ClientProtocol {
    * @param snapshotRoot the directory to disallow snapshot
    * @throws IOException on error
    */
+  @Idempotent
   public void disallowSnapshot(String snapshotRoot)
       throws IOException;
 
@@ -1067,6 +1069,7 @@ public interface ClientProtocol {
    * @return The difference report represented as a {@link SnapshotDiffReport}.
    * @throws IOException on error
    */
+  @Idempotent
   public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
       String fromSnapshot, String toSnapshot) throws IOException;
 }
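The @Idempotent marker added above (the existing annotation from org.apache.hadoop.io.retry) tells Hadoop's RPC retry machinery that re-invoking the method, for example after a timeout or NameNode failover, cannot change the outcome, so the call may be retried automatically. A minimal hedged sketch of how such a method is declared; the interface name below is illustrative and not part of ClientProtocol:

import java.io.IOException;
import org.apache.hadoop.io.retry.Idempotent;

// Illustrative only; ClientProtocol itself carries the real annotations.
interface ExampleSnapshotProtocol {
  // Safe to retry: with HDFS-4978, disallowing snapshots on an already
  // non-snapshottable directory is a no-op instead of an error.
  @Idempotent
  void disallowSnapshot(String snapshotRoot) throws IOException;
}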
Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java?rev=1502403&r1=1502402&r2=1502403&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java Fri Jul 12 00:27:50 2013
@@ -38,8 +38,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SnapshotDiffInfo;
 
-import com.google.common.base.Preconditions;
-
 /**
  * Manage snapshottable directories and their snapshots.
  *
@@ -128,8 +126,7 @@ public class SnapshotManager implements
 
   /** Remove the given snapshottable directory from {@link #snapshottables}. */
   private void removeSnapshottable(INodeDirectorySnapshottable s) {
-    final INodeDirectorySnapshottable removed = snapshottables.remove(s.getId());
-    Preconditions.checkState(s == removed);
+    snapshottables.remove(s.getId());
   }
 
   /** Remove snapshottable directories from {@link #snapshottables} */
@@ -148,17 +145,18 @@ public class SnapshotManager implements
    */
   public void resetSnapshottable(final String path) throws IOException {
     final INodesInPath iip = fsdir.getINodesInPath4Write(path);
-    final INodeDirectorySnapshottable s = INodeDirectorySnapshottable.valueOf(
-        iip.getLastINode(), path);
+    final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
+    if (!d.isSnapshottable()) {
+      // the directory is already non-snapshottable
+      return;
+    }
+    final INodeDirectorySnapshottable s = (INodeDirectorySnapshottable) d;
     if (s.getNumSnapshots() > 0) {
       throw new SnapshotException("The directory " + path
           + " has snapshot(s). "
          + "Please redo the operation after removing all the snapshots.");
     }
     if (s == fsdir.getRoot()) {
-      if (s.getSnapshotQuota() == 0) {
-        throw new SnapshotException("Root is not a snapshottable directory");
-      }
       s.setSnapshotQuota(0);
     } else {
       s.replaceSelf(iip.getLatestSnapshot(), fsdir.getINodeMap());
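The resetSnapshottable() change above is the heart of the fix: instead of failing a precondition when the directory is already non-snapshottable (or when the root's snapshot quota is already 0), the method now returns early, which is what makes disallowSnapshot safe to repeat. A generic, hedged sketch of that early-return pattern follows; the names are illustrative and are not HDFS classes.

import java.util.HashSet;
import java.util.Set;

// Illustrative registry, not SnapshotManager; shows the idempotent early-return pattern.
final class SnapshottableRegistry {
  private final Set<String> snapshottable = new HashSet<>();

  void allow(String dir) {
    snapshottable.add(dir);   // adding an existing entry is already a no-op
  }

  void disallow(String dir) {
    if (!snapshottable.contains(dir)) {
      return;                 // already non-snapshottable: succeed silently, do not throw
    }
    snapshottable.remove(dir);
  }
}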
Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java?rev=1502403&r1=1502402&r2=1502403&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java Fri Jul 12 00:27:50 2013
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.na
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SNAPSHOT_LIMIT;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Random;
@@ -41,7 +40,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -132,13 +130,6 @@ public class TestNestedSnapshots {
     print("delete snapshot " + rootSnapshot);
     hdfs.disallowSnapshot(rootPath);
     print("disallow snapshot " + rootStr);
-    try {
-      hdfs.disallowSnapshot(rootPath);
-      fail("Expect snapshot exception when disallowing snapshot on root again");
-    } catch (SnapshotException e) {
-      GenericTestUtils.assertExceptionContains(
-          "Root is not a snapshottable directory", e);
-    }
 
     //change foo to non-snapshottable
     hdfs.deleteSnapshot(foo, s1name);

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java?rev=1502403&r1=1502402&r2=1502403&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java Fri Jul 12 00:27:50 2013
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree.Node;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer;
@@ -374,6 +376,71 @@ public class TestSnapshot {
           "Directory is not a snapshottable directory: " + dir, e);
     }
   }
+
+  /**
+   * Test multiple calls of allowSnapshot and disallowSnapshot, to make sure
+   * they are idempotent
+   */
+  @Test
+  public void testAllowAndDisallowSnapshot() throws Exception {
+    final Path dir = new Path("/dir");
+    final Path file0 = new Path(dir, "file0");
+    final Path file1 = new Path(dir, "file1");
+    DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+
+    INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertFalse(dirNode.isSnapshottable());
+
+    hdfs.allowSnapshot(dir);
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertTrue(dirNode.isSnapshottable());
+    // call allowSnapshot again
+    hdfs.allowSnapshot(dir);
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertTrue(dirNode.isSnapshottable());
+
+    // disallowSnapshot on dir
+    hdfs.disallowSnapshot(dir);
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertFalse(dirNode.isSnapshottable());
+    // do it again
+    hdfs.disallowSnapshot(dir);
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertFalse(dirNode.isSnapshottable());
+
+    // same process on root
+
+    final Path root = new Path("/");
+    INodeDirectory rootNode = fsdir.getINode4Write(root.toString())
+        .asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    // root is snapshottable dir, but with 0 snapshot quota
+    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+
+    hdfs.allowSnapshot(root);
+    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    assertEquals(INodeDirectorySnapshottable.SNAPSHOT_LIMIT,
+        ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+    // call allowSnapshot again
+    hdfs.allowSnapshot(root);
+    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    assertEquals(INodeDirectorySnapshottable.SNAPSHOT_LIMIT,
+        ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+
+    // disallowSnapshot on dir
+    hdfs.disallowSnapshot(root);
+    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+    // do it again
+    hdfs.disallowSnapshot(root);
+    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+  }
 
   /**
    * Prepare a list of modifications. A modification may be a file creation,