hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From szets...@apache.org
Subject svn commit: r1477181 - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/ src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/
Date Mon, 29 Apr 2013 16:37:36 GMT
Author: szetszwo
Date: Mon Apr 29 16:37:30 2013
New Revision: 1477181

URL: http://svn.apache.org/r1477181
Log:
HDFS-4578.  Restrict snapshot IDs to 24-bit wide.  Contributed by Arpit Agarwal

Added:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt?rev=1477181&r1=1477180&r2=1477181&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
(original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
Mon Apr 29 16:37:30 2013
@@ -318,3 +318,6 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4767. If a directory is snapshottable, do not replace it when clearing
   quota.  (Jing Zhao via szetszwo)
+
+  HDFS-4578.  Restrict snapshot IDs to 24-bit wide.  (Arpit Agarwal via
+  szetszwo)

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java?rev=1477181&r1=1477180&r2=1477181&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
(original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
Mon Apr 29 16:37:30 2013
@@ -54,6 +54,7 @@ import com.google.common.base.Preconditi
  */
 public class SnapshotManager implements SnapshotStats {
   private final FSDirectory fsdir;
+  private static final int SNAPSHOT_ID_BIT_WIDTH = 24;
 
   private final AtomicInteger numSnapshots = new AtomicInteger();
 
@@ -129,7 +130,25 @@ public class SnapshotManager implements 
   }
 
   /**
+   * Find the source root directory where the snapshot will be taken
+   * for a given path.
+   *
+   * @param path The directory path where the snapshot will be taken.
+   * @return Snapshottable directory.
+   * @throws IOException
+   *           if the given path does not lead to an existing
+   *           snapshottable directory.
+   */
+  public INodeDirectorySnapshottable getSnapshottableRoot(final String path
+      ) throws IOException {
+    final INodesInPath i = fsdir.getINodesInPath4Write(path);
+    return INodeDirectorySnapshottable.valueOf(i.getLastINode(), path);
+  }
+
+  /**
    * Create a snapshot of the given path.
+   * It is assumed that the caller will perform synchronization.
+   *
    * @param path
    *          The directory path where the snapshot will be taken.
    * @param snapshotName
@@ -142,10 +161,17 @@ public class SnapshotManager implements 
    */
   public String createSnapshot(final String path, String snapshotName
       ) throws IOException {
-    // Find the source root directory path where the snapshot is taken.
-    final INodesInPath i = fsdir.getINodesInPath4Write(path);
-    final INodeDirectorySnapshottable srcRoot
-        = INodeDirectorySnapshottable.valueOf(i.getLastINode(), path);
+    INodeDirectorySnapshottable srcRoot = getSnapshottableRoot(path);
+
+    if (snapshotCounter == getMaxSnapshotID()) {
+      // We have reached the maximum allowable snapshot ID and since we don't
+      // handle rollover we will fail all subsequent snapshot creation
+      // requests.
+      //
+      throw new SnapshotException(
+          "Failed to create the snapshot. The FileSystem has run out of " +
+          "snapshot IDs and ID rollover is not supported.");
+    }
 
     srcRoot.addSnapshot(snapshotCounter, snapshotName);
       
@@ -166,14 +192,10 @@ public class SnapshotManager implements 
       BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
       throws IOException {
     // parse the path, and check if the path is a snapshot path
-    INodesInPath inodesInPath = fsdir.getINodesInPath4Write(path.toString());
-    // transfer the inode for path to an INodeDirectorySnapshottable.
     // the INodeDirectorySnapshottable#valueOf method will throw Exception 
     // if the path is not for a snapshottable directory
-    INodeDirectorySnapshottable dir = INodeDirectorySnapshottable.valueOf(
-        inodesInPath.getLastINode(), path.toString());
-    
-    dir.removeSnapshot(snapshotName, collectedBlocks, removedINodes);
+    INodeDirectorySnapshottable srcRoot = getSnapshottableRoot(path);
+    srcRoot.removeSnapshot(snapshotName, collectedBlocks, removedINodes);
     numSnapshots.getAndDecrement();
   }
 
@@ -298,4 +320,14 @@ public class SnapshotManager implements 
     
     return snapshotRoot.computeDiff(from, to);
   }
-}
\ No newline at end of file
+
+  /**
+   * Returns the maximum allowable snapshot ID based on the bit width of the
+   * snapshot ID.
+   *
+   * @return maximum allowable snapshot ID.
+   */
+   public int getMaxSnapshotID() {
+    return ((1 << SNAPSHOT_ID_BIT_WIDTH) - 1);
+  }
+}

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java?rev=1477181&r1=1477180&r2=1477181&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
(original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
Mon Apr 29 16:37:30 2013
@@ -140,6 +140,10 @@ public class TestNestedSnapshots {
     }
   }
 
+  /**
+   * Test the snapshot limit of a single snapshottable directory.
+   * @throws Exception
+   */
   @Test (timeout=300000)
   public void testSnapshotLimit() throws Exception {
     final int step = 1000;

Added: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java?rev=1477181&view=auto
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
(added)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
Mon Apr 29 16:37:30 2013
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import java.util.ArrayList;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.junit.*;
+import static org.mockito.Mockito.*;
+
+
+/**
+ * Testing snapshot manager functionality.
+ */
+public class TestSnapshotManager {
+  private static final int testMaxSnapshotLimit = 7;
+
+  /**
+   * Test that the global limit on snapshots is honored.
+   */
+  @Test (timeout=10000)
+  public void testSnapshotLimits() throws Exception {
+    // Setup mock objects for SnapshotManager.createSnapshot.
+    //
+    INodeDirectorySnapshottable ids = mock(INodeDirectorySnapshottable.class);
+    FSDirectory fsdir = mock(FSDirectory.class);
+
+    SnapshotManager sm = spy(new SnapshotManager(fsdir));
+    doReturn(ids).when(sm).getSnapshottableRoot(anyString());
+    doReturn(testMaxSnapshotLimit).when(sm).getMaxSnapshotID();
+
+    // Create testMaxSnapshotLimit snapshots. These should all succeed.
+    //
+    for (Integer i = 0; i < testMaxSnapshotLimit; ++i) {
+      sm.createSnapshot("dummy", i.toString());
+    }
+
+    // Attempt to create one more snapshot. This should fail due to snapshot
+    // ID rollover.
+    //
+    try {
+      sm.createSnapshot("dummy", "shouldFailSnapshot");
+      Assert.fail("Expected SnapshotException not thrown");
+    } catch (SnapshotException se) {
+      Assert.assertTrue(
+          se.getMessage().toLowerCase().contains("rollover"));
+    }
+
+    // Delete a snapshot to free up a slot.
+    //
+    sm.deleteSnapshot("", "", mock(INode.BlocksMapUpdateInfo.class), new ArrayList<INode>());
+
+    // Attempt to create a snapshot again. It should still fail due
+    // to snapshot ID rollover.
+    //
+    try {
+      sm.createSnapshot("dummy", "shouldFailSnapshot2");
+      Assert.fail("Expected SnapshotException not thrown");
+    } catch (SnapshotException se) {
+      Assert.assertTrue(
+          se.getMessage().toLowerCase().contains("rollover"));
+    }
+  }
+}



Mime
View raw message