hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1426432 - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/ src/test/java/org/apache/hadoop/hdfs/se...
Date: Fri, 28 Dec 2012 08:49:33 GMT
Author: szetszwo
Date: Fri Dec 28 08:49:33 2012
New Revision: 1426432

URL: http://svn.apache.org/viewvc?rev=1426432&view=rev
Log:
HDFS-4330. Support snapshots up to the snapshot limit.

Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt?rev=1426432&r1=1426431&r2=1426432&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt Fri Dec 28 08:49:33 2012
@@ -87,3 +87,5 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4317. Change INode and its subclasses to support HDFS-4103. (szetszwo)
 
   HDFS-4103. Support O(1) snapshot creation. (szetszwo)
+
+  HDFS-4330. Support snapshots up to the snapshot limit. (szetszwo)

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1426432&r1=1426431&r2=1426432&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Dec 28 08:49:33 2012
@@ -2022,7 +2022,7 @@ public class FSDirectory implements Clos
         }
       } else {
         // a non-quota directory; so replace it with a directory with quota
-        return dirNode.replaceSelf4Quota(latest, oldNsQuota, oldDsQuota);
+        return dirNode.replaceSelf4Quota(latest, nsQuota, dsQuota);
       }
       return (oldNsQuota != nsQuota || oldDsQuota != dsQuota) ? dirNode : null;
     }
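
The fix above passes the newly requested quota values (nsQuota, dsQuota) to replaceSelf4Quota rather than the directory's previous values (oldNsQuota, oldDsQuota); with the old arguments, converting a non-quota directory would re-apply its unset quota and drop the requested one. A minimal, self-contained illustration of the corrected argument choice (the class and method names below are hypothetical stand-ins, not HDFS code):

    // Hypothetical stand-in for the argument fix above; not HDFS code.
    class QuotaArgumentExample {
      static final long UNSET = -1;                      // directory currently has no quota

      /** Stand-in for replaceSelf4Quota: returns the quota the new directory will carry. */
      static long[] replaceWithQuota(long nsQuota, long dsQuota) {
        return new long[] { nsQuota, dsQuota };
      }

      public static void main(String[] args) {
        long oldNsQuota = UNSET, oldDsQuota = UNSET;     // previous (unset) values
        long nsQuota = 100, dsQuota = 8192;              // values requested by setQuota

        long[] beforeFix = replaceWithQuota(oldNsQuota, oldDsQuota); // {-1, -1}: request lost
        long[] afterFix  = replaceWithQuota(nsQuota, dsQuota);       // {100, 8192}: request applied

        System.out.println(beforeFix[0] + "," + beforeFix[1]);
        System.out.println(afterFix[0] + "," + afterFix[1]);
      }
    }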

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1426432&r1=1426431&r2=1426432&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Fri Dec 28 08:49:33 2012
@@ -31,7 +31,11 @@ import org.apache.hadoop.hdfs.util.ReadO
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
-/** The directory with snapshots. */
+/**
+ * The directory with snapshots. It maintains a list of snapshot diffs for
+ * storing snapshot data. When there are modifications to the directory, the old
+ * data is stored in the latest snapshot, if there is any.
+ */
 public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
   /**
    * The difference between the current state and a previous snapshot
@@ -351,8 +355,8 @@ public class INodeDirectoryWithSnapshot 
 
     /** Compare diffs with snapshot ID. */
     @Override
-    public int compareTo(final Snapshot that_snapshot) {
-      return Snapshot.ID_COMPARATOR.compare(this.snapshot, that_snapshot);
+    public int compareTo(final Snapshot that) {
+      return Snapshot.ID_COMPARATOR.compare(this.snapshot, that);
     }
     
     /** Is the inode the root of the snapshot? */
@@ -381,9 +385,13 @@ public class INodeDirectoryWithSnapshot 
 
     private INodeDirectory getSnapshotINode() {
       // get from this diff, then the posterior diff and then the current inode
-      return snapshotINode != null? snapshotINode
-          : posteriorDiff != null? posteriorDiff.getSnapshotINode()
-              : INodeDirectoryWithSnapshot.this; 
+      for(SnapshotDiff d = this; ; d = d.posteriorDiff) {
+        if (d.snapshotINode != null) {
+          return d.snapshotINode;
+        } else if (d.posteriorDiff == null) {
+          return INodeDirectoryWithSnapshot.this;
+        }
+      }
     }
 
     /**
@@ -429,17 +437,18 @@ public class INodeDirectoryWithSnapshot 
 
     /** @return the child with the given name. */
     INode getChild(byte[] name, boolean checkPosterior) {
-      final INode[] array = diff.accessPrevious(name);
-      if (array != null) {
-        // this diff is able to find it
-        return array[0]; 
-      } else if (!checkPosterior) {
-        // Since checkPosterior is false, return null, i.e. not found.   
-        return null;
-      } else {
-        // return the posterior INode.
-        return posteriorDiff != null? posteriorDiff.getChild(name, true)
-            : INodeDirectoryWithSnapshot.this.getChild(name, null);
+      for(SnapshotDiff d = this; ; d = d.posteriorDiff) {
+        final INode[] array = d.diff.accessPrevious(name);
+        if (array != null) {
+          // the diff is able to find it
+          return array[0]; 
+        } else if (!checkPosterior) {
+          // Since checkPosterior is false, return null, i.e. not found.   
+          return null;
+        } else if (d.posteriorDiff == null) {
+          // no more posterior diff, get from current inode.
+          return INodeDirectoryWithSnapshot.this.getChild(name, null);
+        }
       }
     }
     
@@ -554,6 +563,7 @@ public class INodeDirectoryWithSnapshot 
     return save2Snapshot(latest, null);
   }
 
+  /** Save the snapshot copy to the latest snapshot. */
   public Pair<INodeDirectory, INodeDirectory> save2Snapshot(Snapshot latest,
       INodeDirectory snapshotCopy) {
     return latest == null? null
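
The new class javadoc and the two loop rewrites above reflect the same design: a directory with snapshots keeps a chain of snapshot diffs, and a lookup that misses in one diff falls through to the posterior diff and, past the last diff, to the current directory. A simplified, self-contained sketch of that fall-through lookup (the types below are illustrative models, not the actual HDFS classes):

    import java.util.HashMap;
    import java.util.Map;

    /** Simplified model of the posterior-diff lookup; not the HDFS implementation. */
    public class DiffChainExample {
      /** One snapshot diff: remembers children as they were when the snapshot was taken. */
      static class SnapshotDiff {
        final Map<String, String> previousChildren = new HashMap<>(); // name -> recorded state
        SnapshotDiff posteriorDiff;                                   // the next, more recent diff
      }

      private final Map<String, String> currentChildren = new HashMap<>(); // present-day state

      /** Walk from a diff towards the present, as the rewritten getChild() does. */
      String getChild(SnapshotDiff start, String name) {
        for (SnapshotDiff d = start; ; d = d.posteriorDiff) {
          final String recorded = d.previousChildren.get(name);
          if (recorded != null) {
            return recorded;                  // this diff still remembers the old value
          } else if (d.posteriorDiff == null) {
            return currentChildren.get(name); // fell through every diff: use the current state
          }
        }
      }

      public static void main(String[] args) {
        DiffChainExample dir = new DiffChainExample();
        SnapshotDiff s0 = new SnapshotDiff(), s1 = new SnapshotDiff();
        s0.posteriorDiff = s1;
        s0.previousChildren.put("a", "a@s0");      // "a" was modified after snapshot s0
        dir.currentChildren.put("b", "b@now");     // "b" was never modified
        System.out.println(dir.getChild(s0, "a")); // a@s0  -> found in the oldest diff
        System.out.println(dir.getChild(s0, "b")); // b@now -> falls through to current state
      }
    }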

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java?rev=1426432&r1=1426431&r2=1426432&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java Fri Dec 28 08:49:33 2012
@@ -51,7 +51,6 @@ public class TestFSDirectory {
 
   private final Path sub11 = new Path(sub1, "sub11");
   private final Path file3 = new Path(sub11, "file3");
-  private final Path file4 = new Path(sub1, "z_file4");
   private final Path file5 = new Path(sub1, "z_file5");
 
   private final Path sub2 = new Path(dir, "sub2");
@@ -106,29 +105,13 @@ public class TestFSDirectory {
 
     for(; (line = in.readLine()) != null; ) {
       line = line.trim();
-      if (!line.contains("snapshot")) {
-        Assert.assertTrue(line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
+      if (!line.isEmpty() && !line.contains("snapshot")) {
+        Assert.assertTrue("line=" + line,
+            line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
             || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
         checkClassName(line);
       }
     }
-
-    LOG.info("Create a new file " + file4);
-    DFSTestUtil.createFile(hdfs, file4, 1024, REPLICATION, seed);
-
-    final StringBuffer b2 = root.dumpTreeRecursively();
-    System.out.println("b2=" + b2);
-
-    int i = 0;
-    int j = b1.length() - 1;
-    for(; b1.charAt(i) == b2.charAt(i); i++);
-    int k = b2.length() - 1;
-    for(; b1.charAt(j) == b2.charAt(k); j--, k--);
-    final String diff = b2.substring(i, k + 1);
-    System.out.println("i=" + i + ", j=" + j + ", k=" + k);
-    System.out.println("diff=" + diff);
-    Assert.assertTrue(i > j);
-    Assert.assertTrue(diff.contains(file4.getName()));
   }
   
   @Test

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java?rev=1426432&r1=1426431&r2=1426432&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java Fri Dec 28 08:49:33 2012
@@ -26,6 +26,8 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -38,6 +40,8 @@ import org.apache.hadoop.hdfs.server.nam
  * Helper for writing snapshot related tests
  */
 public class SnapshotTestHelper {
+  public static final Log LOG = LogFactory.getLog(SnapshotTestHelper.class);
+
   private SnapshotTestHelper() {
     // Cannot be instantinatied
   }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java?rev=1426432&r1=1426431&r2=1426432&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java Fri Dec 28 08:49:33 2012
@@ -17,13 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SNAPSHOT_LIMIT;
+
 import java.io.IOException;
+import java.util.Random;
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -56,6 +60,8 @@ public class TestNestedSnapshots {
   }
 
   private static final long SEED = 0;
+  private static Random RANDOM = new Random(SEED);
+
   private static final short REPLICATION = 3;
   private static final long BLOCKSIZE = 1024;
   
@@ -89,7 +95,7 @@ public class TestNestedSnapshots {
    */
   @Test
   public void testNestedSnapshots() throws Exception {
-    final Path foo = new Path("/test/foo");
+    final Path foo = new Path("/testNestedSnapshots/foo");
     final Path bar = new Path(foo, "bar");
     final Path file1 = new Path(bar, "file1");
     DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
@@ -117,8 +123,8 @@ public class TestNestedSnapshots {
     assertFile(s1path, s2path, file2, true, false, false);
   }
 
-  private static void print(String mess) throws UnresolvedLinkException {
-    System.out.println("XXX " + mess);
+  private static void print(String message) throws UnresolvedLinkException {
+    System.out.println("XXX " + message);
     SnapshotTestHelper.dumpTreeRecursively(fsn.getFSDirectory().getINode("/"));
   }
 
@@ -135,4 +141,42 @@ public class TestNestedSnapshots {
       Assert.assertEquals("Failed on " + paths[i], expected[i], computed);
     }
   }
+
+  @Test
+  public void testSnapshotLimit() throws Exception {
+    final int step = 1000;
+    final String dirStr = "/testSnapshotLimit/dir";
+    final Path dir = new Path(dirStr);
+    hdfs.mkdirs(dir, new FsPermission((short)0777));
+    hdfs.allowSnapshot(dirStr);
+
+    int s = 0;
+    for(; s < SNAPSHOT_LIMIT; s++) {
+      final String snapshotName = "s" + s;
+      hdfs.createSnapshot(snapshotName, dirStr);
+
+      //create a file occasionally 
+      if (s % step == 0) {
+        final Path file = new Path(dirStr, "f" + s);
+        DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
+      }
+    }
+
+    try {
+      hdfs.createSnapshot("s" + s, dirStr);
+      Assert.fail("Expected to fail to create snapshot, but didn't.");
+    } catch(IOException ioe) {
+      SnapshotTestHelper.LOG.info("The exception is expected.", ioe);
+    }
+
+    for(int f = 0; f < SNAPSHOT_LIMIT; f += step) {
+      final String file = "f" + f;
+      s = RANDOM.nextInt(step);
+      for(; s < SNAPSHOT_LIMIT; s += RANDOM.nextInt(step)) {
+        final Path p = SnapshotTestHelper.getSnapshotPath(dir, "s" + s, file);
+        //the file #f exists in snapshot #s iff s > f.
+        Assert.assertEquals(s > f, hdfs.exists(p));
+      }
+    }
+  }
 }
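
The assertions in the new testSnapshotLimit rest on one ordering fact: in each iteration the snapshot "s" + s is taken before the file "f" + s is created, so file f<N> is visible in snapshot s<M> exactly when M > N. A small, self-contained model of that visibility invariant using plain collections instead of a MiniDFSCluster (the small limit and step below are illustrative only):

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class SnapshotVisibilityModel {
      public static void main(String[] args) {
        final int limit = 12, step = 3;                         // tiny stand-ins for SNAPSHOT_LIMIT and 1000
        final List<Set<String>> snapshots = new ArrayList<>();  // snapshots.get(i) = files visible in s<i>
        final Set<String> current = new HashSet<>();            // live directory contents

        for (int s = 0; s < limit; s++) {
          snapshots.add(new HashSet<>(current));                // take snapshot s<s> first ...
          if (s % step == 0) {
            current.add("f" + s);                               // ... then occasionally create a file
          }
        }

        for (int f = 0; f < limit; f += step) {
          for (int s = 0; s < limit; s++) {
            final boolean visible = snapshots.get(s).contains("f" + f);
            if (visible != (s > f)) {                           // file f<f> exists in s<s> iff s > f
              throw new AssertionError("f" + f + " vs s" + s);
            }
          }
        }
        System.out.println("visibility invariant holds");
      }
    }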


