hadoop-hdfs-commits mailing list archives

From sur...@apache.org
Subject svn commit: r1404498 - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/ src/test/java/org/apache/hadoop/hdfs/ src/test/java/org/apache/hadoop/hdfs/server/namenode/ s...
Date Thu, 01 Nov 2012 08:29:41 GMT
Author: suresh
Date: Thu Nov  1 08:29:41 2012
New Revision: 1404498

URL: http://svn.apache.org/viewvc?rev=1404498&view=rev
Log:
HDFS-4133. Add testcases for testing basic snapshot functionalities. Contributed by Jing Zhao.

Added:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
Removed:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshot.java
Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt?rev=1404498&r1=1404497&r2=1404498&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
(original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
Thu Nov  1 08:29:41 2012
@@ -37,3 +37,6 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4111. Support snapshot of subtrees. (szetszwo via suresh)
 
   HDFS-4119. Complete the allowSnapshot code and add a test for it. (szetszwo)
+
+  HDFS-4133. Add testcases for testing basic snapshot functionalities.
+  (Jing Zhao via suresh)

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java?rev=1404498&r1=1404497&r2=1404498&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
(original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
Thu Nov  1 08:29:41 2012
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import java.io.IOException;
+import java.io.PrintWriter;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -115,4 +116,16 @@ public class INodeDirectorySnapshottable
     setModificationTime(timestamp);
     return r;
   }
+  
+  @Override
+  public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix) {
+    super.dumpTreeRecursively(out, prefix);
+
+    out.print(prefix);
+    out.print(snapshots.size());
+    out.print(snapshots.size() <= 1 ? " snapshot of " : " snapshots of ");
+    out.println(getLocalName());
+
+    dumpTreeRecursively(out, prefix, snapshots);
+  }
 }
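For reference, a minimal sketch (not part of this commit) of how the new dumpTreeRecursively override could be exercised, assuming the hdfs/fsdir test fixtures used in TestSnapshotPathINodes below and the allowSnapshot/createSnapshot calls shown there:

    // Illustrative only: dump the tree of a snapshottable directory,
    // including its snapshots.
    hdfs.allowSnapshot("/TestSnapshot/sub1");
    hdfs.createSnapshot("s1", "/TestSnapshot/sub1");
    INodeDirectorySnapshottable snapshottable =
        (INodeDirectorySnapshottable) fsdir.getINode("/TestSnapshot/sub1");
    PrintWriter out = new PrintWriter(System.out, true);
    // Prints the directory subtree first, then a line such as
    // "1 snapshot of sub1", then the snapshot subtrees themselves.
    snapshottable.dumpTreeRecursively(out, new StringBuilder());
    out.flush();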

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1404498&r1=1404497&r2=1404498&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
(original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
Thu Nov  1 08:29:41 2012
@@ -609,6 +609,25 @@ public class DFSTestUtil {
   }
   
   /**
+   * Append the specified number of bytes to a given file
+   * @param fs The file system
+   * @param p Path of the file to append
+   * @param length Number of bytes to append to the file
+   * @throws IOException
+   */
+  public static void appendFile(FileSystem fs, Path p, int length)
+      throws IOException {
+    assert fs.exists(p);
+    assert length >= 0;
+    byte[] toAppend = new byte[length];
+    Random random = new Random();
+    random.nextBytes(toAppend);
+    FSDataOutputStream out = fs.append(p);
+    out.write(toAppend);
+    out.close();
+  }
+  
+  /**
    * @return url content as string (UTF-8 encoding assumed)
    */
   public static String urlGet(URL url) throws IOException {
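A minimal usage sketch for the new appendFile helper (illustrative only, not part of this commit; fs is assumed to be a FileSystem bound to a running MiniDFSCluster and the path is a placeholder borrowed from the tests below):

    // Hypothetical usage: append 1024 random bytes to an existing file and
    // verify that the file grew by that amount.
    Path p = new Path("/TestSnapshot/sub1/file1");
    long lengthBefore = fs.getFileStatus(p).getLen();
    DFSTestUtil.appendFile(fs, p, 1024);
    assertEquals(lengthBefore + 1024, fs.getFileStatus(p).getLen());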

Added: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java?rev=1404498&view=auto
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
(added)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
Thu Nov  1 08:29:41 2012
@@ -0,0 +1,394 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshotRoot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileSnapshot;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/** Test snapshot-related operations. */
+public class TestSnapshotPathINodes {
+  private static final long seed = 0;
+  private static final short REPLICATION = 3;
+
+  private final Path dir = new Path("/TestSnapshot");
+  
+  private final Path sub1 = new Path(dir, "sub1");
+  private final Path file1 = new Path(sub1, "file1");
+  private final Path file2 = new Path(sub1, "file2");
+
+  private Configuration conf;
+  private MiniDFSCluster cluster;
+  private FSNamesystem fsn;
+  private FSDirectory fsdir;
+
+  private DistributedFileSystem hdfs;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new Configuration();
+    cluster = new MiniDFSCluster.Builder(conf)
+      .numDataNodes(REPLICATION)
+      .build();
+    cluster.waitActive();
+    
+    fsn = cluster.getNamesystem();
+    fsdir = fsn.getFSDirectory();
+    
+    hdfs = cluster.getFileSystem();
+    DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /** Test allow-snapshot operation. */
+  @Test
+  public void testAllowSnapshot() throws Exception {
+    final String path = sub1.toString();
+    final INode before = fsdir.getINode(path);
+    
+    // Before a directory is snapshottable
+    Assert.assertTrue(before instanceof INodeDirectory);
+    Assert.assertFalse(before instanceof INodeDirectorySnapshottable);
+
+    // After a directory is snapshottable
+    hdfs.allowSnapshot(path);
+    final INode after = fsdir.getINode(path);
+    Assert.assertTrue(after instanceof INodeDirectorySnapshottable);
+  }
+  
+  /** 
+   * Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)} 
+   * for normal (non-snapshot) file.
+   */
+  @Test
+  public void testNonSnapshotPathINodes() throws Exception {
+    // Get the inodes by resolving the path of a normal file
+    String[] names = INode.getPathNames(file1.toString());
+    byte[][] components = INode.getPathComponents(names);
+    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    INode[] inodes = nodesInPath.getINodes();
+    // The number of inodes should be equal to components.length
+    assertEquals(inodes.length, components.length);
+    // The returned nodesInPath should be non-snapshot
+    assertFalse(nodesInPath.isSnapshot());
+    assertEquals(nodesInPath.getSnapshotRootIndex(), -1);
+    // The last INode should be associated with file1
+    assertEquals(inodes[components.length - 1].getFullPathName(),
+        file1.toString());
+    assertEquals(inodes[components.length - 2].getFullPathName(),
+        sub1.toString());
+    assertEquals(inodes[components.length - 3].getFullPathName(),
+        dir.toString());
+    
+    // Call getExistingPathINodes and request only one INode. This is used
+    // when identifying the INode for a given path.
+    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 1, false);
+    inodes = nodesInPath.getINodes();
+    assertEquals(inodes.length, 1);
+    assertFalse(nodesInPath.isSnapshot());
+    assertEquals(nodesInPath.getSnapshotRootIndex(), -1);
+    assertEquals(inodes[0].getFullPathName(), file1.toString());
+    
+    // Call getExistingPathINodes and request 2 INodes. This is usually used
+    // when identifying the parent INode of a given path.
+    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 2, false);
+    inodes = nodesInPath.getINodes();
+    assertEquals(inodes.length, 2);
+    assertFalse(nodesInPath.isSnapshot());
+    assertEquals(nodesInPath.getSnapshotRootIndex(), -1);
+    assertEquals(inodes[1].getFullPathName(), file1.toString());
+    assertEquals(inodes[0].getFullPathName(), sub1.toString());
+  }
+  
+  /** 
+   * Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)} 
+   * for snapshot file.
+   */
+  @Test
+  public void testSnapshotPathINodes() throws Exception {
+    // Create a snapshot for the dir, and check the inodes for the path
+    // pointing to a snapshot file
+    hdfs.allowSnapshot(sub1.toString());
+    hdfs.createSnapshot("s1", sub1.toString());
+    // The path when accessing the snapshot file of file1 is
+    // /TestSnapshot/sub1/.snapshot/s1/file1
+    String snapshotPath = sub1.toString() + "/.snapshot/s1/file1";
+    String[] names = INode.getPathNames(snapshotPath);
+    byte[][] components = INode.getPathComponents(names);
+    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    INode[] inodes = nodesInPath.getINodes();
+    // Length of inodes should be (components.length - 1), since we will ignore
+    // ".snapshot" 
+    assertEquals(inodes.length, components.length - 1);
+    assertTrue(nodesInPath.isSnapshot());
+    // SnapshotRootIndex should be 3: {root, TestSnapshot, sub1, s1, file1}
+    assertEquals(nodesInPath.getSnapshotRootIndex(), 3);
+    assertTrue(inodes[nodesInPath.getSnapshotRootIndex()] instanceof 
+        INodeDirectorySnapshotRoot);
+    // Check the INode for file1 (snapshot file)
+    INode snapshotFileNode = inodes[inodes.length - 1]; 
+    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
+    assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
+    assertTrue(snapshotFileNode.getParent() instanceof 
+        INodeDirectorySnapshotRoot);
+    
+    // Call getExistingPathINodes and request only one INode.
+    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 1, false);
+    inodes = nodesInPath.getINodes();
+    assertEquals(inodes.length, 1);
+    assertTrue(nodesInPath.isSnapshot());
+    // The snapshot root (s1) is not included in inodes. Thus the
+    // snapshotRootIndex should be -1.
+    assertEquals(nodesInPath.getSnapshotRootIndex(), -1);
+    // Check the INode for file1 (snapshot file)
+    snapshotFileNode = inodes[inodes.length - 1]; 
+    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
+    assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
+    
+    // Call getExistingPathINodes and request 2 INodes.
+    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 2, false);
+    inodes = nodesInPath.getINodes();
+    assertEquals(inodes.length, 2);
+    assertTrue(nodesInPath.isSnapshot());
+    // There should be two INodes in inodes: s1 and snapshot of file1. Thus the
+    // SnapshotRootIndex should be 0.
+    assertEquals(nodesInPath.getSnapshotRootIndex(), 0);
+    snapshotFileNode = inodes[inodes.length - 1];
+    // Check the INode for snapshot of file1
+    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
+    assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
+    
+    // Resolve the path "/TestSnapshot/sub1/.snapshot"  
+    String dotSnapshotPath = sub1.toString() + "/.snapshot";
+    names = INode.getPathNames(dotSnapshotPath);
+    components = INode.getPathComponents(names);
+    nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    inodes = nodesInPath.getINodes();
+    // The number of INodes returned should be components.length - 1 since we
+    // will ignore ".snapshot"
+    assertEquals(inodes.length, components.length - 1);
+    assertTrue(nodesInPath.isSnapshot());
+    // No SnapshotRoot dir is included in the resolved inodes  
+    assertEquals(nodesInPath.getSnapshotRootIndex(), -1);
+    // The last INode should be the INode for sub1
+    assertEquals(inodes[inodes.length - 1].getFullPathName(), sub1.toString());
+    assertFalse(inodes[inodes.length - 1] instanceof INodeFileSnapshot);
+  }
+  
+  /** 
+   * Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)} 
+   * for snapshot file after deleting the original file.
+   */
+  @Test
+  public void testSnapshotPathINodesAfterDeletion() throws Exception {
+    // Create a snapshot for the dir, and check the inodes for the path
+    // pointing to a snapshot file
+    hdfs.allowSnapshot(sub1.toString());
+    hdfs.createSnapshot("s1", sub1.toString());
+    
+    // Delete the original file /TestSnapshot/sub1/file1
+    hdfs.delete(file1, false);
+    
+    // Check the INodes for path /TestSnapshot/sub1/file1
+    String[] names = INode.getPathNames(file1.toString());
+    byte[][] components = INode.getPathComponents(names);
+    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    INode[] inodes = nodesInPath.getINodes();
+    // The length of inodes should be equal to components.length
+    assertEquals(inodes.length, components.length);
+    // The number of non-null elements should be components.length - 1 since
+    // file1 has been deleted
+    assertEquals(nodesInPath.getSize(), components.length - 1);
+    // The returned nodesInPath should be non-snapshot
+    assertFalse(nodesInPath.isSnapshot());
+    assertEquals(nodesInPath.getSnapshotRootIndex(), -1);
+    // The last INode should be null, and the one before should be associated
+    // with sub1
+    assertNull(inodes[components.length - 1]);
+    assertEquals(inodes[components.length - 2].getFullPathName(),
+        sub1.toString());
+    assertEquals(inodes[components.length - 3].getFullPathName(),
+        dir.toString());
+    
+    // Resolve the path for the snapshot file
+    // /TestSnapshot/sub1/.snapshot/s1/file1
+    String snapshotPath = sub1.toString() + "/.snapshot/s1/file1";
+    names = INode.getPathNames(snapshotPath);
+    components = INode.getPathComponents(names);
+    nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    inodes = nodesInPath.getINodes();
+    // Length of inodes should be (components.length - 1), since we will ignore
+    // ".snapshot" 
+    assertEquals(inodes.length, components.length - 1);
+    assertTrue(nodesInPath.isSnapshot());
+    // SnapshotRootIndex should be 3: {root, TestSnapshot, sub1, s1, file1}
+    assertEquals(nodesInPath.getSnapshotRootIndex(), 3);
+    assertTrue(inodes[nodesInPath.getSnapshotRootIndex()] instanceof 
+        INodeDirectorySnapshotRoot);
+    // Check the INode for file1 (snapshot file)
+    INode snapshotFileNode = inodes[inodes.length - 1]; 
+    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
+    assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
+    assertTrue(snapshotFileNode.getParent() instanceof 
+        INodeDirectorySnapshotRoot);
+  }
+  
+  /** 
+   * Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)} 
+   * for snapshot file while adding a new file after snapshot.
+   */
+  @Test
+  public void testSnapshotPathINodesWithAddedFile() throws Exception {
+    // Create a snapshot for the dir, and check the inodes for the path
+    // pointing to a snapshot file
+    hdfs.allowSnapshot(sub1.toString());
+    hdfs.createSnapshot("s1", sub1.toString());
+    
+    // Add a new file /TestSnapshot/sub1/file3
+    final Path file3 = new Path(sub1, "file3");
+    DFSTestUtil.createFile(hdfs, file3, 1024, REPLICATION, seed);
+  
+    // Check the inodes for /TestSnapshot/sub1/file3
+    String[] names = INode.getPathNames(file3.toString());
+    byte[][] components = INode.getPathComponents(names);
+    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    INode[] inodes = nodesInPath.getINodes();
+    // The number of inodes should be equal to components.length
+    assertEquals(inodes.length, components.length);
+    // The returned nodesInPath should be non-snapshot
+    assertFalse(nodesInPath.isSnapshot());
+    assertEquals(nodesInPath.getSnapshotRootIndex(), -1);
+    // The last INode should be associated with file3
+    assertEquals(inodes[components.length - 1].getFullPathName(),
+        file3.toString());
+    assertEquals(inodes[components.length - 2].getFullPathName(),
+        sub1.toString());
+    assertEquals(inodes[components.length - 3].getFullPathName(),
+        dir.toString());
+    
+    // Check the inodes for /TestSnapshot/sub1/.snapshot/s1/file3
+    String snapshotPath = sub1.toString() + "/.snapshot/s1/file3";
+    names = INode.getPathNames(snapshotPath);
+    components = INode.getPathComponents(names);
+    nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    inodes = nodesInPath.getINodes();
+    // Length of inodes should be (components.length - 1), since we will ignore
+    // ".snapshot" 
+    assertEquals(inodes.length, components.length - 1);
+    // The number of non-null inodes should be components.length - 2, since
+    // snapshot of file3 does not exist
+    assertEquals(nodesInPath.getSize(), components.length - 2);
+    assertTrue(nodesInPath.isSnapshot());
+    // SnapshotRootIndex should still be 3: {root, TestSnapshot, sub1, s1, null}
+    assertEquals(nodesInPath.getSnapshotRootIndex(), 3);
+    assertTrue(inodes[nodesInPath.getSnapshotRootIndex()] instanceof 
+        INodeDirectorySnapshotRoot);
+    // Check the last INode in inodes, which should be null
+    assertNull(inodes[inodes.length - 1]);
+    assertTrue(inodes[inodes.length - 2] instanceof 
+        INodeDirectorySnapshotRoot);
+  }
+  
+  /** 
+   * Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)} 
+   * for snapshot file while modifying file after snapshot.
+   */
+  @Test
+  public void testSnapshotPathINodesAfterModification() throws Exception {
+    // First check the INode for /TestSnapshot/sub1/file1
+    String[] names = INode.getPathNames(file1.toString());
+    byte[][] components = INode.getPathComponents(names);
+    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
+        components.length, false);
+    INode[] inodes = nodesInPath.getINodes();
+    // The number of inodes should be equal to components.length
+    assertEquals(inodes.length, components.length);
+    // The last INode should be associated with file1
+    assertEquals(inodes[components.length - 1].getFullPathName(),
+        file1.toString());
+    
+    // Create a snapshot for the dir, and check the inodes for the path
+    // pointing to a snapshot file
+    hdfs.allowSnapshot(sub1.toString());
+    hdfs.createSnapshot("s1", sub1.toString());
+    
+    // Modify file1
+    DFSTestUtil.appendFile(hdfs, file1, "the content for appending");
+    // Check the INode for /TestSnapshot/sub1/file1 again
+    INodesInPath newNodesInPath = fsdir.rootDir
+        .getExistingPathINodes(components, components.length, false);
+    INode[] newInodes = newNodesInPath.getINodes();
+    // The number of inodes should be equal to components.length
+    assertEquals(newInodes.length, components.length);
+    // The last INode should be associated with file1
+    assertEquals(newInodes[components.length - 1].getFullPathName(),
+        file1.toString());
+    // The modification time of the INode for file1 should have been changed
+    Assert.assertFalse(inodes[components.length - 1].getModificationTime() ==
+        newInodes[components.length - 1].getModificationTime());
+    
+    // Check the INodes for snapshot of file1
+    String snapshotPath = sub1.toString() + "/.snapshot/s1/file1";
+    names = INode.getPathNames(snapshotPath);
+    components = INode.getPathComponents(names);
+    INodesInPath ssNodesInPath = fsdir.rootDir.getExistingPathINodes(
+        components, components.length, false);
+    INode[] ssInodes = ssNodesInPath.getINodes();
+    // Length of ssInodes should be (components.length - 1), since we will
+    // ignore ".snapshot" 
+    assertEquals(ssInodes.length, components.length - 1);
+    assertTrue(ssNodesInPath.isSnapshot());
+    // Check the INode for snapshot of file1
+    INode snapshotFileNode = ssInodes[ssInodes.length - 1]; 
+    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
+    assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
+    // The modification time of the snapshot INode should be the same as the
+    // original INode before modification
+    assertEquals(inodes[inodes.length - 1].getModificationTime(),
+        ssInodes[ssInodes.length - 1].getModificationTime());
+  }
+}
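For context, the ".snapshot" paths resolved above are the same paths a client would use through the ordinary FileSystem API; a sketch under that assumption (not part of this commit), using the hdfs fixture from the test above:

    // Hypothetical client-side read of a snapshotted file once "s1" exists.
    Path inSnapshot = new Path("/TestSnapshot/sub1/.snapshot/s1/file1");
    FSDataInputStream in = hdfs.open(inSnapshot);
    byte[] buf = new byte[1024];
    int bytesRead = in.read(buf);  // returns the content captured at snapshot time
    in.close();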

Added: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java?rev=1404498&view=auto
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
(added)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
Thu Nov  1 08:29:41 2012
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+
+/**
+ * Helper for writing snapshot-related tests
+ */
+public class SnapshotTestHelper {
+  private SnapshotTestHelper() {
+    // Cannot be instantiated
+  }
+
+  public static Path getSnapshotRoot(Path snapshottedDir, String snapshotName) {
+    return new Path(snapshottedDir, ".snapshot/" + snapshotName);
+  }
+
+  public static Path getSnapshotPath(Path snapshottedDir, String snapshotName,
+      String fileLocalName) {
+    return new Path(getSnapshotRoot(snapshottedDir, snapshotName),
+        fileLocalName);
+  }
+
+  /**
+   * Create snapshot for a dir using a given snapshot name
+   * 
+   * @param hdfs DistributedFileSystem instance
+   * @param snapshottedDir The dir to be snapshotted
+   * @param snapshotName The name of the snapshot
+   * @return The path of the snapshot root
+   */
+  public static Path createSnapshot(DistributedFileSystem hdfs,
+      Path snapshottedDir, String snapshotName) throws Exception {
+    assert hdfs.exists(snapshottedDir);
+    hdfs.allowSnapshot(snapshottedDir.toString());
+    hdfs.createSnapshot(snapshotName, snapshottedDir.toString());
+    return SnapshotTestHelper.getSnapshotRoot(snapshottedDir, snapshotName);
+  }
+
+  /**
+   * Check the functionality of a snapshot.
+   * 
+   * @param hdfs DistributedFileSystem instance
+   * @param snapshotRoot The root of the snapshot
+   * @param snapshottedDir The snapshotted directory
+   */
+  public static void checkSnapshotCreation(DistributedFileSystem hdfs,
+      Path snapshotRoot, Path snapshottedDir) throws Exception {
+    // Currently we only check if the snapshot was created successfully
+    assertTrue(hdfs.exists(snapshotRoot));
+    // Compare the snapshot with the current dir
+    FileStatus[] currentFiles = hdfs.listStatus(snapshottedDir);
+    FileStatus[] snapshotFiles = hdfs.listStatus(snapshotRoot);
+    assertEquals(currentFiles.length, snapshotFiles.length);
+  }
+}
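The helper is intended to be used in the following pattern (a sketch, not part of this commit; hdfs and the snapshotted directory sub1 are assumed to match the fixtures in TestSnapshot below):

    // Snapshot a directory, verify the snapshot root, then address a file
    // inside the snapshot.
    Path snapshotRoot = SnapshotTestHelper.createSnapshot(hdfs, sub1, "s0");
    SnapshotTestHelper.checkSnapshotCreation(hdfs, snapshotRoot, sub1);
    // e.g. /TestSnapshot/sub1/.snapshot/s0/file1
    Path snapshotFile1 = SnapshotTestHelper.getSnapshotPath(sub1, "s0", "file1");
    assertTrue(hdfs.exists(snapshotFile1));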

Added: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java?rev=1404498&view=auto
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
(added)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
Thu Nov  1 08:29:41 2012
@@ -0,0 +1,361 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * This class tests snapshot functionality. One or multiple snapshots are
+ * created. The snapshotted directory is changed and verification is done to
+ * ensure the snapshots remain unchanged.
+ */
+public class TestSnapshot {
+  protected static final long seed = 0;
+  protected static final short REPLICATION = 3;
+  protected static final long BLOCKSIZE = 1024;
+  public static final int SNAPSHOTNUMBER = 10;
+
+  private final Path dir = new Path("/TestSnapshot");
+  private final Path sub1 = new Path(dir, "sub1");
+
+  protected Configuration conf;
+  protected MiniDFSCluster cluster;
+  protected FSNamesystem fsn;
+  protected DistributedFileSystem hdfs;
+
+  /**
+   * The list recording all previous snapshots. Each element in the list
+   * records a snapshot root.
+   */
+  protected static ArrayList<Path> snapshotList = new ArrayList<Path>();
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new Configuration();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
+        .build();
+    cluster.waitActive();
+
+    fsn = cluster.getNamesystem();
+    hdfs = cluster.getFileSystem();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Make changes (modification, deletion, creation) to the current files/dir.
+   * Then check if the previous snapshots are still correct.
+   * 
+   * @param modifications Modifications to be applied to the current dir.
+   */
+  public void modifyCurrentDirAndCheckSnapshots(Modification[] modifications)
+      throws Exception {
+    for (Modification modification : modifications) {
+      modification.loadSnapshots();
+      modification.modify();
+      modification.checkSnapshots();
+    }
+  }
+
+  /**
+   * Generate the snapshot name based on its index.
+   * 
+   * @param snapshotIndex The index of the snapshot
+   * @return The snapshot name
+   */
+  private String genSnapshotName(int snapshotIndex) {
+    return "s" + snapshotIndex;
+  }
+
+  /**
+   * Main test, which repeats the following loop:
+   * 
+   *   create snapshot
+   *     -> check snapshot creation
+   *     -> change the current/live files/dir
+   *     -> check the previous snapshots
+   *     -> back to creating the next snapshot
+   * 
+   * @param snapshottedDir The dir to be snapshotted
+   * @param modificationsList The list of modifications. Each element in the
+   *          list is a group of modifications applied to the current dir.
+   */
+  protected void testSnapshot(Path snapshottedDir,
+      ArrayList<Modification[]> modificationsList) throws Exception {
+    int snapshotIndex = 0;
+    for (Modification[] modifications : modificationsList) {
+      // 1. create snapshot
+      // TODO: we also need to check creating snapshot for a directory under a
+      // snapshottable directory
+      Path snapshotRoot = SnapshotTestHelper.createSnapshot(hdfs,
+          snapshottedDir, genSnapshotName(snapshotIndex++));
+      snapshotList.add(snapshotRoot);
+      // 2. Check the basic functionality of the snapshot(s)
+      SnapshotTestHelper.checkSnapshotCreation(hdfs, snapshotRoot,
+          snapshottedDir);
+      // 3. Make changes to the current directory
+      for (Modification m : modifications) {
+        m.loadSnapshots();
+        m.modify();
+        m.checkSnapshots();
+      }
+    }
+  }
+
+  /**
+   * Prepare a list of modifications. A modification may be a file creation,
+   * file deletion, or an append to an existing file.
+   * 
+   * @param number
+   *          Number of times that we make modifications to the current
+   *          directory.
+   * @return A list of modifications. Each element in the list is a group of
+   *         modifications that will be applied to the "current" directory.
+   * @throws Exception
+   */
+  private ArrayList<Modification[]> prepareModifications(int number)
+      throws Exception {
+    final Path[] files = new Path[3];
+    files[0] = new Path(sub1, "file0");
+    files[1] = new Path(sub1, "file1");
+    files[2] = new Path(sub1, "file2");
+    DFSTestUtil.createFile(hdfs, files[0], BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, files[1], BLOCKSIZE, REPLICATION, seed);
+
+    ArrayList<Modification[]> mList = new ArrayList<Modification[]>();
+    //
+    // Modification iterations are as follows:
+    // Iteration 0 - delete:file0, append:file1, create:file2
+    // Iteration 1 - delete:file1, append:file2, create:file0
+    // Iteration 2 - delete:file2, append:file0, create:file1
+    // ...
+    //
+    for (int i = 0; i < number; i++) {
+      Modification[] mods = new Modification[3];
+      // delete files[i % 3]
+      mods[0] = new FileDeletion(files[i % 3], hdfs);
+      // modify files[(i+1) % 3]
+      mods[1] = new FileAppend(files[(i + 1) % 3], hdfs, (int) BLOCKSIZE);
+      // create files[(i+2) % 3]
+      mods[2] = new FileCreation(files[(i + 2) % 3], hdfs, (int) BLOCKSIZE);
+      mList.add(mods);
+    }
+    return mList;
+  }
+
+  @Test
+  public void testSnapshot() throws Exception {
+    ArrayList<Modification[]> mList = prepareModifications(SNAPSHOTNUMBER);
+    testSnapshot(sub1, mList);
+  }
+
+  /**
+   * Base class representing a change applied to the current files/dir. A
+   * modification can be a file creation, a deletion, or another change such as
+   * appending to an existing file. Three abstract methods need to be implemented by
+   * subclasses: loadSnapshots() captures the states of snapshots before the
+   * modification, modify() applies the modification to the current directory,
+   * and checkSnapshots() verifies the snapshots do not change after the
+   * modification.
+   */
+  static abstract class Modification {
+    protected final Path file;
+    protected final FileSystem fs;
+    final String type;
+    protected final Random random;
+
+    Modification(Path file, FileSystem fs, String type) {
+      this.file = file;
+      this.fs = fs;
+      this.type = type;
+      this.random = new Random();
+    }
+
+    abstract void loadSnapshots() throws Exception;
+
+    abstract void modify() throws Exception;
+
+    abstract void checkSnapshots() throws Exception;
+  }
+
+  /**
+   * Appends a specified number of bytes to an existing file
+   */
+  static class FileAppend extends Modification {
+    final int appendLen;
+    private final HashMap<Path, Long> snapshotFileLengthMap;
+
+    FileAppend(Path file, FileSystem fs, int len) throws Exception {
+      super(file, fs, "append");
+      assert len >= 0;
+      this.appendLen = len;
+      this.snapshotFileLengthMap = new HashMap<Path, Long>();
+    }
+
+    @Override
+    void loadSnapshots() throws Exception {
+      for (Path snapshotRoot : snapshotList) {
+        Path snapshotFile = new Path(snapshotRoot, file.getName());
+        if (fs.exists(snapshotFile)) {
+          long snapshotFileLen = fs.getFileStatus(snapshotFile).getLen();
+          snapshotFileLengthMap.put(snapshotFile, snapshotFileLen);
+        } else {
+          snapshotFileLengthMap.put(snapshotFile, -1L);
+        }
+      }
+    }
+
+    @Override
+    void modify() throws Exception {
+      assert fs.exists(file);
+      FSDataOutputStream out = fs.append(file);
+      byte[] buffer = new byte[appendLen];
+      random.nextBytes(buffer);
+      out.write(buffer);
+      out.close();
+    }
+
+    @Override
+    void checkSnapshots() throws Exception {
+      byte[] buffer = new byte[32];
+      for (Path snapshotRoot : snapshotList) {
+        Path snapshotFile = new Path(snapshotRoot, file.getName());
+        long currentSnapshotFileLen = -1L;
+        if (fs.exists(snapshotFile)) {
+          currentSnapshotFileLen = fs.getFileStatus(snapshotFile).getLen();
+        }
+        long originalSnapshotFileLen = snapshotFileLengthMap.get(snapshotFile);
+        assertEquals(currentSnapshotFileLen, originalSnapshotFileLen);
+        // Read past the end of the snapshot file; the read should hit EOF
+        if (fs.exists(snapshotFile)) {
+          FSDataInputStream input = fs.open(snapshotFile);
+          int readLen = input.read(currentSnapshotFileLen, buffer, 0, 1);
+          assertEquals(readLen, -1);
+        }
+      }
+    }
+  }
+
+  /**
+   * New file creation
+   */
+  static class FileCreation extends Modification {
+    final int fileLen;
+    private final HashMap<Path, FileStatus> fileStatusMap;
+
+    FileCreation(Path file, FileSystem fs, int len) {
+      super(file, fs, "creation");
+      assert len >= 0;
+      this.fileLen = len;
+      fileStatusMap = new HashMap<Path, FileStatus>();
+    }
+
+    @Override
+    void loadSnapshots() throws Exception {
+      for (Path snapshotRoot : snapshotList) {
+        Path snapshotFile = new Path(snapshotRoot, file.getName());
+        boolean exist = fs.exists(snapshotFile);
+        if (exist) {
+          fileStatusMap.put(snapshotFile, fs.getFileStatus(snapshotFile));
+        } else {
+          fileStatusMap.put(snapshotFile, null);
+        }
+      }
+    }
+
+    @Override
+    void modify() throws Exception {
+      DFSTestUtil.createFile(fs, file, fileLen, fileLen, BLOCKSIZE,
+          REPLICATION, seed);
+    }
+
+    @Override
+    void checkSnapshots() throws Exception {
+      for (Path snapshotRoot : snapshotList) {
+        Path snapshotFile = new Path(snapshotRoot, file.getName());
+        boolean currentSnapshotFileExist = fs.exists(snapshotFile);
+        boolean originalSnapshotFileExist = fileStatusMap.get(snapshotFile) != null;
+        assertEquals(currentSnapshotFileExist, originalSnapshotFileExist);
+        if (currentSnapshotFileExist) {
+          FileStatus currentSnapshotStatus = fs.getFileStatus(snapshotFile);
+          FileStatus originalStatus = fileStatusMap.get(snapshotFile);
+          assertEquals(currentSnapshotStatus, originalStatus);
+        }
+      }
+    }
+  }
+
+  /**
+   * File deletion
+   */
+  static class FileDeletion extends Modification {
+    private final HashMap<Path, Boolean> snapshotFileExistenceMap;
+
+    FileDeletion(Path file, FileSystem fs) {
+      super(file, fs, "deletion");
+      snapshotFileExistenceMap = new HashMap<Path, Boolean>();
+    }
+
+    @Override
+    void loadSnapshots() throws Exception {
+      for (Path snapshotRoot : snapshotList) {
+        Path snapshotFile = new Path(snapshotRoot, file.getName());
+        boolean existence = fs.exists(snapshotFile);
+        snapshotFileExistenceMap.put(snapshotFile, existence);
+      }
+    }
+
+    @Override
+    void modify() throws Exception {
+      fs.delete(file, true);
+    }
+
+    @Override
+    void checkSnapshots() throws Exception {
+      for (Path snapshotRoot : snapshotList) {
+        Path snapshotFile = new Path(snapshotRoot, file.getName());
+        boolean currentSnapshotFileExist = fs.exists(snapshotFile);
+        boolean originalSnapshotFileExist = snapshotFileExistenceMap
+            .get(snapshotFile);
+        assertEquals(currentSnapshotFileExist, originalSnapshotFileExist);
+      }
+    }
+  }
+}
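New kinds of changes can be plugged into the testSnapshot loop by adding further Modification subclasses alongside FileAppend, FileCreation, and FileDeletion. The following is a hypothetical sketch (not part of this commit) of what a rename modification could look like if nested inside TestSnapshot, following the FileDeletion pattern:

  /** Hypothetical example (not in this commit): rename an existing file. */
  static class FileRename extends Modification {
    private final Path target;
    private final HashMap<Path, Boolean> snapshotFileExistenceMap;

    FileRename(Path file, Path target, FileSystem fs) {
      super(file, fs, "rename");
      this.target = target;
      this.snapshotFileExistenceMap = new HashMap<Path, Boolean>();
    }

    @Override
    void loadSnapshots() throws Exception {
      // Record whether the file is visible in each existing snapshot.
      for (Path snapshotRoot : snapshotList) {
        Path snapshotFile = new Path(snapshotRoot, file.getName());
        snapshotFileExistenceMap.put(snapshotFile, fs.exists(snapshotFile));
      }
    }

    @Override
    void modify() throws Exception {
      fs.rename(file, target);
    }

    @Override
    void checkSnapshots() throws Exception {
      // A rename in the current tree must not change what the snapshots show.
      for (Path snapshotRoot : snapshotList) {
        Path snapshotFile = new Path(snapshotRoot, file.getName());
        boolean currentSnapshotFileExist = fs.exists(snapshotFile);
        boolean originalSnapshotFileExist = snapshotFileExistenceMap
            .get(snapshotFile);
        assertEquals(currentSnapshotFileExist, originalSnapshotFileExist);
      }
    }
  }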


