hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From szets...@apache.org
Subject svn commit: r1455396 [2/2] - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ src/main/java/org/apache/hadoop/hd...
Date Tue, 12 Mar 2013 02:41:25 GMT
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java?rev=1455396&r1=1455395&r2=1455396&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java Tue Mar 12 02:41:25 2013
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 
 import java.io.FileNotFoundException;
+import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -37,6 +38,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -141,6 +143,17 @@ public class TestSnapshotDeletion {
     hdfs.delete(dir, true);
   }
   
+  private void checkQuotaUsageComputation(final Path dirPath,
+      final long expectedNs, final long expectedDs) throws IOException {
+    INode node = fsdir.getINode(dirPath.toString());
+    assertTrue(node.isDirectory() && node.isQuotaSet());
+    INodeDirectoryWithQuota dirNode = (INodeDirectoryWithQuota) node;
+    assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
+        dirNode.getNamespace());
+    assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs,
+        dirNode.getDiskspace());
+  }
+  
   /**
    * Test deleting a directory which is a descendant of a snapshottable
    * directory. In the test we need to cover the following cases:
@@ -177,14 +190,20 @@ public class TestSnapshotDeletion {
     
     // create snapshot s0
     SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
+    // check dir's quota usage
+    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 3);
+    
     // make a change: create a new file under subsub
     Path newFileAfterS0 = new Path(subsub, "newFile");
     DFSTestUtil.createFile(hdfs, newFileAfterS0, BLOCKSIZE, REPLICATION, seed);
     // further change: change the replicator factor of metaChangeFile
     hdfs.setReplication(metaChangeFile1, REPLICATION_1);
     hdfs.setReplication(metaChangeFile2, REPLICATION_1);
+    
     // create snapshot s1
     SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
+    // check dir's quota usage
+    checkQuotaUsageComputation(dir, 14L, BLOCKSIZE * REPLICATION * 4);
     
     // get two snapshots for later use
     Snapshot snapshot0 = ((INodeDirectorySnapshottable) fsdir.getINode(dir
@@ -198,6 +217,9 @@ public class TestSnapshotDeletion {
     // sure the deletion goes through an INodeDirectory, we delete the parent
     // of noChangeDir
     hdfs.delete(noChangeDirParent, true);
+    // while deletion, we add a diff for metaChangeFile2 as its snapshot copy
+    // for s1, we also add diffs for both sub and noChangeDirParent
+    checkQuotaUsageComputation(dir, 17L, BLOCKSIZE * REPLICATION * 4);
     
     // check the snapshot copy of noChangeDir 
     Path snapshotNoChangeDir = SnapshotTestHelper.getSnapshotPath(dir, "s1",
@@ -208,7 +230,7 @@ public class TestSnapshotDeletion {
     // should still be an INodeDirectory
     assertEquals(INodeDirectory.class, snapshotNode.getClass());
     ReadOnlyList<INode> children = snapshotNode.getChildrenList(null);
-    // check 2 children: noChangeFile and metaChangeFile1
+    // check 2 children: noChangeFile and metaChangeFile2
     assertEquals(2, children.size());
     INode noChangeFileSCopy = children.get(1);
     assertEquals(noChangeFile.getName(), noChangeFileSCopy.getLocalName());
@@ -229,7 +251,10 @@ public class TestSnapshotDeletion {
     // before deleting sub, we first create a new file under sub
     Path newFile = new Path(sub, "newFile");
     DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
+    checkQuotaUsageComputation(dir, 18L, BLOCKSIZE * REPLICATION * 5);
     hdfs.delete(sub, true);
+    // while deletion, we add diff for dir and metaChangeFile1
+    checkQuotaUsageComputation(dir, 20L, BLOCKSIZE * REPLICATION * 5);
     
     // make sure the whole subtree of sub is stored correctly in snapshot
     Path snapshotSub = SnapshotTestHelper.getSnapshotPath(dir, "s1",
@@ -307,11 +332,15 @@ public class TestSnapshotDeletion {
     }
     
     // create snapshot s1 for sub
-    hdfs.createSnapshot(sub, snapshotName);
+    SnapshotTestHelper.createSnapshot(hdfs, sub, snapshotName);
+    // check quota usage computation
+    checkQuotaUsageComputation(sub, 4, BLOCKSIZE * REPLICATION * 2);
     // delete s1
     hdfs.deleteSnapshot(sub, snapshotName);
+    checkQuotaUsageComputation(sub, 3, BLOCKSIZE * REPLICATION * 2);
     // now we can create a snapshot with the same name
     hdfs.createSnapshot(sub, snapshotName);
+    checkQuotaUsageComputation(sub, 4, BLOCKSIZE * REPLICATION * 2);
     
     // create a new file under sub
     Path newFile = new Path(sub, "newFile");
@@ -319,12 +348,14 @@ public class TestSnapshotDeletion {
     // create another snapshot s2
     String snapshotName2 = "s2";
     hdfs.createSnapshot(sub, snapshotName2);
+    checkQuotaUsageComputation(sub, 6, BLOCKSIZE * REPLICATION * 3);
     // Get the filestatus of sub under snapshot s2
     Path ss = SnapshotTestHelper
         .getSnapshotPath(sub, snapshotName2, "newFile");
     FileStatus statusBeforeDeletion = hdfs.getFileStatus(ss);
     // delete s1
     hdfs.deleteSnapshot(sub, snapshotName);
+    checkQuotaUsageComputation(sub, 5, BLOCKSIZE * REPLICATION * 3);
     FileStatus statusAfterDeletion = hdfs.getFileStatus(ss);
     System.out.println("Before deletion: " + statusBeforeDeletion.toString()
         + "\n" + "After deletion: " + statusAfterDeletion.toString());
@@ -356,19 +387,28 @@ public class TestSnapshotDeletion {
     
     // create snapshot s0 on dir
     SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
+    checkQuotaUsageComputation(dir, 8, 3 * BLOCKSIZE * REPLICATION);
     
     // delete /TestSnapshot/sub/noChangeDir/metaChangeDir/toDeleteFile
     hdfs.delete(toDeleteFile, true);
+    // the deletion adds diff of toDeleteFile and metaChangeDir
+    checkQuotaUsageComputation(dir, 10, 3 * BLOCKSIZE * REPLICATION);
     // change metadata of /TestSnapshot/sub/noChangeDir/metaChangeDir and
     // /TestSnapshot/sub/noChangeDir/metaChangeFile
     hdfs.setReplication(metaChangeFile, REPLICATION_1);
     hdfs.setOwner(metaChangeDir, "unknown", "unknown");
+    checkQuotaUsageComputation(dir, 11, 3 * BLOCKSIZE * REPLICATION);
     
     // create snapshot s1 on dir
     hdfs.createSnapshot(dir, "s1");
+    checkQuotaUsageComputation(dir, 12, 3 * BLOCKSIZE * REPLICATION);
     
     // delete snapshot s0
     hdfs.deleteSnapshot(dir, "s0");
+    // namespace: remove toDeleteFile and its diff, metaChangeFile's diff, 
+    // metaChangeDir's diff, dir's diff. diskspace: remove toDeleteFile, and 
+    // metaChangeFile's replication factor decreases
+    checkQuotaUsageComputation(dir, 7, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
     
     // check 1. there is no snapshot s0
     final INodeDirectorySnapshottable dirNode = 
@@ -423,7 +463,7 @@ public class TestSnapshotDeletion {
    */
   @Test (timeout=300000)
   public void testCombineSnapshotDiff1() throws Exception {
-    testCombineSnapshotDiffImpl(sub, "");
+    testCombineSnapshotDiffImpl(sub, "", 1);
   }
   
   /**
@@ -432,7 +472,7 @@ public class TestSnapshotDeletion {
    */
   @Test (timeout=300000)
   public void testCombineSnapshotDiff2() throws Exception {
-    testCombineSnapshotDiffImpl(sub, "subsub1/subsubsub1/");
+    testCombineSnapshotDiffImpl(sub, "subsub1/subsubsub1/", 3);
   }
   
   /**
@@ -442,54 +482,90 @@ public class TestSnapshotDeletion {
    *        where the modifications happen. It is represented as a relative 
    *        path to the snapshotRoot.
    */
-  private void testCombineSnapshotDiffImpl(Path snapshotRoot, String modDirStr)
-      throws Exception {
+  private void testCombineSnapshotDiffImpl(Path snapshotRoot, String modDirStr,
+      int dirNodeNum) throws Exception {
     Path modDir = modDirStr.isEmpty() ? snapshotRoot : new Path(snapshotRoot,
         modDirStr);
+    final int delta = modDirStr.isEmpty() ? 0 : 1;
     Path file10 = new Path(modDir, "file10");
     Path file11 = new Path(modDir, "file11");
     Path file12 = new Path(modDir, "file12");
     Path file13 = new Path(modDir, "file13");
     Path file14 = new Path(modDir, "file14");
     Path file15 = new Path(modDir, "file15");
-    final short REP_1 = REPLICATION - 1;
-    DFSTestUtil.createFile(hdfs, file10, BLOCKSIZE, REP_1, seed);
-    DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REP_1, seed);
-    DFSTestUtil.createFile(hdfs, file12, BLOCKSIZE, REP_1, seed);
-    DFSTestUtil.createFile(hdfs, file13, BLOCKSIZE, REP_1, seed);
+    DFSTestUtil.createFile(hdfs, file10, BLOCKSIZE, REPLICATION_1, seed);
+    DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION_1, seed);
+    DFSTestUtil.createFile(hdfs, file12, BLOCKSIZE, REPLICATION_1, seed);
+    DFSTestUtil.createFile(hdfs, file13, BLOCKSIZE, REPLICATION_1, seed);
 
     // create snapshot s1 for snapshotRoot
     SnapshotTestHelper.createSnapshot(hdfs, snapshotRoot, "s1");
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 5, 8 * BLOCKSIZE);
     
     // delete file11
     hdfs.delete(file11, true);
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 6 + delta,
+        8 * BLOCKSIZE);
+    
     // modify file12
     hdfs.setReplication(file12, REPLICATION);
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7 + delta,
+        9 * BLOCKSIZE);
+    
     // modify file13
     hdfs.setReplication(file13, REPLICATION);
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 8 + delta,
+        10 * BLOCKSIZE);
+    
     // create file14
     DFSTestUtil.createFile(hdfs, file14, BLOCKSIZE, REPLICATION, seed);
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 9 + delta,
+        13 * BLOCKSIZE);
+    
     // create file15
     DFSTestUtil.createFile(hdfs, file15, BLOCKSIZE, REPLICATION, seed);
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 10 + delta,
+        16 * BLOCKSIZE);
     
     // create snapshot s2 for snapshotRoot
     hdfs.createSnapshot(snapshotRoot, "s2");
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 11 + delta,
+        16 * BLOCKSIZE);
     
     // create file11 again: (0, d) + (c, 0)
     DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION, seed);
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 12 + delta * 2,
+        19 * BLOCKSIZE);
+    
     // delete file12
     hdfs.delete(file12, true);
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 13 + delta * 2,
+        19 * BLOCKSIZE);
+    
     // modify file13
     hdfs.setReplication(file13, (short) (REPLICATION - 2));
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 14 + delta * 2,
+        19 * BLOCKSIZE);
+    
     // delete file14: (c, 0) + (0, d)
     hdfs.delete(file14, true);
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 15 + delta * 2,
+        19 * BLOCKSIZE);
+    
     // modify file15
-    hdfs.setReplication(file15, REP_1);
+    hdfs.setReplication(file15, REPLICATION_1);
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 16 + delta * 2,
+        19 * BLOCKSIZE);
     
     // create snapshot s3 for snapshotRoot
     hdfs.createSnapshot(snapshotRoot, "s3");
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 17 + delta * 2,
+        19 * BLOCKSIZE);
+    
     // modify file10, to check if the posterior diff was set correctly
-    hdfs.setReplication(file10, REP_1);
+    hdfs.setReplication(file10, REPLICATION);
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 18 + delta * 2,
+        20 * BLOCKSIZE);
     
     Path file10_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
         modDirStr + "file10");
@@ -506,6 +582,9 @@ public class TestSnapshotDeletion {
     
     // delete s2, in which process we need to combine the diff in s2 to s1
     hdfs.deleteSnapshot(snapshotRoot, "s2");
+    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 12 + delta,
+        14 * BLOCKSIZE);
+    
     // check the correctness of s1
     FileStatus statusAfterDeletion10 = hdfs.getFileStatus(file10_s1);
     FileStatus statusAfterDeletion11 = hdfs.getFileStatus(file11_s1);
@@ -529,10 +608,10 @@ public class TestSnapshotDeletion {
     assertFalse(hdfs.exists(file15_s1));
     
     INodeFile nodeFile13 = (INodeFile) fsdir.getINode(file13.toString());
-    assertEquals(REP_1, nodeFile13.getBlockReplication());
+    assertEquals(REPLICATION_1, nodeFile13.getBlockReplication());
 
     INodeFile nodeFile12 = (INodeFile) fsdir.getINode(file12_s1.toString());
-    assertEquals(REP_1, nodeFile12.getBlockReplication());
+    assertEquals(REPLICATION_1, nodeFile12.getBlockReplication());
   }
   
   /** Test deleting snapshots with modification on the metadata of directory */ 
@@ -544,17 +623,24 @@ public class TestSnapshotDeletion {
     
     // create snapshot s1 for sub1, and change the metadata of sub1
     SnapshotTestHelper.createSnapshot(hdfs, sub, "s1");
+    checkQuotaUsageComputation(sub, 3, BLOCKSIZE * 3);
     hdfs.setOwner(sub, "user2", "group2");
+    checkQuotaUsageComputation(sub, 3, BLOCKSIZE * 3);
     
     // create snapshot s2 for sub1, but do not modify sub1 afterwards
     hdfs.createSnapshot(sub, "s2");
+    checkQuotaUsageComputation(sub, 4, BLOCKSIZE * 3);
     
     // create snapshot s3 for sub1, and change the metadata of sub1
     hdfs.createSnapshot(sub, "s3");
+    checkQuotaUsageComputation(sub, 5, BLOCKSIZE * 3);
     hdfs.setOwner(sub, "user3", "group3");
+    checkQuotaUsageComputation(sub, 5, BLOCKSIZE * 3);
     
     // delete snapshot s3
     hdfs.deleteSnapshot(sub, "s3");
+    checkQuotaUsageComputation(sub, 4, BLOCKSIZE * 3);
+    
     // check sub1's metadata in snapshot s2
     FileStatus statusOfS2 = hdfs.getFileStatus(new Path(sub,
         HdfsConstants.DOT_SNAPSHOT_DIR + "/s2"));
@@ -563,6 +649,8 @@ public class TestSnapshotDeletion {
     
     // delete snapshot s2
     hdfs.deleteSnapshot(sub, "s2");
+    checkQuotaUsageComputation(sub, 3, BLOCKSIZE * 3);
+    
     // check sub1's metadata in snapshot s1
     FileStatus statusOfS1 = hdfs.getFileStatus(new Path(sub,
         HdfsConstants.DOT_SNAPSHOT_DIR + "/s1"));
@@ -584,22 +672,33 @@ public class TestSnapshotDeletion {
     
     // create snapshot s0 on sub
     SnapshotTestHelper.createSnapshot(hdfs, sub, "s0");
+    checkQuotaUsageComputation(sub, 5, BLOCKSIZE * 6);
     // make some changes on both sub and subsub
     final Path subFile1 = new Path(sub, "file1");
     final Path subsubFile1 = new Path(subsub, "file1");
     DFSTestUtil.createFile(hdfs, subFile1, BLOCKSIZE, REPLICATION_1, seed);
     DFSTestUtil.createFile(hdfs, subsubFile1, BLOCKSIZE, REPLICATION, seed);
+    checkQuotaUsageComputation(sub, 8, BLOCKSIZE * 11);
     
     // create snapshot s1 on sub
     SnapshotTestHelper.createSnapshot(hdfs, sub, "s1");
+    checkQuotaUsageComputation(sub, 9, BLOCKSIZE * 11);
     
     // create snapshot s2 on dir
     SnapshotTestHelper.createSnapshot(hdfs, dir, "s2");
+    checkQuotaUsageComputation(dir, 11, BLOCKSIZE * 11);
+    checkQuotaUsageComputation(sub, 9, BLOCKSIZE * 11);
+    
     // make changes on subsub and subsubFile1
     hdfs.setOwner(subsub, "unknown", "unknown");
     hdfs.setReplication(subsubFile1, REPLICATION_1);
+    checkQuotaUsageComputation(dir, 13, BLOCKSIZE * 11);
+    checkQuotaUsageComputation(sub, 11, BLOCKSIZE * 11);
+    
     // make changes on sub
     hdfs.delete(subFile1, true);
+    checkQuotaUsageComputation(dir, 15, BLOCKSIZE * 11);
+    checkQuotaUsageComputation(sub, 13, BLOCKSIZE * 11);
     
     Path subsubSnapshotCopy = SnapshotTestHelper.getSnapshotPath(dir, "s2",
         sub.getName() + Path.SEPARATOR + subsub.getName());
@@ -618,6 +717,8 @@ public class TestSnapshotDeletion {
     
     // delete snapshot s2
     hdfs.deleteSnapshot(dir, "s2");
+    checkQuotaUsageComputation(dir, 13, BLOCKSIZE * 11);
+    checkQuotaUsageComputation(sub, 12, BLOCKSIZE * 11);
     
     // no snapshot copy for s2
     try {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java?rev=1455396&r1=1455395&r2=1455396&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java Tue Mar 12 02:41:25 2013
@@ -86,7 +86,8 @@ public class TestDiff {
     // make modifications to current and record the diff
     final List<INode> current = new ArrayList<INode>(previous);
     
-    final List<Diff<byte[], INode>> diffs = new ArrayList<Diff<byte[], INode>>();
+    final List<Diff<byte[], INode>> diffs = 
+        new ArrayList<Diff<byte[], INode>>();
     for(int j = 0; j < 5; j++) {
       diffs.add(new Diff<byte[], INode>());
     }
@@ -246,7 +247,8 @@ public class TestDiff {
     return new INodeDirectory(n, name, PERM, 0L);
   }
 
-  static void create(INode inode, final List<INode> current, Diff<byte[], INode> diff) {
+  static void create(INode inode, final List<INode> current,
+      Diff<byte[], INode> diff) {
     final int i = Diff.search(current, inode.getKey());
     Assert.assertTrue(i < 0);
     current.add(-i - 1, inode);
@@ -272,7 +274,8 @@ public class TestDiff {
     }
   }
 
-  static void delete(INode inode, final List<INode> current, Diff<byte[], INode> diff) {
+  static void delete(INode inode, final List<INode> current,
+      Diff<byte[], INode> diff) {
     final int i = Diff.search(current, inode.getKey());
     current.remove(i);
     if (diff != null) {
@@ -297,7 +300,8 @@ public class TestDiff {
     }
   }
 
-  static void modify(INode inode, final List<INode> current, Diff<byte[], INode> diff) {
+  static void modify(INode inode, final List<INode> current,
+      Diff<byte[], INode> diff) {
     final int i = Diff.search(current, inode.getKey());
     Assert.assertTrue(i >= 0);
     final INodeDirectory oldinode = (INodeDirectory)current.get(i);



Mime
View raw message