hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ka...@apache.org
Subject [40/50] [abbrv] hadoop git commit: HDFS-7059. Avoid resolving path multiple times. Contributed by Jing Zhao.
Date Mon, 15 Dec 2014 18:36:31 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 1501fce..cfc7a24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -89,18 +89,11 @@ public class INodesInPath {
     return buf.toString();
   }
 
-  static INodesInPath resolve(final INodeDirectory startingDir,
-      final byte[][] components) throws UnresolvedLinkException {
-    return resolve(startingDir, components, components.length, false);
-  }
-
   /**
-   * Retrieve existing INodes from a path. If existing is big enough to store
-   * all path components (existing and non-existing), then existing INodes
-   * will be stored starting from the root INode into existing[0]; if
-   * existing is not big enough to store all path components, then only the
-   * last existing and non existing INodes will be stored so that
-   * existing[existing.length-1] refers to the INode of the final component.
+   * Retrieve existing INodes from a path. For non-snapshot path,
+   * the number of INodes is equal to the number of path components. For
+   * snapshot path (e.g., /foo/.snapshot/s1/bar), the number of INodes is
+   * (number_of_path_components - 1).
    * 
    * An UnresolvedPathException is always thrown when an intermediate path 
    * component refers to a symbolic link. If the final path component refers 
@@ -110,56 +103,38 @@ public class INodesInPath {
    * <p>
    * Example: <br>
    * Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
-   * following path components: ["","c1","c2","c3"],
-   * 
-   * <p>
-   * <code>getExistingPathINodes(["","c1","c2"], [?])</code> should fill the
-   * array with [c2] <br>
-   * <code>getExistingPathINodes(["","c1","c2","c3"], [?])</code> should fill the
-   * array with [null]
-   * 
-   * <p>
-   * <code>getExistingPathINodes(["","c1","c2"], [?,?])</code> should fill the
-   * array with [c1,c2] <br>
-   * <code>getExistingPathINodes(["","c1","c2","c3"], [?,?])</code> should fill
-   * the array with [c2,null]
+   * following path components: ["","c1","c2","c3"]
    * 
    * <p>
-   * <code>getExistingPathINodes(["","c1","c2"], [?,?,?,?])</code> should fill
-   * the array with [rootINode,c1,c2,null], <br>
-   * <code>getExistingPathINodes(["","c1","c2","c3"], [?,?,?,?])</code> should
+   * <code>getExistingPathINodes(["","c1","c2"])</code> should fill
+   * the array with [rootINode,c1,c2], <br>
+   * <code>getExistingPathINodes(["","c1","c2","c3"])</code> should
    * fill the array with [rootINode,c1,c2,null]
    * 
    * @param startingDir the starting directory
    * @param components array of path component name
-   * @param numOfINodes number of INodes to return
    * @param resolveLink indicates whether UnresolvedLinkException should
    *        be thrown when the path refers to a symbolic link.
    * @return the specified number of existing INodes in the path
    */
   static INodesInPath resolve(final INodeDirectory startingDir,
-      final byte[][] components, final int numOfINodes,
-      final boolean resolveLink) throws UnresolvedLinkException {
+      final byte[][] components, final boolean resolveLink)
+      throws UnresolvedLinkException {
     Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0);
 
     INode curNode = startingDir;
     int count = 0;
-    int index = numOfINodes <= components.length ?
-        numOfINodes - components.length : 0;
     int inodeNum = 0;
-    int capacity = numOfINodes;
-    INode[] inodes = new INode[numOfINodes];
+    INode[] inodes = new INode[components.length];
     boolean isSnapshot = false;
     int snapshotId = CURRENT_STATE_ID;
 
     while (count < components.length && curNode != null) {
-      final boolean lastComp = (count == components.length - 1);      
-      if (index >= 0) {
-        inodes[inodeNum++] = curNode;
-      }
+      final boolean lastComp = (count == components.length - 1);
+      inodes[inodeNum++] = curNode;
       final boolean isRef = curNode.isReference();
       final boolean isDir = curNode.isDirectory();
-      final INodeDirectory dir = isDir? curNode.asDirectory(): null;  
+      final INodeDirectory dir = isDir? curNode.asDirectory(): null;
       if (!isRef && isDir && dir.isWithSnapshot()) {
         //if the path is a non-snapshot path, update the latest snapshot.
         if (!isSnapshot && shouldUpdateLatestId(
@@ -217,11 +192,7 @@ public class INodesInPath {
       if (isDotSnapshotDir(childName) && dir.isSnapshottable()) {
         // skip the ".snapshot" in components
         count++;
-        index++;
         isSnapshot = true;
-        if (index >= 0) { // decrease the capacity by 1 to account for .snapshot
-          capacity--;
-        }
         // check if ".snapshot" is the last element of components
         if (count == components.length - 1) {
           break;
@@ -240,14 +211,12 @@ public class INodesInPath {
             isSnapshot ? snapshotId : CURRENT_STATE_ID);
       }
       count++;
-      index++;
     }
-    if (isSnapshot && capacity < numOfINodes &&
-        !isDotSnapshotDir(components[components.length - 1])) {
+    if (isSnapshot && !isDotSnapshotDir(components[components.length - 1])) {
       // for snapshot path shrink the inode array. however, for path ending with
       // .snapshot, still keep last the null inode in the array
-      INode[] newNodes = new INode[capacity];
-      System.arraycopy(inodes, 0, newNodes, 0, capacity);
+      INode[] newNodes = new INode[components.length - 1];
+      System.arraycopy(inodes, 0, newNodes, 0, newNodes.length);
       inodes = newNodes;
     }
     return new INodesInPath(inodes, components, isSnapshot, snapshotId);
@@ -277,6 +246,24 @@ public class INodesInPath {
     return new INodesInPath(inodes, iip.path, iip.isSnapshot, iip.snapshotId);
   }
 
+  /**
+   * Extend a given INodesInPath with a child INode. The child INode will be
+   * appended to the end of the new INodesInPath.
+   */
+  public static INodesInPath append(INodesInPath iip, INode child,
+      byte[] childName) {
+    Preconditions.checkArgument(!iip.isSnapshot && iip.length() > 0);
+    Preconditions.checkArgument(iip.getLastINode() != null && iip
+        .getLastINode().isDirectory());
+    INode[] inodes = new INode[iip.length() + 1];
+    System.arraycopy(iip.inodes, 0, inodes, 0, inodes.length - 1);
+    inodes[inodes.length - 1] = child;
+    byte[][] path = new byte[iip.path.length + 1][];
+    System.arraycopy(iip.path, 0, path, 0, path.length - 1);
+    path[path.length - 1] = childName;
+    return new INodesInPath(inodes, path, false, iip.snapshotId);
+  }
+
   private final byte[][] path;
   /**
    * Array with the specified number of INodes resolved for a given path.
@@ -348,6 +335,10 @@ public class INodesInPath {
     return path[path.length - 1];
   }
 
+  public byte[][] getPathComponents() {
+    return path;
+  }
+
   /** @return the full path in string form */
   public String getPath() {
     return DFSUtil.byteArray2PathString(path);
@@ -370,6 +361,56 @@ public class INodesInPath {
   }
 
   /**
+   * @param length number of ancestral INodes in the returned INodesInPath
+   *               instance
+   * @return the INodesInPath instance containing ancestral INodes
+   */
+  private INodesInPath getAncestorINodesInPath(int length) {
+    Preconditions.checkArgument(length >= 0 && length < inodes.length);
+    final INode[] anodes = new INode[length];
+    final byte[][] apath;
+    final boolean isSnapshot;
+    final int snapshotId;
+    int dotSnapshotIndex = getDotSnapshotIndex();
+    if (this.isSnapshot && length >= dotSnapshotIndex + 1) {
+      apath = new byte[length + 1][];
+      isSnapshot = true;
+      snapshotId = this.snapshotId;
+    } else {
+      apath = new byte[length][];
+      isSnapshot = false;
+      snapshotId = this.isSnapshot ? CURRENT_STATE_ID : this.snapshotId;
+    }
+    System.arraycopy(this.inodes, 0, anodes, 0, length);
+    System.arraycopy(this.path, 0, apath, 0, apath.length);
+    return new INodesInPath(anodes, apath, isSnapshot, snapshotId);
+  }
+
+  /**
+   * @return an INodesInPath instance containing all the INodes in the parent
+   *         path. We do a deep copy here.
+   */
+  public INodesInPath getParentINodesInPath() {
+    return inodes.length > 1 ? getAncestorINodesInPath(inodes.length - 1) :
+        null;
+  }
+
+  private int getDotSnapshotIndex() {
+    if (isSnapshot) {
+      for (int i = 0; i < path.length; i++) {
+        if (isDotSnapshotDir(path[i])) {
+          return i;
+        }
+      }
+      throw new IllegalStateException("The path " + getPath()
+          + " is a snapshot path but does not contain "
+          + HdfsConstants.DOT_SNAPSHOT_DIR);
+    } else {
+      return -1;
+    }
+  }
+
+  /**
    * @return isSnapshot true for a snapshot path
    */
   boolean isSnapshot() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index e13a5c6..f076215 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -116,7 +116,7 @@ public class LeaseManager {
         final INodeFile cons;
         try {
           cons = this.fsnamesystem.getFSDirectory().getINode(path).asFile();
-            Preconditions.checkState(cons.isUnderConstruction());
+          Preconditions.checkState(cons.isUnderConstruction());
         } catch (UnresolvedLinkException e) {
           throw new AssertionError("Lease files should reside on this FS");
         }
@@ -481,8 +481,10 @@ public class LeaseManager {
       leaseToCheck.getPaths().toArray(leasePaths);
       for(String p : leasePaths) {
         try {
+          INodesInPath iip = fsnamesystem.getFSDirectory().getINodesInPath(p,
+              true);
           boolean completed = fsnamesystem.internalReleaseLease(leaseToCheck, p,
-              HdfsServerConstants.NAMENODE_LEASE_HOLDER);
+              iip, HdfsServerConstants.NAMENODE_LEASE_HOLDER);
           if (LOG.isDebugEnabled()) {
             if (completed) {
               LOG.debug("Lease recovery for " + p + " is complete. File closed.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
index eda0a28..b0275e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
@@ -827,8 +827,8 @@ public abstract class FSAclBaseTest {
     fs.setPermission(path,
       new FsPermissionExtension(FsPermission.
           createImmutable((short)0755), true, true));
-    INode inode = cluster.getNamesystem().getFSDirectory().getNode(
-      path.toUri().getPath(), false);
+    INode inode = cluster.getNamesystem().getFSDirectory().getINode(
+        path.toUri().getPath(), false);
     assertNotNull(inode);
     FsPermission perm = inode.getFsPermission();
     assertNotNull(perm);
@@ -1433,7 +1433,7 @@ public abstract class FSAclBaseTest {
   private static void assertAclFeature(Path pathToCheck,
       boolean expectAclFeature) throws IOException {
     INode inode = cluster.getNamesystem().getFSDirectory()
-      .getNode(pathToCheck.toUri().getPath(), false);
+      .getINode(pathToCheck.toUri().getPath(), false);
     assertNotNull(inode);
     AclFeature aclFeature = inode.getAclFeature();
     if (expectAclFeature) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index aecf55e..5450cf7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -711,8 +711,8 @@ public class TestFsck {
       DFSTestUtil.waitReplication(fs, filePath, (short)1);
       
       // intentionally corrupt NN data structure
-      INodeFile node = (INodeFile)cluster.getNamesystem().dir.getNode(
-          fileName, true);
+      INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
+          (fileName, true);
       final BlockInfo[] blocks = node.getBlocks(); 
       assertEquals(blocks.length, 1);
       blocks[0].setNumBytes(-1L);  // set the block length to be negative

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
index 9b454ea..2f114a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
@@ -62,9 +62,11 @@ public class TestLeaseManager {
    */
   @Test (timeout=1000)
   public void testCheckLeaseNotInfiniteLoop() {
+    FSDirectory dir = Mockito.mock(FSDirectory.class);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.isRunning()).thenReturn(true);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Mockito.when(fsn.getFSDirectory()).thenReturn(dir);
     LeaseManager lm = new LeaseManager(fsn);
 
     //Make sure the leases we are going to add exceed the hard limit

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
index 354bff1..e416e00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
@@ -141,7 +140,8 @@ public class TestSnapshotPathINodes {
     // Get the inodes by resolving the path of a normal file
     String[] names = INode.getPathNames(file1.toString());
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+        components, false);
     // The number of inodes should be equal to components.length
     assertEquals(nodesInPath.length(), components.length);
     // The returned nodesInPath should be non-snapshot
@@ -157,20 +157,10 @@ public class TestSnapshotPathINodes {
     assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(),
         dir.toString());
     
-    // Call getExistingPathINodes and request only one INode. This is used
-    // when identifying the INode for a given path.
-    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
-    assertEquals(nodesInPath.length(), 1);
-    assertSnapshot(nodesInPath, false, null, -1);
-    assertEquals(nodesInPath.getINode(0).getFullPathName(), file1.toString());
-    
-    // Call getExistingPathINodes and request 2 INodes. This is usually used
-    // when identifying the parent INode of a given path.
-    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
-    assertEquals(nodesInPath.length(), 2);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
+    assertEquals(nodesInPath.length(), components.length);
     assertSnapshot(nodesInPath, false, null, -1);
-    assertEquals(nodesInPath.getINode(1).getFullPathName(), file1.toString());
-    assertEquals(nodesInPath.getINode(0).getFullPathName(), sub1.toString());
+    assertEquals(nodesInPath.getLastINode().getFullPathName(), file1.toString());
   }
   
   /** 
@@ -187,7 +177,8 @@ public class TestSnapshotPathINodes {
     String snapshotPath = sub1.toString() + "/.snapshot/s1/file1";
     String[] names = INode.getPathNames(snapshotPath);
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+        components, false);
     // Length of inodes should be (components.length - 1), since we will ignore
     // ".snapshot" 
     assertEquals(nodesInPath.length(), components.length - 1);
@@ -200,27 +191,17 @@ public class TestSnapshotPathINodes {
     assertTrue(snapshotFileNode.getParent().isWithSnapshot());
     
     // Call getExistingPathINodes and request only one INode.
-    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
-    assertEquals(nodesInPath.length(), 1);
-    // The snapshotroot (s1) is not included in inodes. Thus the
-    // snapshotRootIndex should be -1.
-    assertSnapshot(nodesInPath, true, snapshot, -1);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
+    assertEquals(nodesInPath.length(), components.length - 1);
+    assertSnapshot(nodesInPath, true, snapshot, 3);
     // Check the INode for file1 (snapshot file)
     assertINodeFile(nodesInPath.getLastINode(), file1);
-    
-    // Call getExistingPathINodes and request 2 INodes.
-    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
-    assertEquals(nodesInPath.length(), 2);
-    // There should be two INodes in inodes: s1 and snapshot of file1. Thus the
-    // SnapshotRootIndex should be 0.
-    assertSnapshot(nodesInPath, true, snapshot, 0);
-    assertINodeFile(nodesInPath.getLastINode(), file1);
-    
+
     // Resolve the path "/TestSnapshot/sub1/.snapshot"  
     String dotSnapshotPath = sub1.toString() + "/.snapshot";
     names = INode.getPathNames(dotSnapshotPath);
     components = INode.getPathComponents(names);
-    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
     // The number of INodes returned should still be components.length
     // since we put a null in the inode array for ".snapshot"
     assertEquals(nodesInPath.length(), components.length);
@@ -267,7 +248,8 @@ public class TestSnapshotPathINodes {
       String snapshotPath = sub1.toString() + "/.snapshot/s2/file1";
       String[] names = INode.getPathNames(snapshotPath);
       byte[][] components = INode.getPathComponents(names);
-      INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+      INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+          components, false);
       // Length of inodes should be (components.length - 1), since we will ignore
       // ".snapshot" 
       assertEquals(nodesInPath.length(), components.length - 1);
@@ -284,7 +266,8 @@ public class TestSnapshotPathINodes {
     // Check the INodes for path /TestSnapshot/sub1/file1
     String[] names = INode.getPathNames(file1.toString());
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+        components, false);
     // The length of inodes should be equal to components.length
     assertEquals(nodesInPath.length(), components.length);
     // The number of non-null elements should be components.length - 1 since
@@ -333,7 +316,8 @@ public class TestSnapshotPathINodes {
       String snapshotPath = sub1.toString() + "/.snapshot/s4/file3";
       String[] names = INode.getPathNames(snapshotPath);
       byte[][] components = INode.getPathComponents(names);
-      INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+      INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+          components, false);
       // Length of inodes should be (components.length - 1), since we will ignore
       // ".snapshot" 
       assertEquals(nodesInPath.length(), components.length - 1);
@@ -352,7 +336,8 @@ public class TestSnapshotPathINodes {
     // Check the inodes for /TestSnapshot/sub1/file3
     String[] names = INode.getPathNames(file3.toString());
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+        components, false);
     // The number of inodes should be equal to components.length
     assertEquals(nodesInPath.length(), components.length);
 
@@ -378,7 +363,8 @@ public class TestSnapshotPathINodes {
     // First check the INode for /TestSnapshot/sub1/file1
     String[] names = INode.getPathNames(file1.toString());
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+        components, false);
     // The number of inodes should be equal to components.length
     assertEquals(nodesInPath.length(), components.length);
 
@@ -401,7 +387,8 @@ public class TestSnapshotPathINodes {
     String snapshotPath = sub1.toString() + "/.snapshot/s3/file1";
     names = INode.getPathNames(snapshotPath);
     components = INode.getPathComponents(names);
-    INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir,
+        components, false);
     // Length of ssInodes should be (components.length - 1), since we will
     // ignore ".snapshot" 
     assertEquals(ssNodesInPath.length(), components.length - 1);
@@ -419,7 +406,8 @@ public class TestSnapshotPathINodes {
     // Check the INode for /TestSnapshot/sub1/file1 again
     names = INode.getPathNames(file1.toString());
     components = INode.getPathComponents(names);
-    INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir,
+        components, false);
     assertSnapshot(newNodesInPath, false, s3, -1);
     // The number of inodes should be equal to components.length
     assertEquals(newNodesInPath.length(), components.length);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index 62041e8..ba318de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.security.AccessControlException;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -169,7 +168,7 @@ public class TestOpenFilesWithSnapshot {
   }
 
   private void doTestMultipleSnapshots(boolean saveNamespace)
-      throws IOException, AccessControlException {
+      throws IOException {
     Path path = new Path("/test");
     doWriteAndAbort(fs, path);
     fs.createSnapshot(path, "s2");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
index e1ca263..5264cb7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
@@ -40,7 +40,7 @@ import org.junit.Test;
 /**
  * This class tests the replication handling/calculation of snapshots. In
  * particular, {@link INodeFile#getFileReplication()} and
- * {@link INodeFileWithSnapshot#getBlockReplication()} are tested to make sure
+ * {@link INodeFile#getBlockReplication()} are tested to make sure
  * the number of replication is calculated correctly with/without snapshots.
  */
 public class TestSnapshotReplication {
@@ -82,7 +82,7 @@ public class TestSnapshotReplication {
    * Check the replication of a given file. We test both
    * {@link INodeFile#getFileReplication()} and
    * {@link INodeFile#getBlockReplication()}.
-   * 
+   *
    * @param file The given file
    * @param replication The expected replication number
    * @param blockReplication The expected replication number for the block
@@ -132,8 +132,7 @@ public class TestSnapshotReplication {
    *          as their expected replication number stored in their corresponding
    *          INodes
    * @param expectedBlockRep
-   *          The expected replication number that should be returned by
-   *          {@link INodeFileWithSnapshot#getBlockReplication()} of all the INodes
+   *          The expected replication number
    * @throws Exception
    */
   private void checkSnapshotFileReplication(Path currentFile,
@@ -143,8 +142,8 @@ public class TestSnapshotReplication {
     assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
     // Then check replication for every snapshot
     for (Path ss : snapshotRepMap.keySet()) {
-      final INodesInPath iip = fsdir.getLastINodeInPath(ss.toString());
-      final INodeFile ssInode = (INodeFile)iip.getLastINode();
+      final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), true);
+      final INodeFile ssInode = iip.getLastINode().asFile();
       // The replication number derived from the
       // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
       assertEquals(expectedBlockRep, ssInode.getBlockReplication());


Mime
View raw message