hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sra...@apache.org
Subject svn commit: r1100026 [2/3] - in /hadoop/common/trunk: ./ src/java/ src/java/org/apache/hadoop/fs/ src/java/org/apache/hadoop/fs/viewfs/ src/test/core/org/apache/hadoop/fs/ src/test/core/org/apache/hadoop/fs/viewfs/
Date Fri, 06 May 2011 02:11:32 GMT
Added: hadoop/common/trunk/src/java/org/apache/hadoop/fs/viewfs/ViewFs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/java/org/apache/hadoop/fs/viewfs/ViewFs.java?rev=1100026&view=auto
==============================================================================
--- hadoop/common/trunk/src/java/org/apache/hadoop/fs/viewfs/ViewFs.java (added)
+++ hadoop/common/trunk/src/java/org/apache/hadoop/fs/viewfs/ViewFs.java Fri May  6 02:11:31 2011
@@ -0,0 +1,809 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_RRR;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.local.LocalConfigKeys;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.viewfs.InodeTree.INode;
+import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Progressable;
+
+
+/**
+ * ViewFs (extends the AbstractFileSystem interface) implements a client-side
+ * mount table. The viewFs file system is implemented completely in memory on
+ * the client side. The client-side mount table allows a client to provide a 
+ * customized view of a file system namespace that is composed from 
+ * one or more individual file systems (a localFs or Hdfs, S3fs, etc).
+ * For example one could have a mount table that provides links such as
+ * <ul>
+ * <li>  /user          -> hdfs://nnContainingUserDir/user
+ * <li>  /project/foo   -> hdfs://nnProject1/projects/foo
+ * <li>  /project/bar   -> hdfs://nnProject2/projects/bar
+ * <li>  /tmp           -> hdfs://nnTmp/privateTmpForUserXXX
+ * </ul> 
+ * 
+ * ViewFs is specified with the following URI: <b>viewfs:///</b> 
+ * <p>
+ * To use viewfs one would typically set the default file system in the
+ * config  (i.e. fs.default.name = viewfs:///) along with the
+ * mount table config variables as described below. 
+ * 
+ * <p>
+ * <b> ** Config variables to specify the mount table entries ** </b>
+ * <p>
+ * 
+ * The file system is initialized from the standard Hadoop config through
+ * config variables.
+ * See {@link FsConstants} for URI and Scheme constants; 
+ * See {@link Constants} for config var constants; 
+ * see {@link ConfigUtil} for convenient lib.
+ * 
+ * <p>
+ * All the mount table config entries for view fs are prefixed by 
+ * <b>fs.viewfs.mounttable.</b>
+ * For example the above example can be specified with the following
+ *  config variables:
+ *  <ul>
+ *  <li> fs.viewfs.mounttable.default.link./user=
+ *  hdfs://nnContainingUserDir/user
+ *  <li> fs.viewfs.mounttable.default.link./project/foo=
+ *  hdfs://nnProject1/projects/foo
+ *  <li> fs.viewfs.mounttable.default.link./project/bar=
+ *  hdfs://nnProject2/projects/bar
+ *  <li> fs.viewfs.mounttable.default.link./tmp=
+ *  hdfs://nnTmp/privateTmpForUserXXX
+ *  </ul>
+ *  
+ * The default mount table (when no authority is specified) is 
+ * from config variables prefixed by <b>fs.viewFs.mounttable.default </b>
+ * The authority component of a URI can be used to specify a different mount
+ * table. For example,
+ * <ul>
+ * <li>  viewfs://sanjayMountable/
+ * </ul>
+ * is initialized from fs.viewFs.mounttable.sanjayMountable.* config variables.
+ * 
+ *  <p> 
+ *  <b> **** Merge Mounts **** </b>(NOTE: merge mounts are not implemented yet.)
+ *  <p>
+ *  
+ *   One can also use "MergeMounts" to merge several directories (this is
+ *   sometimes called union-mounts or junction-mounts in the literature).
+ *   For example, if the home directories are stored on, say, two file systems
+ *   (because they do not fit on one) then one could specify a mount
+ *   entry such as the following, which merges two dirs:
+ *   <ul>
+ *   <li> /user -> hdfs://nnUser1/user,hdfs://nnUser2/user
+ *   </ul>
+ *  Such a mergeLink can be specified with the following config var where ","
+ *  is used as the separator for each of links to be merged:
+ *  <ul>
+ *  <li> fs.viewfs.mounttable.default.linkMerge./user=
+ *  hdfs://nnUser1/user,hdfs://nnUser2/user
+ *  </ul>
+ *   A special case of the merge mount is where mount table's root is merged
+ *   with the root (slash) of another file system:
+ *   <ul>
+ *   <li>    fs.viewfs.mounttable.default.linkMergeSlash=hdfs://nn99/
+ *   </ul>
+ *   In this case the root of the mount table is merged with the root of
+ *            <b>hdfs://nn99/ </b> 
+ */
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */
+public class ViewFs extends AbstractFileSystem {
+  final long creationTime; // time (epoch ms) at which the mount table was created
+  final UserGroupInformation ugi; // the user/group of user who created mtable
+  final Configuration config; // config from which the mount table was built
+  InodeTree<AbstractFileSystem> fsState;  // the fs state; ie the mount table
+  
+  /**
+   * Builds the AccessControlException thrown whenever a mutating operation
+   * is attempted inside the read-only internal (mount-table) directories.
+   * Fix: insert a separator before "Path=" -- previously the message ran the
+   * operation name and the path together, e.g. "operation=mkdirPath=/foo".
+   * @param operation name of the rejected operation (e.g. "mkdir")
+   * @param p the path on which the operation was attempted
+   */
+  static AccessControlException readOnlyMountTable(final String operation,
+      final String p) {
+    return new AccessControlException( 
+        "InternalDir of ViewFileSystem is readonly; operation=" + operation +
+        " Path=" + p);
+  }
+  static AccessControlException readOnlyMountTable(final String operation,
+      final Path p) {
+    return readOnlyMountTable(operation, p.toString());
+  }
+  
+  
+  /**
+   * A (source path, target URIs) pair describing one entry of the mount
+   * table. Multiple targets imply a merge-mount (not implemented yet).
+   */
+  static public class MountPoint {
+    private Path src;       // the src of the mount
+    private URI[] targets; //  target of the mount; Multiple targets imply mergeMount
+    MountPoint(Path srcPath, URI[] targetURIs) {
+      src = srcPath;
+      targets = targetURIs;
+    }
+    Path getSrc() {
+      return src;
+    }
+    URI[] getTargets() {
+      return targets;
+    }
+  }
+  
+  /**
+   * Creates a ViewFs for the default view URI (viewfs:///), reading the
+   * mount-table entries from the given configuration.
+   * @param conf configuration holding the fs.viewfs.mounttable.* entries
+   * @throws IOException if a target file system cannot be created
+   * @throws URISyntaxException if a configured link URI is malformed
+   */
+  public ViewFs(final Configuration conf) throws IOException,
+      URISyntaxException {
+    this(FsConstants.VIEWFS_URI, conf);
+  }
+  
+  /**
+   * This constructor has the signature needed by
+   * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}.
+   * 
+   * @param theUri which must be that of ViewFs
+   * @param conf configuration holding the mount-table entries
+   * @throws IOException if a target file system cannot be created
+   * @throws URISyntaxException if a configured link URI is malformed
+   */
+  ViewFs(final URI theUri, final Configuration conf) throws IOException,
+      URISyntaxException {
+    super(theUri, FsConstants.VIEWFS_SCHEME, false, -1);
+    creationTime = System.currentTimeMillis();
+    ugi = UserGroupInformation.getCurrentUser();
+    config = conf;
+    // Now build  client side view (i.e. client side mount table) from config.
+    String authority = theUri.getAuthority();
+    fsState = new InodeTree<AbstractFileSystem>(conf, authority) {
+
+      // Leaf mount link: wrap the target fs in a ChRootedFs so paths handed
+      // to it are interpreted relative to the link target, not the
+      // target fs's own root.
+      @Override
+      protected
+      AbstractFileSystem getTargetFileSystem(final URI uri)
+        throws URISyntaxException, UnsupportedFileSystemException {
+          return new ChRootedFs(
+              AbstractFileSystem.createFileSystem(uri, config),
+              new Path(uri.getPath()));
+      }
+
+      // Internal dir of the mount table: served by the read-only
+      // InternalDirOfViewFs defined at the bottom of this file.
+      @Override
+      protected
+      AbstractFileSystem getTargetFileSystem(
+          final INodeDir<AbstractFileSystem> dir) throws URISyntaxException {
+        return new InternalDirOfViewFs(dir, creationTime, ugi, getUri());
+      }
+
+      // Merge mounts are not implemented yet -- see the class javadoc.
+      @Override
+      protected
+      AbstractFileSystem getTargetFileSystem(URI[] mergeFsURIList)
+          throws URISyntaxException, UnsupportedFileSystemException {
+        throw new UnsupportedFileSystemException("mergefs not implemented yet");
+        // return MergeFs.createMergeFs(mergeFsURIList, config);
+      }
+    };
+  }
+
+  @Override
+  public FsServerDefaults getServerDefaults() throws IOException {
+    // ViewFs spans many file systems; report the local-fs defaults.
+    return LocalConfigKeys.getServerDefaults(); 
+  }
+
+  @Override
+  public int getUriDefaultPort() {
+    // viewfs URIs carry no port.
+    return -1;
+  }
+ 
+  /**
+   * Resolves a viewfs path through the mount table: internal mount-table
+   * directories resolve to themselves, anything else is delegated to the
+   * target file system of the matching mount link.
+   */
+  @Override
+  public Path resolvePath(final Path f) throws FileNotFoundException,
+          AccessControlException, UnresolvedLinkException, IOException {
+    final InodeTree.ResolveResult<AbstractFileSystem> resolved =
+        fsState.resolve(getUriPath(f), true);
+    return resolved.isInternalDir()
+        ? f
+        : resolved.targetFileSystem.resolvePath(resolved.remainingPath);
+  }
+  
+  @Override
+  public FSDataOutputStream createInternal(final Path f,
+      final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
+      final int bufferSize, final short replication, final long blockSize,
+      final Progressable progress, final int bytesPerChecksum,
+      final boolean createParent) throws AccessControlException,
+      FileAlreadyExistsException, FileNotFoundException,
+      ParentNotDirectoryException, UnsupportedFileSystemException,
+      UnresolvedLinkException, IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res;
+    try {
+      res = fsState.resolve(getUriPath(f), false);
+    } catch (FileNotFoundException e) {
+      // The parent is missing from the mount table. With createParent the
+      // parent would have to be created inside the read-only mount table,
+      // so reject; otherwise propagate the not-found error unchanged.
+      if (createParent) {
+        throw readOnlyMountTable("create", f);
+      } else {
+        throw e;
+      }
+    }
+    assert(res.remainingPath != null);
+    // Delegate the actual create to the target fs of the resolved link.
+    return res.targetFileSystem.createInternal(res.remainingPath, flag,
+        absolutePermission, bufferSize, replication,
+        blockSize, progress, bytesPerChecksum,
+        createParent);
+  }
+
+  /** Deletes in the target fs; mount points and internal dirs are protected. */
+  @Override
+  public boolean delete(final Path f, final boolean recursive)
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res = 
+      fsState.resolve(getUriPath(f), true);
+    // If internal dir or target is a mount link (ie remainingPath is Slash)
+    if (res.isInternalDir() || res.remainingPath == InodeTree.SlashPath) {
+      throw new AccessControlException(
+          "Cannot delete internal mount table directory: " + f);
+    }
+    return res.targetFileSystem.delete(res.remainingPath, recursive);
+  }
+
+  /** Delegates to whichever target file system the path resolves to. */
+  @Override
+  public BlockLocation[] getFileBlockLocations(final Path f, final long start,
+      final long len) throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException {
+    final InodeTree.ResolveResult<AbstractFileSystem> resolved =
+        fsState.resolve(getUriPath(f), true);
+    return resolved.targetFileSystem.getFileBlockLocations(
+        resolved.remainingPath, start, len);
+  }
+
+  /**
+   * Returns the checksum of the file the mount table resolves {@code f} to.
+   * BUG FIX: the checksum must be requested with the path as seen by the
+   * target file system ({@code res.remainingPath}) -- every other method in
+   * this class does so -- not the original viewfs path {@code f}, which the
+   * target file system cannot resolve.
+   */
+  @Override
+  public FileChecksum getFileChecksum(final Path f)
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res = 
+      fsState.resolve(getUriPath(f), true);
+    return res.targetFileSystem.getFileChecksum(res.remainingPath);
+  }
+
+  @Override
+  public FileStatus getFileStatus(final Path f) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res = 
+      fsState.resolve(getUriPath(f), true);
+
+    // FileStatus#getPath is a fully qualified path relative to the root of
+    // the target file system.
+    // We need to change it to a viewfs URI - relative to root of mount table.
+    
+    // The implementors of RawLocalFileSystem were trying to be very smart:
+    // they implement FileStatus#getOwner lazily -- the object
+    // returned is really a RawLocalFileSystem that expects the
+    // FileStatus#getPath to be unchanged so that it can get the owner when
+    // needed. Hence we need to interpose a new ViewFsFileStatus that works
+    // around that.
+    FileStatus status =  res.targetFileSystem.getFileStatus(res.remainingPath);
+    return new ViewFsFileStatus(status, this.makeQualified(f));
+  }
+
+  @Override
+  public FileStatus getFileLinkStatus(final Path f)
+     throws AccessControlException, FileNotFoundException,
+     UnsupportedFileSystemException, IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res = 
+      fsState.resolve(getUriPath(f), false); // do not follow mount link
+    return res.targetFileSystem.getFileLinkStatus(res.remainingPath);
+  }
+  
+  @Override
+  public FsStatus getFsStatus() throws AccessControlException,
+      FileNotFoundException, IOException {
+    // ViewFs aggregates many file systems; no meaningful capacity/usage
+    // numbers can be reported for the view as a whole, so return zeros.
+    return new FsStatus(0, 0, 0);
+  }
+
+  @Override
+  public RemoteIterator<FileStatus> listStatusIterator(final Path f)
+    throws AccessControlException, FileNotFoundException,
+    UnresolvedLinkException, IOException {
+    final InodeTree.ResolveResult<AbstractFileSystem> res =
+      fsState.resolve(getUriPath(f), true);
+    final RemoteIterator<FileStatus> fsIter =
+      res.targetFileSystem.listStatusIterator(res.remainingPath);
+    if (res.isInternalDir()) {
+      // Internal-dir statuses already carry viewfs paths; no rewriting needed.
+      return fsIter;
+    }
+    
+    // Wrap the target fs iterator so every returned FileStatus is renamed
+    // from the target fs namespace back to the viewfs (mount table) namespace
+    // -- the same fix-up done eagerly in listStatus below.
+    return new RemoteIterator<FileStatus>() {
+      final RemoteIterator<FileStatus> myIter;
+      final ChRootedFs targetFs;
+      { // Init
+          myIter = fsIter;
+          targetFs = (ChRootedFs) res.targetFileSystem;
+      }
+      
+      @Override
+      public boolean hasNext() throws IOException {
+        return myIter.hasNext();
+      }
+      
+      @Override
+      public FileStatus next() throws IOException {
+        FileStatus status =  myIter.next();
+        // Strip the chroot prefix; an empty suffix means the entry is the
+        // listed directory itself.
+        String suffix = targetFs.stripOutRoot(status.getPath());
+        return new ViewFsFileStatus(status, makeQualified(
+            suffix.length() == 0 ? f : new Path(res.resolvedPath, suffix)));
+      }
+    };
+  }
+  
+  @Override
+  public FileStatus[] listStatus(final Path f) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+      fsState.resolve(getUriPath(f), true);
+    
+    FileStatus[] statusLst = res.targetFileSystem.listStatus(res.remainingPath);
+    if (!res.isInternalDir()) {
+      // We need to change the name in the FileStatus as described in
+      // {@link #getFileStatus }
+      ChRootedFs targetFs;
+      targetFs = (ChRootedFs) res.targetFileSystem;
+      int i = 0;
+      for (FileStatus status : statusLst) {
+          // An empty suffix means the entry is the listed path itself.
+          String suffix = targetFs.stripOutRoot(status.getPath());
+          statusLst[i++] = new ViewFsFileStatus(status, this.makeQualified(
+              suffix.length() == 0 ? f : new Path(res.resolvedPath, suffix)));
+      }
+    }
+    return statusLst;
+  }
+
+  @Override
+  public void mkdir(final Path dir, final FsPermission permission,
+      final boolean createParent) throws AccessControlException,
+      FileAlreadyExistsException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    // resolveLastComponent=false: the dir being created does not exist yet,
+    // so resolve only up to its parent and mkdir in that target fs.
+    InodeTree.ResolveResult<AbstractFileSystem> res = 
+      fsState.resolve(getUriPath(dir), false);
+    res.targetFileSystem.mkdir(res.remainingPath, permission, createParent);
+  }
+
+  /** Opens the file in the target fs the mount table resolves {@code f} to. */
+  @Override
+  public FSDataInputStream open(final Path f, final int bufferSize)
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException {
+    final InodeTree.ResolveResult<AbstractFileSystem> resolved =
+        fsState.resolve(getUriPath(f), true);
+    return resolved.targetFileSystem.open(resolved.remainingPath, bufferSize);
+  }
+
+  
+  @Override
+  public void renameInternal(final Path src, final Path dst,
+      final boolean overwrite) throws IOException, UnresolvedLinkException {
+    // passing resolveLastComponent as false to catch renaming a mount point
+    // itself; we need to catch this as an internal operation and fail.
+    InodeTree.ResolveResult<AbstractFileSystem> resSrc = 
+      fsState.resolve(getUriPath(src), false); 
+  
+    if (resSrc.isInternalDir()) {
+      throw new AccessControlException(
+          "Cannot Rename within internal dirs of mount table: it is readOnly");
+    }
+      
+    InodeTree.ResolveResult<AbstractFileSystem> resDst = 
+                                fsState.resolve(getUriPath(dst), false);
+    if (resDst.isInternalDir()) {
+      throw new AccessControlException(
+          "Cannot Rename within internal dirs of mount table: it is readOnly");
+    }
+    
+    /**
+    // Alternate 1: renames within same file system - valid but we disallow
+    // Alternate 2: (as described in next para) - valid but we have disallowed
+    // it.
+    //
+    // Note we compare the URIs. the URIs include the link targets. 
+    // hence we allow renames across mount links as long as the mount links
+    // point to the same target.
+    if (!resSrc.targetFileSystem.getUri().equals(
+              resDst.targetFileSystem.getUri())) {
+      throw new IOException("Renames across Mount points not supported");
+    }
+    */
+    
+    //
+    // Alternate 3 : renames ONLY within the the same mount links.
+    // (reference equality: both paths must resolve to the very same
+    // target file-system instance, i.e. the same mount link)
+    //
+
+    if (resSrc.targetFileSystem !=resDst.targetFileSystem) {
+      throw new IOException("Renames across Mount points not supported");
+    }
+    
+    resSrc.targetFileSystem.renameInternal(resSrc.remainingPath,
+      resDst.remainingPath, overwrite);
+  }
+
+  /** Rename without overwrite; delegates to the 3-arg form. */
+  @Override
+  public void renameInternal(final Path src, final Path dst)
+      throws AccessControlException, FileAlreadyExistsException,
+      FileNotFoundException, ParentNotDirectoryException,
+      UnresolvedLinkException, IOException {
+    renameInternal(src, dst, false);
+  }
+  
+  @Override
+  public boolean supportsSymlinks() {
+    return true;
+  }
+  
+  @Override
+  public void createSymlink(final Path target, final Path link,
+      final boolean createParent) throws IOException, UnresolvedLinkException {
+    InodeTree.ResolveResult<AbstractFileSystem> res;
+    try {
+      res = fsState.resolve(getUriPath(link), false);
+    } catch (FileNotFoundException e) {
+      // Same rationale as createInternal: with createParent the missing
+      // parent would live inside the read-only mount table, so reject.
+      if (createParent) {
+        throw readOnlyMountTable("createSymlink", link);
+      } else {
+        throw e;
+      }
+    }
+    assert(res.remainingPath != null);
+    res.targetFileSystem.createSymlink(target, res.remainingPath,
+        createParent);  
+  }
+
+  @Override
+  public Path getLinkTarget(final Path f) throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res = 
+      fsState.resolve(getUriPath(f), false); // do not follow mount link
+    return res.targetFileSystem.getLinkTarget(res.remainingPath);
+  }
+
+  // The four mutators below share the same shape: resolve through the mount
+  // table, then delegate to the target file system of the matching link.
+  @Override
+  public void setOwner(final Path f, final String username,
+      final String groupname) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res = 
+      fsState.resolve(getUriPath(f), true);
+    res.targetFileSystem.setOwner(res.remainingPath, username, groupname); 
+  }
+
+  @Override
+  public void setPermission(final Path f, final FsPermission permission)
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res = 
+      fsState.resolve(getUriPath(f), true);
+    res.targetFileSystem.setPermission(res.remainingPath, permission); 
+    
+  }
+
+  @Override
+  public boolean setReplication(final Path f, final short replication)
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res = 
+      fsState.resolve(getUriPath(f), true);
+    return res.targetFileSystem.setReplication(res.remainingPath, replication);
+  }
+
+  @Override
+  public void setTimes(final Path f, final long mtime, final long atime)
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res = 
+      fsState.resolve(getUriPath(f), true);
+    res.targetFileSystem.setTimes(res.remainingPath, mtime, atime); 
+  }
+
+  @Override
+  public void setVerifyChecksum(final boolean verifyChecksum)
+      throws AccessControlException, IOException {
+    // This is a file-system-level operation, however ViewFs points to many
+    // file systems. Deliberately a no-op for ViewFs.
+  }
+  
+  /**
+   * @return the entries of this mount table as (src path, target URIs) pairs.
+   */
+  public MountPoint[] getMountPoints() {
+    final List<InodeTree.MountPoint<AbstractFileSystem>> inodeMounts =
+        fsState.getMountPoints();
+    final MountPoint[] result = new MountPoint[inodeMounts.size()];
+    int i = 0;
+    for (InodeTree.MountPoint<AbstractFileSystem> mount : inodeMounts) {
+      result[i++] = new MountPoint(new Path(mount.src),
+          mount.target.targetDirLinkList);
+    }
+    return result;
+  }
+  
+  /**
+   * Collects the delegation tokens of every file system mounted in the
+   * mount table. The result list is pre-sized with the total number of
+   * link targets across all mount points.
+   */
+  @Override
+  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
+    final List<InodeTree.MountPoint<AbstractFileSystem>> mounts =
+        fsState.getMountPoints();
+    int capacity = 0;
+    for (InodeTree.MountPoint<AbstractFileSystem> mount : mounts) {
+      capacity += mount.target.targetDirLinkList.length;
+    }
+    final List<Token<?>> result = new ArrayList<Token<?>>(capacity);
+    for (InodeTree.MountPoint<AbstractFileSystem> mount : mounts) {
+      final List<Token<?>> tokens =
+          mount.target.targetFileSystem.getDelegationTokens(renewer);
+      if (tokens != null) {
+        result.addAll(tokens);
+      }
+    }
+    return result;
+  }
+
+  
+  
+  /*
+   * An instance of this class represents an internal dir of the viewFs,
+   * i.e. an internal dir of the mount table.
+   * It is a read-only mount table and create, mkdir or delete operations
+   * are not allowed.
+   * If called on create or mkdir then this target is the parent of the
+   * directory in which one is trying to create or mkdir; hence
+   * in this case the path name passed in is the last component. 
+   * Otherwise this target is the end point of the path and hence
+   * the path name passed in is null. 
+   */
+  static class InternalDirOfViewFs extends AbstractFileSystem {
+    
+    final InodeTree.INodeDir<AbstractFileSystem>  theInternalDir; // dir served by this fs
+    final long creationTime; // of the the mount table
+    final UserGroupInformation ugi; // the user/group of user who created mtable
+    final URI myUri; // the URI of the outer ViewFs
+    
+    /**
+     * @param dir the internal mount-table directory this fs represents
+     * @param cTime mount-table creation time, reported as mtime/atime
+     * @param ugi owner reported for all entries of this internal dir
+     * @param uri the URI of the enclosing ViewFs
+     */
+    public InternalDirOfViewFs(final InodeTree.INodeDir<AbstractFileSystem> dir,
+        final long cTime, final UserGroupInformation ugi, final URI uri)
+      throws URISyntaxException {
+      super(FsConstants.VIEWFS_URI, FsConstants.VIEWFS_SCHEME, false, -1);
+      theInternalDir = dir;
+      creationTime = cTime;
+      this.ugi = ugi;
+      myUri = uri;
+    }
+
+    // Callers of methods taking the whole internal dir must pass "/";
+    // anything else is an internal implementation error.
+    static private void checkPathIsSlash(final Path f) throws IOException {
+      if (f != InodeTree.SlashPath) {
+        throw new IOException (
+        "Internal implementation error: expected file name to be /" );
+      }
+    }
+
+    // The mount table is read-only: creating files inside it is rejected.
+    @Override
+    public FSDataOutputStream createInternal(final Path f,
+        final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
+        final int bufferSize, final short replication, final long blockSize,
+        final Progressable progress, final int bytesPerChecksum,
+        final boolean createParent) throws AccessControlException,
+        FileAlreadyExistsException, FileNotFoundException,
+        ParentNotDirectoryException, UnsupportedFileSystemException,
+        UnresolvedLinkException, IOException {
+      throw readOnlyMountTable("create", f);
+    }
+
+    // delete is rejected: the mount table is read-only.
+    @Override
+    public boolean delete(final Path f, final boolean recursive)
+        throws AccessControlException, IOException {
+      checkPathIsSlash(f);
+      throw readOnlyMountTable("delete", f);
+    }
+
+    // Internal dirs are directories, never files, so file-only queries fail.
+    @Override
+    public BlockLocation[] getFileBlockLocations(final Path f, final long start,
+        final long len) throws FileNotFoundException, IOException {
+      checkPathIsSlash(f);
+      throw new FileNotFoundException("Path points to dir not a file");
+    }
+
+    @Override
+    public FileChecksum getFileChecksum(final Path f)
+        throws FileNotFoundException, IOException {
+      checkPathIsSlash(f);
+      throw new FileNotFoundException("Path points to dir not a file");
+    }
+
+    // Synthesizes a read-only directory status for the internal dir,
+    // owned by the user who created the mount table, timestamped with the
+    // mount-table creation time.
+    @Override
+    public FileStatus getFileStatus(final Path f) throws IOException {
+      checkPathIsSlash(f);
+      return new FileStatus(0, true, 0, 0, creationTime, creationTime, 
+          PERMISSION_RRR, ugi.getUserName(), ugi.getGroupNames()[0],
+          new Path(theInternalDir.fullPath).makeQualified(
+              myUri, null));
+    }
+    
+    @Override
+    public FileStatus getFileLinkStatus(final Path f)
+        throws FileNotFoundException {
+      // Look up in the internal dir's children - ignore the leading slash.
+      INode<AbstractFileSystem> inode =
+        theInternalDir.children.get(f.toUri().toString().substring(1)); 
+      if (inode == null) {
+        throw new FileNotFoundException(
+            "viewFs internal mount table - missing entry:" + f);
+      }
+      FileStatus result;
+      if (inode instanceof INodeLink) {
+        // A mount link is reported as a symlink to its target.
+        INodeLink<AbstractFileSystem> inodelink = 
+          (INodeLink<AbstractFileSystem>) inode;
+        result = new FileStatus(0, false, 0, 0, creationTime, creationTime,
+            PERMISSION_RRR, ugi.getUserName(), ugi.getGroupNames()[0],
+            inodelink.getTargetLink(),
+            new Path(inode.fullPath).makeQualified(
+                myUri, null));
+      } else {
+        // A nested internal dir is reported as a plain directory.
+        result = new FileStatus(0, true, 0, 0, creationTime, creationTime,
+          PERMISSION_RRR, ugi.getUserName(), ugi.getGroupNames()[0],
+          new Path(inode.fullPath).makeQualified(
+              myUri, null));
+      }
+      return result;
+    }
+    
+    @Override
+    public FsStatus getFsStatus() {
+      // No meaningful capacity/usage for an in-memory internal dir.
+      return new FsStatus(0, 0, 0);
+    }
+
+    @Override
+    public FsServerDefaults getServerDefaults() throws IOException {
+      throw new IOException("FsServerDefaults not implemented yet");
+    }
+
+    @Override
+    public int getUriDefaultPort() {
+      // viewfs URIs carry no port.
+      return -1;
+    }
+
+    // Lists the children of the internal dir as synthesized FileStatus
+    // entries: mount links appear as symlinks, nested internal dirs as
+    // directories (same shapes as getFileLinkStatus above).
+    @Override
+    public FileStatus[] listStatus(final Path f) throws AccessControlException,
+        IOException {
+      checkPathIsSlash(f);
+      FileStatus[] result = new FileStatus[theInternalDir.children.size()];
+      int i = 0;
+      for (Entry<String, INode<AbstractFileSystem>> iEntry : 
+                                          theInternalDir.children.entrySet()) {
+        INode<AbstractFileSystem> inode = iEntry.getValue();
+
+        
+        if (inode instanceof INodeLink ) {
+          INodeLink<AbstractFileSystem> link = 
+            (INodeLink<AbstractFileSystem>) inode;
+
+          result[i++] = new FileStatus(0, false, 0, 0,
+            creationTime, creationTime,
+            PERMISSION_RRR, ugi.getUserName(), ugi.getGroupNames()[0],
+            link.getTargetLink(),
+            new Path(inode.fullPath).makeQualified(
+                myUri, null));
+        } else {
+          result[i++] = new FileStatus(0, true, 0, 0,
+            creationTime, creationTime,
+            PERMISSION_RRR, ugi.getUserName(), ugi.getGroupNames()[0],
+            new Path(inode.fullPath).makeQualified(
+                myUri, null));
+        }
+      }
+      return result;
+    }
+
+    /**
+     * mkdir inside the mount table is rejected as read-only; mkdir of the
+     * root itself (dir == null per the class contract above) reports
+     * "already exists".
+     * Fixes: use short-circuit logical {@code &&} instead of bitwise
+     * {@code &} on the boolean condition, and correct the "exits" typo in
+     * the exception message.
+     */
+    @Override
+    public void mkdir(final Path dir, final FsPermission permission,
+        final boolean createParent) throws AccessControlException,
+        FileAlreadyExistsException {
+      if (theInternalDir.isRoot && dir == null) {
+        throw new FileAlreadyExistsException("/ already exists");
+      }
+      throw readOnlyMountTable("mkdir", dir);
+    }
+
+    // Internal dirs are directories, never files, so open fails.
+    @Override
+    public FSDataInputStream open(final Path f, final int bufferSize)
+        throws FileNotFoundException, IOException {
+      checkPathIsSlash(f);
+      throw new FileNotFoundException("Path points to dir not a file");
+    }
+
+    // rename inside the mount table is rejected: it is read-only.
+    @Override
+    public void renameInternal(final Path src, final Path dst)
+        throws AccessControlException, IOException {
+      checkPathIsSlash(src);
+      checkPathIsSlash(dst);
+      throw readOnlyMountTable("rename", src);     
+    }
+
+    @Override
+    public boolean supportsSymlinks() {
+      // Mount links are exposed as symlinks (see getFileLinkStatus).
+      return true;
+    }
+    
+    // Creating new symlinks in the mount table is rejected: read-only.
+    @Override
+    public void createSymlink(final Path target, final Path link,
+        final boolean createParent) throws AccessControlException {
+      throw readOnlyMountTable("createSymlink", link);    
+    }
+
+    @Override
+    public Path getLinkTarget(final Path f) throws FileNotFoundException,
+        IOException {
+      return getFileLinkStatus(f).getSymlink();
+    }
+
+    // All mutators below are rejected: the mount table is read-only.
+    @Override
+    public void setOwner(final Path f, final String username,
+        final String groupname) throws AccessControlException, IOException {
+      checkPathIsSlash(f);
+      throw readOnlyMountTable("setOwner", f);
+    }
+
+    @Override
+    public void setPermission(final Path f, final FsPermission permission)
+        throws AccessControlException, IOException {
+      checkPathIsSlash(f);
+      throw readOnlyMountTable("setPermission", f);    
+    }
+
+    @Override
+    public boolean setReplication(final Path f, final short replication)
+        throws AccessControlException, IOException {
+      checkPathIsSlash(f);
+      throw readOnlyMountTable("setReplication", f);
+    }
+
+    @Override
+    public void setTimes(final Path f, final long mtime, final long atime)
+        throws AccessControlException, IOException {
+      checkPathIsSlash(f);
+      throw readOnlyMountTable("setTimes", f);    
+    }
+
+    // Also rejected; no path argument, so an empty path is reported.
+    @Override
+    public void setVerifyChecksum(final boolean verifyChecksum)
+        throws AccessControlException {
+      throw readOnlyMountTable("setVerifyChecksum", "");   
+    }
+  }
+}

Added: hadoop/common/trunk/src/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java?rev=1100026&view=auto
==============================================================================
--- hadoop/common/trunk/src/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java (added)
+++ hadoop/common/trunk/src/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java Fri May  6 02:11:31 2011
@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+
+/**
+ * This class is needed to address the  problem described in
+ * {@link ViewFileSystem#getFileStatus(org.apache.hadoop.fs.Path)} and
+ * {@link ViewFs#getFileStatus(org.apache.hadoop.fs.Path)}
+ */
+class ViewFsFileStatus extends FileStatus {
+   final FileStatus myFs;
+   Path modifiedPath;
+   ViewFsFileStatus(FileStatus fs, Path newPath) {
+     myFs = fs;
+     modifiedPath = newPath;
+   }
+   
+   @Override
+   public boolean equals(Object o) {
+     return super.equals(o);
+   }
+   
+   public int hashCode() {
+     return super.hashCode();
+   }
+   
+   @Override
+   public long getLen() {
+     return myFs.getLen();
+   }
+
+   @Override
+   public boolean isFile() {
+     return myFs.isFile();
+   }
+
+   @Override
+   public boolean isDirectory() {
+     return  myFs.isDirectory();
+   }
+   
+   @Override
+   @SuppressWarnings("deprecation")
+   public boolean isDir() {
+     return myFs.isDirectory();
+   }
+   
+   @Override
+   public boolean isSymlink() {
+     return myFs.isSymlink();
+   }
+
+   @Override
+   public long getBlockSize() {
+     return myFs.getBlockSize();
+   }
+
+   @Override
+   public short getReplication() {
+     return myFs.getReplication();
+   }
+
+   @Override
+   public long getModificationTime() {
+     return myFs.getModificationTime();
+   }
+
+   @Override
+   public long getAccessTime() {
+     return myFs.getAccessTime();
+   }
+
+   @Override
+   public FsPermission getPermission() {
+     return myFs.getPermission();
+   }
+   
+   @Override
+   public String getOwner() {
+     return myFs.getOwner();
+   }
+   
+   @Override
+   public String getGroup() {
+     return myFs.getGroup();
+   }
+   
+   @Override
+   public Path getPath() {
+     return modifiedPath;
+   }
+   
+   @Override
+   public void setPath(final Path p) {
+     modifiedPath = p;
+   }
+
+   @Override
+   public Path getSymlink() throws IOException {
+     return myFs.getSymlink();
+   }
+}
+

Modified: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FSMainOperationsBaseTest.java?rev=1100026&r1=1100025&r2=1100026&view=diff
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FSMainOperationsBaseTest.java (original)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FSMainOperationsBaseTest.java Fri May  6 02:11:31 2011
@@ -90,7 +90,6 @@ public abstract class FSMainOperationsBa
   @After
   public void tearDown() throws Exception {
     fSys.delete(new Path(getAbsoluteTestRootPath(fSys), new Path("test")), true);
-    fSys.delete(new Path(LOCAL_FS_ROOT_URI), true);
   }
   
   

Modified: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileContextPermissionBase.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileContextPermissionBase.java?rev=1100026&r1=1100025&r2=1100026&view=diff
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileContextPermissionBase.java (original)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileContextPermissionBase.java Fri May  6 02:11:31 2011
@@ -93,7 +93,8 @@ public abstract class FileContextPermiss
       return;
     }
     String filename = "foo";
-    Path f = createFile(fc, filename);
+    Path f = getTestRootPath(fc, filename);
+    createFile(fc, filename);
     doFilePermissionCheck(FileContext.DEFAULT_PERM.applyUMask(fc.getUMask()),
                         fc.getFileStatus(f).getPermission());
   }
@@ -107,7 +108,8 @@ public abstract class FileContextPermiss
     }
 
     String filename = "foo";
-    Path f = createFile(fc, filename);
+    Path f = getTestRootPath(fc, filename);
+    createFile(fc, f);
 
     try {
       // create files and manipulate them.
@@ -131,7 +133,8 @@ public abstract class FileContextPermiss
     }
 
     String filename = "bar";
-    Path f = createFile(fc, filename);
+    Path f = getTestRootPath(fc, filename);
+    createFile(fc, f);
     List<String> groups = null;
     try {
       groups = getGroups();

Modified: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileContextTestHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileContextTestHelper.java?rev=1100026&r1=1100025&r2=1100026&view=diff
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileContextTestHelper.java (original)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileContextTestHelper.java Fri May  6 02:11:31 2011
@@ -92,7 +92,7 @@ public final class FileContextTestHelper
   /*
    * Create files with numBlocks blocks each with block size blockSize.
    */
-  public static void createFile(FileContext fc, Path path, int numBlocks,
+  public static long createFile(FileContext fc, Path path, int numBlocks,
       CreateOpts... options) throws IOException {
     BlockSize blockSizeOpt = 
       (BlockSize) CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
@@ -103,33 +103,33 @@ public final class FileContextTestHelper
     byte[] data = getFileData(numBlocks, blockSize);
     out.write(data, 0, data.length);
     out.close();
+    return data.length;
   }
 
-  public static void createFile(FileContext fc, Path path, int numBlocks,
+  public static long  createFile(FileContext fc, Path path, int numBlocks,
       int blockSize) throws IOException {
-    createFile(fc, path, numBlocks, CreateOpts.blockSize(blockSize), 
+    return createFile(fc, path, numBlocks, CreateOpts.blockSize(blockSize), 
         CreateOpts.createParent());
   }
 
-  public static void createFile(FileContext fc, Path path) throws IOException {
-    createFile(fc, path, DEFAULT_NUM_BLOCKS, CreateOpts.createParent());
+  public static long createFile(FileContext fc, Path path) throws IOException {
+    return createFile(fc, path, DEFAULT_NUM_BLOCKS, CreateOpts.createParent());
   }
 
-  public static Path createFile(FileContext fc, String name) throws IOException {
+  public static long createFile(FileContext fc, String name) throws IOException {
     Path path = getTestRootPath(fc, name);
-    createFile(fc, path);
-    return path;
+    return createFile(fc, path);
   }
   
-  public static void createFileNonRecursive(FileContext fc, String name)
+  public static long createFileNonRecursive(FileContext fc, String name)
   throws IOException {
     Path path = getTestRootPath(fc, name);
-    createFileNonRecursive(fc, path);
+    return createFileNonRecursive(fc, path);
   }
 
-  public static void createFileNonRecursive(FileContext fc, Path path)
+  public static long createFileNonRecursive(FileContext fc, Path path)
       throws IOException {
-    createFile(fc, path, DEFAULT_NUM_BLOCKS, CreateOpts.donotCreateParent());
+    return createFile(fc, path, DEFAULT_NUM_BLOCKS, CreateOpts.donotCreateParent());
   }
 
   public static void appendToFile(FileContext fc, Path path, int numBlocks,

Modified: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileSystemTestHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileSystemTestHelper.java?rev=1100026&r1=1100025&r2=1100026&view=diff
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileSystemTestHelper.java (original)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileSystemTestHelper.java Fri May  6 02:11:31 2011
@@ -98,7 +98,7 @@ public final class FileSystemTestHelper 
   /*
    * Create files with numBlocks blocks each with block size blockSize.
    */
-  public static void createFile(FileSystem fSys, Path path, int numBlocks,
+  public static long createFile(FileSystem fSys, Path path, int numBlocks,
       int blockSize, boolean createParent) throws IOException {
     FSDataOutputStream out = 
       fSys.create(path, false, 4096, fSys.getDefaultReplication(), blockSize );
@@ -106,21 +106,21 @@ public final class FileSystemTestHelper 
     byte[] data = getFileData(numBlocks, blockSize);
     out.write(data, 0, data.length);
     out.close();
+    return data.length;
   }
 
-  public static void createFile(FileSystem fSys, Path path, int numBlocks,
+  public static long createFile(FileSystem fSys, Path path, int numBlocks,
       int blockSize) throws IOException {
-    createFile(fSys, path, numBlocks, blockSize, true);
+      return createFile(fSys, path, numBlocks, blockSize, true);
     }
 
-  public static void createFile(FileSystem fSys, Path path) throws IOException {
-    createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE, true);
+  public static long createFile(FileSystem fSys, Path path) throws IOException {
+    return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE, true);
   }
 
-  public static Path createFile(FileSystem fSys, String name) throws IOException {
+  public static long createFile(FileSystem fSys, String name) throws IOException {
     Path path = getTestRootPath(fSys, name);
-    createFile(fSys, path);
-    return path;
+    return createFile(fSys, path);
   }
 
   public static boolean exists(FileSystem fSys, Path p) throws IOException {

Added: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java?rev=1100026&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java (added)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java Fri May  6 02:11:31 2011
@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.viewfs.ChRootedFileSystem;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests ChRootedFileSystem: a local file system is chrooted to a test root,
+ * and each operation is verified both through the chrooted view (fSys) and
+ * directly against the underlying target file system (fSysTarget).
+ */
+public class TestChRootedFileSystem {
+  FileSystem fSys; // the chrooted file system under test
+  FileSystem fSysTarget; // the target file system that fSys is chrooted into
+  Path chrootedTo; // directory in fSysTarget that serves as fSys's root
+
+  @Before
+  public void setUp() throws Exception {
+    // create the test root on local_fs
+    Configuration conf = new Configuration();
+    fSysTarget = FileSystem.getLocal(conf);
+    chrootedTo = FileSystemTestHelper.getAbsoluteTestRootPath(fSysTarget);
+    // In case previous test was killed before cleanup
+    fSysTarget.delete(chrootedTo, true);
+    
+    fSysTarget.mkdirs(chrootedTo);
+
+
+    // ChRoot to the root of the testDirectory
+    fSys = new ChRootedFileSystem(fSysTarget, chrootedTo);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    fSysTarget.delete(chrootedTo, true);
+  }
+  
+  /** The chrooted file system's URI should be that of the chroot target. */
+  @Test
+  public void testURI() {
+    URI uri = fSys.getUri();
+    Assert.assertEquals(chrootedTo.toUri(), uri);
+  }
+  
+  /** URI, working directory, home directory and path qualification. */
+  @Test
+  public void testBasicPaths() {
+    URI uri = fSys.getUri();
+    Assert.assertEquals(chrootedTo.toUri(), uri);
+    Assert.assertEquals(fSys.makeQualified(
+        new Path("/user/" + System.getProperty("user.name"))),
+        fSys.getWorkingDirectory());
+    Assert.assertEquals(fSys.makeQualified(
+        new Path("/user/" + System.getProperty("user.name"))),
+        fSys.getHomeDirectory());
+    /*
+     * ChRootedFs has its uri like file:///chrootRoot.
+     * This is questionable since path.makequalified(uri, path) ignores
+     * the pathPart of a uri. So our notion of chrooted URI is questionable.
+     * But if we were to fix Path#makeQualified() then  the next test should
+     *  have been:
+
+    Assert.assertEquals(
+        new Path(chrootedTo + "/foo/bar").makeQualified(
+            FsConstants.LOCAL_FS_URI, null),
+        fSys.makeQualified(new Path( "/foo/bar")));
+    */
+    
+    Assert.assertEquals(
+        new Path("/foo/bar").makeQualified(FsConstants.LOCAL_FS_URI, null),
+        fSys.makeQualified(new Path("/foo/bar")));
+  }
+  
+  /** 
+   * Test modify operations (create, mkdir, delete, etc) 
+   * 
+   * Verify the operation via chrootedfs (ie fSys) and *also* via the
+   *  target file system (ie fSysTarget) that has been chrooted.
+   */
+  @Test
+  public void testCreateDelete() throws IOException {
+    
+
+    // Create file 
+    FileSystemTestHelper.createFile(fSys, "/foo");
+    Assert.assertTrue(fSys.isFile(new Path("/foo")));
+    Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo, "foo")));
+    
+    // Create file with recursive dir
+    FileSystemTestHelper.createFile(fSys, "/newDir/foo");
+    Assert.assertTrue(fSys.isFile(new Path("/newDir/foo")));
+    Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/foo")));
+    
+    // Delete the created file
+    Assert.assertTrue(fSys.delete(new Path("/newDir/foo"), false));
+    Assert.assertFalse(fSys.exists(new Path("/newDir/foo")));
+    Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo, "newDir/foo")));
+    
+    // Create file with a 2 component dirs recursively
+    FileSystemTestHelper.createFile(fSys, "/newDir/newDir2/foo");
+    Assert.assertTrue(fSys.isFile(new Path("/newDir/newDir2/foo")));
+    Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/newDir2/foo")));
+    
+    // Delete the created file
+    Assert.assertTrue(fSys.delete(new Path("/newDir/newDir2/foo"), false));
+    Assert.assertFalse(fSys.exists(new Path("/newDir/newDir2/foo")));
+    Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/newDir2/foo")));
+  }
+  
+  
+  /** mkdir and delete, verified through both the chrooted and target views. */
+  @Test
+  public void testMkdirDelete() throws IOException {
+    fSys.mkdirs(FileSystemTestHelper.getTestRootPath(fSys, "/dirX"));
+    Assert.assertTrue(fSys.isDirectory(new Path("/dirX")));
+    Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"dirX")));
+    
+    fSys.mkdirs(FileSystemTestHelper.getTestRootPath(fSys, "/dirX/dirY"));
+    Assert.assertTrue(fSys.isDirectory(new Path("/dirX/dirY")));
+    Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"dirX/dirY")));
+    
+
+    // Delete the created dir
+    Assert.assertTrue(fSys.delete(new Path("/dirX/dirY"), false));
+    Assert.assertFalse(fSys.exists(new Path("/dirX/dirY")));
+    Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"dirX/dirY")));
+    
+    Assert.assertTrue(fSys.delete(new Path("/dirX"), false));
+    Assert.assertFalse(fSys.exists(new Path("/dirX")));
+    Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"dirX")));
+    
+  }
+  /** Rename of a file and of a dir within the chroot. */
+  @Test
+  public void testRename() throws IOException {
+    // Rename a file
+    FileSystemTestHelper.createFile(fSys, "/newDir/foo");
+    fSys.rename(new Path("/newDir/foo"), new Path("/newDir/fooBar"));
+    Assert.assertFalse(fSys.exists(new Path("/newDir/foo")));
+    Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/foo")));
+    Assert.assertTrue(fSys.isFile(FileSystemTestHelper.getTestRootPath(fSys,"/newDir/fooBar")));
+    Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/fooBar")));
+    
+    
+    // Rename a dir
+    fSys.mkdirs(new Path("/newDir/dirFoo"));
+    fSys.rename(new Path("/newDir/dirFoo"), new Path("/newDir/dirFooBar"));
+    Assert.assertFalse(fSys.exists(new Path("/newDir/dirFoo")));
+    Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/dirFoo")));
+    Assert.assertTrue(fSys.isDirectory(FileSystemTestHelper.getTestRootPath(fSys,"/newDir/dirFooBar")));
+    Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"newDir/dirFooBar")));
+  }
+  
+  
+  /**
+   * We would have liked renames across file systems to fail but 
+   * unfortunately there is no way to distinguish the two file systems 
+   * @throws IOException
+   */
+  @Test
+  public void testRenameAcrossFs() throws IOException {
+    fSys.mkdirs(new Path("/newDir/dirFoo"));
+    fSys.rename(new Path("/newDir/dirFoo"), new Path("file:///tmp/dirFooBar"));
+    FileSystemTestHelper.isDir(fSys, new Path("/tmp/dirFooBar"));
+  }
+ 
+  
+  
+  /** getFileStatus/listStatus on the chroot root and its children. */
+  @Test
+  public void testList() throws IOException {
+    
+    FileStatus fs = fSys.getFileStatus(new Path("/"));
+    Assert.assertTrue(fs.isDirectory());
+    //  should return the full path not the chrooted path
+    Assert.assertEquals(fs.getPath(), chrootedTo);
+    
+    // list on Slash
+    
+    FileStatus[] dirPaths = fSys.listStatus(new Path("/"));
+
+    Assert.assertEquals(0, dirPaths.length);
+    
+    
+
+    FileSystemTestHelper.createFile(fSys, "/foo");
+    FileSystemTestHelper.createFile(fSys, "/bar");
+    fSys.mkdirs(new Path("/dirX"));
+    fSys.mkdirs(FileSystemTestHelper.getTestRootPath(fSys, "/dirY"));
+    fSys.mkdirs(new Path("/dirX/dirXX"));
+    
+    dirPaths = fSys.listStatus(new Path("/"));
+    Assert.assertEquals(4, dirPaths.length); // note 2 crc files
+    
+    // Note that the file status paths are the full paths on the target
+    fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "foo"), dirPaths);
+      Assert.assertNotNull(fs);
+      Assert.assertTrue(fs.isFile());
+    fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "bar"), dirPaths);
+      Assert.assertNotNull(fs);
+      Assert.assertTrue(fs.isFile());
+    fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "dirX"), dirPaths);
+      Assert.assertNotNull(fs);
+      Assert.assertTrue(fs.isDirectory());
+    fs = FileSystemTestHelper.containsPath(new Path(chrootedTo, "dirY"), dirPaths);
+      Assert.assertNotNull(fs);
+      Assert.assertTrue(fs.isDirectory());
+  }
+  
+  /** setWorkingDirectory with absolute, relative, ".", ".." and URI paths. */
+  @Test
+  public void testWorkingDirectory() throws Exception {
+
+    // First we cd to our test root
+    fSys.mkdirs(new Path("/testWd"));
+    Path workDir = new Path("/testWd");
+    fSys.setWorkingDirectory(workDir);
+    Assert.assertEquals(workDir, fSys.getWorkingDirectory());
+
+    fSys.setWorkingDirectory(new Path("."));
+    Assert.assertEquals(workDir, fSys.getWorkingDirectory());
+
+    fSys.setWorkingDirectory(new Path(".."));
+    Assert.assertEquals(workDir.getParent(), fSys.getWorkingDirectory());
+    
+    // cd using a relative path
+
+    // Go back to our test root
+    workDir = new Path("/testWd");
+    fSys.setWorkingDirectory(workDir);
+    Assert.assertEquals(workDir, fSys.getWorkingDirectory());
+    
+    Path relativeDir = new Path("existingDir1");
+    Path absoluteDir = new Path(workDir,"existingDir1");
+    fSys.mkdirs(absoluteDir);
+    fSys.setWorkingDirectory(relativeDir);
+    Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
+    // cd using a absolute path
+    absoluteDir = new Path("/test/existingDir2");
+    fSys.mkdirs(absoluteDir);
+    fSys.setWorkingDirectory(absoluteDir);
+    Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
+    
+    // Now open a file relative to the wd we just set above.
+    Path absoluteFooPath = new Path(absoluteDir, "foo");
+    fSys.create(absoluteFooPath).close();
+    fSys.open(new Path("foo")).close();
+    
+    // Now mkdir relative to the dir we cd'ed to
+    fSys.mkdirs(new Path("newDir"));
+    Assert.assertTrue(fSys.isDirectory(new Path(absoluteDir, "newDir")));
+
+    /* Filesystem impls (RawLocal and DistributedFileSystem) do not check
+     * for the existence of the working dir
+    absoluteDir = getTestRootPath(fSys, "nonexistingPath");
+    try {
+      fSys.setWorkingDirectory(absoluteDir);
+      Assert.fail("cd to non existing dir should have failed");
+    } catch (Exception e) {
+      // Exception as expected
+    }
+    */
+    
+    // Try a URI
+    final String LOCAL_FS_ROOT_URI = "file:///tmp/test";
+    absoluteDir = new Path(LOCAL_FS_ROOT_URI + "/existingDir");
+    fSys.mkdirs(absoluteDir);
+    fSys.setWorkingDirectory(absoluteDir);
+    Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
+
+  }
+ 
+}

Added: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestChRootedFs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestChRootedFs.java?rev=1100026&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestChRootedFs.java (added)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestChRootedFs.java Fri May  6 02:11:31 2011
@@ -0,0 +1,292 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.EnumSet;
+
+import static org.apache.hadoop.fs.FileContextTestHelper.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.viewfs.ChRootedFs;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests ChRootedFs (the AbstractFileSystem counterpart of
+ * ChRootedFileSystem): a local file context is chrooted to a test root,
+ * and each operation is verified both through the chrooted view (fc) and
+ * directly against the underlying target file context (fcTarget).
+ */
+public class TestChRootedFs {
+  FileContext fc; // the chrooted file context under test
+  FileContext fcTarget; // the target file context that fc is chrooted into
+  Path chrootedTo; // directory in fcTarget that serves as fc's root
+
+  @Before
+  public void setUp() throws Exception {
+    // create the test root on local_fs
+    fcTarget = FileContext.getLocalFSFileContext();
+    chrootedTo = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
+    // In case previous test was killed before cleanup
+    fcTarget.delete(chrootedTo, true);
+    
+    fcTarget.mkdir(chrootedTo, FileContext.DEFAULT_PERM, true);
+
+    Configuration conf = new Configuration();
+
+    // ChRoot to the root of the testDirectory
+    fc = FileContext.getFileContext(
+        new ChRootedFs(fcTarget.getDefaultFileSystem(), chrootedTo), conf);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    fcTarget.delete(chrootedTo, true);
+  }
+
+  
+  /** URI, working directory, home directory and path qualification. */
+  @Test
+  public void testBasicPaths() {
+    URI uri = fc.getDefaultFileSystem().getUri();
+    Assert.assertEquals(chrootedTo.toUri(), uri);
+    Assert.assertEquals(fc.makeQualified(
+        new Path("/user/" + System.getProperty("user.name"))),
+        fc.getWorkingDirectory());
+    Assert.assertEquals(fc.makeQualified(
+        new Path("/user/" + System.getProperty("user.name"))),
+        fc.getHomeDirectory());
+    /*
+     * ChRootedFs has its uri like file:///chrootRoot.
+     * This is questionable since path.makequalified(uri, path) ignores
+     * the pathPart of a uri. So our notion of chrooted URI is questionable.
+     * But if we were to fix Path#makeQualified() then  the next test should
+     *  have been:
+
+    Assert.assertEquals(
+        new Path(chrootedTo + "/foo/bar").makeQualified(
+            FsConstants.LOCAL_FS_URI, null),
+        fc.makeQualified(new Path( "/foo/bar")));
+    */
+    
+    Assert.assertEquals(
+        new Path("/foo/bar").makeQualified(FsConstants.LOCAL_FS_URI, null),
+        fc.makeQualified(new Path("/foo/bar")));
+  }
+  
+  
+  /** 
+   * Test modify operations (create, mkdir, delete, etc) 
+   * 
+   * Verify the operation via chrootedfs (ie fc) and *also* via the
+   *  target file system (ie fclocal) that has been chrooted.
+   */
+  @Test
+  public void testCreateDelete() throws IOException {
+    
+
+    // Create file 
+    FileContextTestHelper.createFileNonRecursive(fc, "/foo");
+    Assert.assertTrue(isFile(fc, new Path("/foo")));
+    Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo, "foo")));
+    
+    // Create file with recursive dir
+    FileContextTestHelper.createFile(fc, "/newDir/foo");
+    Assert.assertTrue(isFile(fc, new Path("/newDir/foo")));
+    Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo,"newDir/foo")));
+    
+    // Delete the created file
+    Assert.assertTrue(fc.delete(new Path("/newDir/foo"), false));
+    Assert.assertFalse(exists(fc, new Path("/newDir/foo")));
+    Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/foo")));
+    
+    // Create file with a 2 component dirs recursively
+    FileContextTestHelper.createFile(fc, "/newDir/newDir2/foo");
+    Assert.assertTrue(isFile(fc, new Path("/newDir/newDir2/foo")));
+    Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo,"newDir/newDir2/foo")));
+    
+    // Delete the created file
+    Assert.assertTrue(fc.delete(new Path("/newDir/newDir2/foo"), false));
+    Assert.assertFalse(exists(fc, new Path("/newDir/newDir2/foo")));
+    Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/newDir2/foo")));
+  }
+  
+  
+  /** mkdir and delete, verified through both the chrooted and target views. */
+  @Test
+  public void testMkdirDelete() throws IOException {
+    fc.mkdir(FileContextTestHelper.getTestRootPath(fc, "/dirX"), FileContext.DEFAULT_PERM, false);
+    Assert.assertTrue(isDir(fc, new Path("/dirX")));
+    Assert.assertTrue(isDir(fcTarget, new Path(chrootedTo,"dirX")));
+    
+    fc.mkdir(FileContextTestHelper.getTestRootPath(fc, "/dirX/dirY"), FileContext.DEFAULT_PERM, false);
+    Assert.assertTrue(isDir(fc, new Path("/dirX/dirY")));
+    Assert.assertTrue(isDir(fcTarget, new Path(chrootedTo,"dirX/dirY")));
+    
+
+    // Delete the created dir
+    Assert.assertTrue(fc.delete(new Path("/dirX/dirY"), false));
+    Assert.assertFalse(exists(fc, new Path("/dirX/dirY")));
+    Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"dirX/dirY")));
+    
+    Assert.assertTrue(fc.delete(new Path("/dirX"), false));
+    Assert.assertFalse(exists(fc, new Path("/dirX")));
+    Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"dirX")));
+    
+  }
+  /** Rename of a file and of a dir within the chroot. */
+  @Test
+  public void testRename() throws IOException {
+    // Rename a file
+    FileContextTestHelper.createFile(fc, "/newDir/foo");
+    fc.rename(new Path("/newDir/foo"), new Path("/newDir/fooBar"));
+    Assert.assertFalse(exists(fc, new Path("/newDir/foo")));
+    Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/foo")));
+    Assert.assertTrue(isFile(fc, FileContextTestHelper.getTestRootPath(fc,"/newDir/fooBar")));
+    Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo,"newDir/fooBar")));
+    
+    
+    // Rename a dir
+    fc.mkdir(new Path("/newDir/dirFoo"), FileContext.DEFAULT_PERM, false);
+    fc.rename(new Path("/newDir/dirFoo"), new Path("/newDir/dirFooBar"));
+    Assert.assertFalse(exists(fc, new Path("/newDir/dirFoo")));
+    Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/dirFoo")));
+    Assert.assertTrue(isDir(fc, FileContextTestHelper.getTestRootPath(fc,"/newDir/dirFooBar")));
+    Assert.assertTrue(isDir(fcTarget, new Path(chrootedTo,"newDir/dirFooBar")));
+  }
+  
+  
+  /**
+   * We would have liked renames across file systems to fail but 
+   * unfortunately there is no way to distinguish the two file systems 
+   * @throws IOException
+   */
+  @Test
+  public void testRenameAcrossFs() throws IOException {
+    fc.mkdir(new Path("/newDir/dirFoo"), FileContext.DEFAULT_PERM, true);
+    // the root will get interpreted to the root of the chrooted fs.
+    fc.rename(new Path("/newDir/dirFoo"), new Path("file:///dirFooBar"));
+    FileContextTestHelper.isDir(fc, new Path("/dirFooBar"));
+  }
+  
+  /** getFileStatus/listStatus on the chroot root and its children. */
+  @Test
+  public void testList() throws IOException {
+    
+    FileStatus fs = fc.getFileStatus(new Path("/"));
+    Assert.assertTrue(fs.isDirectory());
+    //  should return the full path not the chrooted path
+    Assert.assertEquals(fs.getPath(), chrootedTo);
+    
+    // list on Slash
+    
+    FileStatus[] dirPaths = fc.util().listStatus(new Path("/"));
+
+    Assert.assertEquals(0, dirPaths.length);
+    
+    
+
+    FileContextTestHelper.createFileNonRecursive(fc, "/foo");
+    FileContextTestHelper.createFileNonRecursive(fc, "/bar");
+    fc.mkdir(new Path("/dirX"), FileContext.DEFAULT_PERM, false);
+    fc.mkdir(FileContextTestHelper.getTestRootPath(fc, "/dirY"),
+        FileContext.DEFAULT_PERM, false);
+    fc.mkdir(new Path("/dirX/dirXX"), FileContext.DEFAULT_PERM, false);
+    
+    dirPaths = fc.util().listStatus(new Path("/"));
+    Assert.assertEquals(4, dirPaths.length);
+    
+    // Note that the file status paths are the full paths on the target
+    fs = FileContextTestHelper.containsPath(fcTarget, "foo", dirPaths);
+      Assert.assertNotNull(fs);
+      Assert.assertTrue(fs.isFile());
+    fs = FileContextTestHelper.containsPath(fcTarget, "bar", dirPaths);
+      Assert.assertNotNull(fs);
+      Assert.assertTrue(fs.isFile());
+    fs = FileContextTestHelper.containsPath(fcTarget, "dirX", dirPaths);
+      Assert.assertNotNull(fs);
+      Assert.assertTrue(fs.isDirectory());
+    fs = FileContextTestHelper.containsPath(fcTarget, "dirY", dirPaths);
+      Assert.assertNotNull(fs);
+      Assert.assertTrue(fs.isDirectory());
+  }
+  
+  /** setWorkingDirectory with absolute, relative, ".", ".." and URI paths. */
+  @Test
+  public void testWorkingDirectory() throws Exception {
+
+    // First we cd to our test root
+    fc.mkdir(new Path("/testWd"), FileContext.DEFAULT_PERM, false);
+    Path workDir = new Path("/testWd");
+    Path fqWd = fc.makeQualified(workDir);
+    fc.setWorkingDirectory(workDir);
+    Assert.assertEquals(fqWd, fc.getWorkingDirectory());
+
+    fc.setWorkingDirectory(new Path("."));
+    Assert.assertEquals(fqWd, fc.getWorkingDirectory());
+
+    fc.setWorkingDirectory(new Path(".."));
+    Assert.assertEquals(fqWd.getParent(), fc.getWorkingDirectory());
+    
+    // cd using a relative path
+
+    // Go back to our test root
+    workDir = new Path("/testWd");
+    fqWd = fc.makeQualified(workDir);
+    fc.setWorkingDirectory(workDir);
+    Assert.assertEquals(fqWd, fc.getWorkingDirectory());
+    
+    Path relativeDir = new Path("existingDir1");
+    Path absoluteDir = new Path(workDir,"existingDir1");
+    fc.mkdir(absoluteDir, FileContext.DEFAULT_PERM, true);
+    Path fqAbsoluteDir = fc.makeQualified(absoluteDir);
+    fc.setWorkingDirectory(relativeDir);
+    Assert.assertEquals(fqAbsoluteDir, fc.getWorkingDirectory());
+    // cd using a absolute path
+    absoluteDir = new Path("/test/existingDir2");
+    fqAbsoluteDir = fc.makeQualified(absoluteDir);
+    fc.mkdir(absoluteDir, FileContext.DEFAULT_PERM, true);
+    fc.setWorkingDirectory(absoluteDir);
+    Assert.assertEquals(fqAbsoluteDir, fc.getWorkingDirectory());
+    
+    // Now open a file relative to the wd we just set above.
+    Path absolutePath = new Path(absoluteDir, "foo");
+    fc.create(absolutePath, EnumSet.of(CreateFlag.CREATE)).close();
+    fc.open(new Path("foo")).close();
+    
+    // Now mkdir relative to the dir we cd'ed to
+    fc.mkdir(new Path("newDir"), FileContext.DEFAULT_PERM, true);
+    Assert.assertTrue(isDir(fc, new Path(absoluteDir, "newDir")));
+
+    // cd to a non-existing dir: FileContext does check, so this must fail.
+    absoluteDir = getTestRootPath(fc, "nonexistingPath");
+    try {
+      fc.setWorkingDirectory(absoluteDir);
+      Assert.fail("cd to non existing dir should have failed");
+    } catch (Exception e) {
+      // Exception as expected
+    }
+    
+    // Try a URI
+    final String LOCAL_FS_ROOT_URI = "file:///tmp/test";
+    absoluteDir = new Path(LOCAL_FS_ROOT_URI + "/existingDir");
+    fc.mkdir(absoluteDir, FileContext.DEFAULT_PERM, true);
+    fc.setWorkingDirectory(absoluteDir);
+    Assert.assertEquals(absoluteDir, fc.getWorkingDirectory());
+
+  }
+ 
+}

Added: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java?rev=1100026&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java (added)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java Fri May  6 02:11:31 2011
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSMainOperationsBaseTest;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestFSMainOperationsLocalFileSystem extends FSMainOperationsBaseTest {
+   static FileSystem fcTarget;
+  @Before
+  public void setUp() throws Exception {
+    Configuration conf = new Configuration();
+    fcTarget = FileSystem.getLocal(conf);
+    fSys = ViewFileSystemTestSetup.setupForViewFs(fcTarget);
+    super.setUp();
+  }
+  
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+    ViewFileSystemTestSetup.tearDownForViewFs(fcTarget);
+  }
+  
+  @Test
+  @Override
+  public void testWDAbsolute() throws IOException {
+    Path absoluteDir = FileSystemTestHelper.getTestRootPath(fSys,
+        "test/existingDir");
+    fSys.mkdirs(absoluteDir);
+    fSys.setWorkingDirectory(absoluteDir);
+    Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
+
+  }
+}

Added: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFcCreateMkdirLocalFs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFcCreateMkdirLocalFs.java?rev=1100026&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFcCreateMkdirLocalFs.java (added)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFcCreateMkdirLocalFs.java Fri May  6 02:11:31 2011
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+
+import org.apache.hadoop.fs.FileContextCreateMkdirBaseTest;
+
+import org.junit.After;
+import org.junit.Before;
+
+
+public class TestFcCreateMkdirLocalFs  extends 
+  FileContextCreateMkdirBaseTest {
+
+
+  @Before
+  public void setUp() throws Exception {
+    fc = ViewFsTestSetup.setupForViewFsLocalFs();
+    super.setUp();
+  }
+  
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+    ViewFsTestSetup.tearDownForViewFsLocalFs();
+  }
+}
\ No newline at end of file

Added: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java?rev=1100026&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java (added)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java Fri May  6 02:11:31 2011
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextMainOperationsBaseTest;
+import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.viewfs.ConfigUtil;
+
+import org.junit.After;
+import org.junit.Before;
+
+
+public class TestFcMainOperationsLocalFs  extends 
+  FileContextMainOperationsBaseTest {
+
+  FileContext fclocal;
+  Path targetOfTests;
+
+  @Before
+  public void setUp() throws Exception {
+    /**
+     * create the test root on local_fs - the  mount table will point here
+     */
+    fclocal = FileContext.getLocalFSFileContext();
+    targetOfTests = FileContextTestHelper.getTestRootPath(fclocal);
+    // In case previous test was killed before cleanup
+    fclocal.delete(targetOfTests, true);
+    
+    fclocal.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);
+
+    
+    
+    
+    // We create mount table so that the test root on the viewFs points to 
+    // to the test root on the target.
+    // DOing this helps verify the FileStatus.path.
+    //
+    // The test root by default when running eclipse 
+    // is a test dir below the working directory. 
+    // (see FileContextTestHelper).
+    // Since viewFs has no built-in wd, its wd is /user/<username>.
+    // If this test launched via ant (build.xml) the test root is absolute path
+    
+    String srcTestRoot;
+    if (FileContextTestHelper.TEST_ROOT_DIR.startsWith("/")) {
+      srcTestRoot = FileContextTestHelper.TEST_ROOT_DIR;
+    } else {
+      srcTestRoot = "/user/"  + System.getProperty("user.name") + "/" +
+      FileContextTestHelper.TEST_ROOT_DIR;
+    }
+
+    Configuration conf = new Configuration();
+    ConfigUtil.addLink(conf, srcTestRoot,
+        targetOfTests.toUri());
+    
+    fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
+    //System.out.println("SRCOfTests = "+ FileContextTestHelper.getTestRootPath(fc, "test"));
+    //System.out.println("TargetOfTests = "+ targetOfTests.toUri());
+    super.setUp();
+  }
+  
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+    fclocal.delete(targetOfTests, true);
+  }
+}
\ No newline at end of file

Added: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFcPermissionsLocalFs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFcPermissionsLocalFs.java?rev=1100026&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFcPermissionsLocalFs.java (added)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestFcPermissionsLocalFs.java Fri May  6 02:11:31 2011
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+
+import org.apache.hadoop.fs.FileContextPermissionBase;
+
+import org.junit.After;
+import org.junit.Before;
+
+
+public class TestFcPermissionsLocalFs  extends FileContextPermissionBase {
+
+
+  @Before
+  public void setUp() throws Exception {
+    fc = ViewFsTestSetup.setupForViewFsLocalFs();
+    super.setUp();
+  }
+  
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+    ViewFsTestSetup.tearDownForViewFsLocalFs();
+  }
+}
\ No newline at end of file

Added: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java?rev=1100026&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java (added)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java Fri May  6 02:11:31 2011
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+
+import org.junit.After;
+import org.junit.Before;
+
+
+
+/**
+ * 
+ * Test the ViewFileSystemBaseTest using a viewfs with authority: 
+ *    viewfs://mountTableName/
+ *    ie the authority is used to load a mount table.
+ *    The authority name used is "default"
+ *
+ */
+
+public class TestViewFileSystemLocalFileSystem extends ViewFileSystemBaseTest {
+
+
+  @Before
+  public void setUp() throws Exception {
+    // create the test root on local_fs
+    fsTarget = FileSystem.getLocal(new Configuration());
+    super.setUp();
+    
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true);
+    super.tearDown();
+  }
+}

Added: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java?rev=1100026&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java (added)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java Fri May  6 02:11:31 2011
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.Path;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * 
+ * Test the ViewFsBaseTest using a viewfs with authority: 
+ *    viewfs://mountTableName/
+ *    ie the authority is used to load a mount table.
+ *    The authority name used is "default"
+ *
+ */
+public class TestViewFileSystemWithAuthorityLocalFileSystem extends ViewFileSystemBaseTest {
+  URI schemeWithAuthority;
+
+  @Before
+  public void setUp() throws Exception {
+    // create the test root on local_fs
+    fsTarget = FileSystem.getLocal(new Configuration());
+    super.setUp(); // this sets up conf (and fcView which we replace)
+
+    // Now create a viewfs using a mount table called "default"
+    // hence viewfs://default/
+    schemeWithAuthority = 
+      new URI(FsConstants.VIEWFS_SCHEME, "default", "/", null, null);
+    fsView = FileSystem.get(schemeWithAuthority, conf);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true);
+    super.tearDown();
+  }
+ 
+  @Test
+  public void testBasicPaths() {
+    Assert.assertEquals(schemeWithAuthority,
+        fsView.getUri());
+    Assert.assertEquals(fsView.makeQualified(
+        new Path("/user/" + System.getProperty("user.name"))),
+        fsView.getWorkingDirectory());
+    Assert.assertEquals(fsView.makeQualified(
+        new Path("/user/" + System.getProperty("user.name"))),
+        fsView.getHomeDirectory());
+    Assert.assertEquals(
+        new Path("/foo/bar").makeQualified(schemeWithAuthority, null),
+        fsView.makeQualified(new Path("/foo/bar")));
+  }
+}

Added: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java?rev=1100026&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java (added)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java Fri May  6 02:11:31 2011
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.conf.Configuration;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.viewfs.ConfigUtil;
+import org.apache.hadoop.fs.viewfs.InodeTree;
+import org.junit.Test;
+
+
+public class TestViewFsConfig {
+  
+  
+  @Test(expected=FileAlreadyExistsException.class)
+  public void testInvalidConfig() throws IOException, URISyntaxException {
+    Configuration conf = new Configuration();
+    ConfigUtil.addLink(conf, "/internalDir/linkToDir2",
+        new Path("file:///dir2").toUri());
+    ConfigUtil.addLink(conf, "/internalDir/linkToDir2/linkToDir3",
+        new Path("file:///dir3").toUri());
+    
+    class Foo { };
+    
+     new InodeTree<Foo>(conf, null) {
+
+      @Override
+      protected
+      Foo getTargetFileSystem(final URI uri)
+        throws URISyntaxException, UnsupportedFileSystemException {
+          return null;
+      }
+
+      @Override
+      protected
+      Foo getTargetFileSystem(
+          org.apache.hadoop.fs.viewfs.InodeTree.INodeDir<Foo>
+                                          dir)
+        throws URISyntaxException {
+        return null;
+      }
+
+      @Override
+      protected
+      Foo getTargetFileSystem(URI[] mergeFsURIList)
+          throws URISyntaxException, UnsupportedFileSystemException {
+        return null;
+      }
+    };
+  }
+
+}

Added: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFsLocalFs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFsLocalFs.java?rev=1100026&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFsLocalFs.java (added)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/viewfs/TestViewFsLocalFs.java Fri May  6 02:11:31 2011
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import org.apache.hadoop.fs.FileContext;
+
+
+import org.junit.After;
+import org.junit.Before;
+
+
+public class TestViewFsLocalFs extends ViewFsBaseTest {
+
+  @Before
+  public void setUp() throws Exception {
+    // create the test root on local_fs
+    fcTarget = FileContext.getLocalFSFileContext();
+    super.setUp();
+    
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+}



Mime
View raw message