hadoop-hdfs-commits mailing list archives

From hair...@apache.org
Subject svn commit: r984561 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/fs/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/aop/org/apache/hadoop/fs/ src/test/aop/o...
Date Wed, 11 Aug 2010 19:55:25 GMT
Author: hairong
Date: Wed Aug 11 19:55:24 2010
New Revision: 984561

URL: http://svn.apache.org/viewvc?rev=984561&view=rev
Log:
HDFS-202.  HDFS support of listLocatedStatus introduced in HADOOP-6870.  HDFS improves the default implementation by piggybacking block locations onto each file status when listing a directory.  Contributed by Hairong Kuang.
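
For illustration (not part of the commit): with this change a client can obtain block locations as part of the directory listing itself, instead of issuing one getBlockLocations RPC per file afterwards. A minimal consumer-side sketch of the listLocatedStatus API from HADOOP-6870 that this commit implements; the /user/data path is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListLocatedStatusExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // /user/data is a hypothetical directory
    RemoteIterator<LocatedFileStatus> it =
        fs.listLocatedStatus(new Path("/user/data"));
    while (it.hasNext()) {
      LocatedFileStatus stat = it.next();
      // block locations arrive piggybacked on the listing entry;
      // no extra getBlockLocations RPC is needed per file
      for (BlockLocation loc : stat.getBlockLocations()) {
        System.out.println(stat.getPath() + ": " + loc);
      }
    }
  }
}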

Added:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInDFS.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInFileContext.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/fs/TestFiListPath.java
    hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/server/namenode/ListPathAspects.aj
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Wed Aug 11 19:55:24 2010
@@ -29,6 +29,10 @@ Trunk (unreleased changes)
 
     HDFS-1330. Make RPCs to DataNodes timeout. (hairong)
 
+    HDFS-202.  HDFS support of listLocatedStatus introduced in HADOOP-6870.
+    HDFS piggybacks block locations onto each file status when listing a
+    directory.  (hairong)
+
   IMPROVEMENTS
 
     HDFS-1096. fix for prev. commit. (boryas)

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java Wed Aug 11 19:55:24 2010
@@ -25,16 +25,17 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.EnumSet;
-import java.util.Iterator;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
@@ -141,6 +142,17 @@ public class Hdfs extends AbstractFileSy
             getUri(), null)); // fully-qualify path
   }
 
+  private LocatedFileStatus makeQualifiedLocated(
+      HdfsLocatedFileStatus f, Path parent) {
+    return new LocatedFileStatus(f.getLen(), f.isDir(), f.getReplication(),
+        f.getBlockSize(), f.getModificationTime(),
+        f.getAccessTime(),
+        f.getPermission(), f.getOwner(), f.getGroup(),
+        f.isSymlink() ? new Path(f.getSymlink()) : null,
+        (f.getFullPath(parent)).makeQualified(
+            getUri(), null), // fully-qualify path
+        DFSUtil.locatedBlocks2Locations(f.getBlockLocations()));
+  }
 
   @Override
   protected FsStatus getFsStatus() throws IOException {
@@ -153,60 +165,93 @@ public class Hdfs extends AbstractFileSy
   }
 
   @Override
-  protected Iterator<FileStatus> listStatusIterator(final Path f)
+  protected RemoteIterator<LocatedFileStatus> listLocatedStatus(
+      final Path p)
+      throws FileNotFoundException, IOException {
+    return new DirListingIterator<LocatedFileStatus>(p, true) {
+
+      @Override
+      public LocatedFileStatus next() throws IOException {
+        return makeQualifiedLocated((HdfsLocatedFileStatus)getNext(), p);
+      }
+    };
+  }
+  
+  @Override
+  protected RemoteIterator<FileStatus> listStatusIterator(final Path f)
     throws AccessControlException, FileNotFoundException,
     UnresolvedLinkException, IOException {
-    return new Iterator<FileStatus>() {
-      private DirectoryListing thisListing;
-      private int i;
-      private String src;
+    return new DirListingIterator<FileStatus>(f, false) {
 
+      @Override
+      public FileStatus next() throws IOException {
+        return makeQualified(getNext(), f);
+      }
+    };
+  }
 
-      { // initializer
-        src = getUriPath(f);
-        // fetch the first batch of entries in the directory
-        thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME);
-        if (thisListing == null) { // the directory does not exist
-          throw new FileNotFoundException("File " + f + " does not exist.");
-        }
+  /**
+   * This class defines an iterator that returns
+   * the file status of each file/subdirectory of a directory.
+   * 
+   * If needLocation is true, the status of each file also
+   * contains its block locations; errors are surfaced as
+   * IOExceptions from hasNext() and next().
+   * 
+   * @param <T> the type of the file status
+   */
+  abstract private class  DirListingIterator<T extends FileStatus>
+  implements RemoteIterator<T> {
+    private DirectoryListing thisListing;
+    private int i;
+    final private String src;
+    final private boolean needLocation;  // if status should include block locations
+
+    private DirListingIterator(Path p, boolean needLocation)
+      throws IOException {
+      this.src = Hdfs.this.getUriPath(p);
+      this.needLocation = needLocation;
+
+      // fetch the first batch of entries in the directory
+      thisListing = dfs.listPaths(
+          src, HdfsFileStatus.EMPTY_NAME, needLocation);
+      if (thisListing == null) { // the directory does not exist
+        throw new FileNotFoundException("File " + src + " does not exist.");
       }
+    }
 
-      @Override
-      public boolean hasNext() {
+    @Override
+    public boolean hasNext() throws IOException {
+      if (thisListing == null) {
+        return false;
+      }
+      if (i>=thisListing.getPartialListing().length
+          && thisListing.hasMore()) { 
+        // current listing is exhausted & fetch a new listing
+        thisListing = dfs.listPaths(src, thisListing.getLastName(),
+            needLocation);
         if (thisListing == null) {
-          return false;
+          return false; // the directory is deleted
         }
-        try {
-          if (i>=thisListing.getPartialListing().length && thisListing.hasMore()) { 
-            // current listing is exhausted & fetch a new listing
-            thisListing = dfs.listPaths(src, thisListing.getLastName());
-            if (thisListing == null) {
-              return false; // the directory is deleted
-            }
-            i = 0;
-          }
-          return (i<thisListing.getPartialListing().length);
-        } catch (IOException ioe) {
-          return false;
-        }
-      }
-
-      @Override
-      public FileStatus next() {
-        if (hasNext()) {
-          return makeQualified(thisListing.getPartialListing()[i++], f);
-        } 
-        throw new java.util.NoSuchElementException("No more entry in " + f);
+        i = 0;
       }
+      return (i<thisListing.getPartialListing().length);
+    }
 
-      @Override
-      public void remove() {
-        throw new UnsupportedOperationException("Remove is not supported");
-
+    /**
+     * Get the next item in the list
+     * @return the next item in the list
+     * 
+     * @throws IOException if there is any error
+     * @throws NoSuchElementException if no more entries are available
+     */
+    protected HdfsFileStatus getNext() throws IOException {
+      if (hasNext()) {
+        return thisListing.getPartialListing()[i++];
       }
-    };
+      throw new java.util.NoSuchElementException("No more entry in " + src);
+    }
   }
-  
+
   @Override
   protected FileStatus[] listStatus(Path f) 
       throws IOException, UnresolvedLinkException {
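
The DirListingIterator above pulls directory entries from the NameNode one batch at a time (via dfs.listPaths) and backs both listStatusIterator and the new listLocatedStatus. A small usage sketch through FileContext, mirroring the pattern the tests below use; /some/dir is a hypothetical path:

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListStatusIteratorExample {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext();
    RemoteIterator<FileStatus> it = fc.listStatus(new Path("/some/dir"));
    while (it.hasNext()) {   // may fetch the next batch from the NameNode
      FileStatus stat = it.next();
      System.out.println(stat.getPath());
    }
  }
}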

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java Wed Aug 11 19:55:24 2010
@@ -463,33 +463,9 @@ public class DFSClient implements FSCons
   public BlockLocation[] getBlockLocations(String src, long start, 
     long length) throws IOException, UnresolvedLinkException {
     LocatedBlocks blocks = callGetBlockLocations(namenode, src, start, length);
-    if (blocks == null) {
-      return new BlockLocation[0];
-    }
-    int nrBlocks = blocks.locatedBlockCount();
-    BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
-    int idx = 0;
-    for (LocatedBlock blk : blocks.getLocatedBlocks()) {
-      assert idx < nrBlocks : "Incorrect index";
-      DatanodeInfo[] locations = blk.getLocations();
-      String[] hosts = new String[locations.length];
-      String[] names = new String[locations.length];
-      String[] racks = new String[locations.length];
-      for (int hCnt = 0; hCnt < locations.length; hCnt++) {
-        hosts[hCnt] = locations[hCnt].getHostName();
-        names[hCnt] = locations[hCnt].getName();
-        NodeBase node = new NodeBase(names[hCnt], 
-                                     locations[hCnt].getNetworkLocation());
-        racks[hCnt] = node.toString();
-      }
-      blkLocations[idx] = new BlockLocation(names, hosts, racks,
-                                            blk.getStartOffset(),
-                                            blk.getBlockSize());
-      idx++;
-    }
-    return blkLocations;
+    return DFSUtil.locatedBlocks2Locations(blocks);
   }
-
+  
   public DFSInputStream open(String src) 
       throws IOException, UnresolvedLinkException {
     return open(src, conf.getInt("io.file.buffer.size", 4096), true, null);
@@ -860,18 +836,28 @@ public class DFSClient implements FSCons
 
   /**
    * Get a partial listing of the indicated directory
+   * No block locations need to be fetched
+   */
+  public DirectoryListing listPaths(String src,  byte[] startAfter)
+    throws IOException {
+    return listPaths(src, startAfter, false);
+  }
+  
+  /**
+   * Get a partial listing of the indicated directory
    *
    * Recommend to use HdfsFileStatus.EMPTY_NAME as startAfter
    * if the application wants to fetch a listing starting from
    * the first entry in the directory
    *
-   * @see ClientProtocol#getListing(String, byte[])
+   * @see ClientProtocol#getListing(String, byte[], boolean)
    */
-  public DirectoryListing listPaths(String src,  byte[] startAfter) 
+  public DirectoryListing listPaths(String src,  byte[] startAfter,
+      boolean needLocation) 
     throws IOException {
     checkOpen();
     try {
-      return namenode.getListing(src, startAfter);
+      return namenode.getListing(src, startAfter, needLocation);
     } catch(RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
                                      FileNotFoundException.class,
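
Since listPaths returns at most one batch per RPC, a caller pages with getLastName() until hasMore() is false. A sketch of that loop, assuming an already-open DFSClient (printing stands in for real per-entry work):

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

class ListPathsSketch {
  /** Page through src, fetching block locations with each batch. */
  static void listAll(DFSClient dfs, String src) throws IOException {
    DirectoryListing listing =
        dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME, true);
    while (listing != null) {
      for (HdfsFileStatus stat : listing.getPartialListing()) {
        System.out.println(stat.getLocalName());
      }
      if (!listing.hasMore()) {
        break;   // last batch
      }
      // resume after the last returned name; null here means the
      // directory was deleted between RPCs
      listing = dfs.listPaths(src, listing.getLastName(), true);
    }
  }
}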

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java Wed Aug 11 19:55:24 2010
@@ -22,7 +22,12 @@ import java.io.UnsupportedEncodingExcept
 import java.util.StringTokenizer;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.net.NodeBase;
 
 @InterfaceAudience.Private
 public class DFSUtil {
@@ -187,5 +192,40 @@ public class DFSUtil {
     }
     return result;
   }
+  
+  /**
+   * Convert a LocatedBlocks to an array of BlockLocation
+   * @param blocks a LocatedBlocks
+   * @return an array of BlockLocation
+   */
+  public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
+    if (blocks == null) {
+      return new BlockLocation[0];
+    }
+    int nrBlocks = blocks.locatedBlockCount();
+    BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
+    int idx = 0;
+    for (LocatedBlock blk : blocks.getLocatedBlocks()) {
+      assert idx < nrBlocks : "Incorrect index";
+      DatanodeInfo[] locations = blk.getLocations();
+      String[] hosts = new String[locations.length];
+      String[] names = new String[locations.length];
+      String[] racks = new String[locations.length];
+      for (int hCnt = 0; hCnt < locations.length; hCnt++) {
+        hosts[hCnt] = locations[hCnt].getHostName();
+        names[hCnt] = locations[hCnt].getName();
+        NodeBase node = new NodeBase(names[hCnt], 
+                                     locations[hCnt].getNetworkLocation());
+        racks[hCnt] = node.toString();
+      }
+      blkLocations[idx] = new BlockLocation(names, hosts, racks,
+                                            blk.getStartOffset(),
+                                            blk.getBlockSize());
+      idx++;
+    }
+    return blkLocations;
+  }
+
+
 }
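
The helper above is now shared by DFSClient.getBlockLocations and the located-listing code path. A hedged usage sketch; printLocations and its caller are hypothetical:

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;

class LocationsSketch {
  /** Turn a located listing entry into fs-level block locations. */
  static void printLocations(HdfsLocatedFileStatus stat) {
    // null-safe: a null LocatedBlocks yields an empty array
    BlockLocation[] locs =
        DFSUtil.locatedBlocks2Locations(stat.getBlockLocations());
    for (BlockLocation loc : locs) {
      System.out.println("offset=" + loc.getOffset()
          + " length=" + loc.getLength());
    }
  }
}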
 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Wed Aug 11 19:55:24 2010
@@ -37,9 +37,12 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -49,6 +52,7 @@ import org.apache.hadoop.hdfs.protocol.F
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
@@ -341,6 +345,18 @@ public class DistributedFileSystem exten
             getUri(), getWorkingDirectory())); // fully-qualify path
   }
 
+  private LocatedFileStatus makeQualifiedLocated(
+      HdfsLocatedFileStatus f, Path parent) {
+    return new LocatedFileStatus(f.getLen(), f.isDir(), f.getReplication(),
+        f.getBlockSize(), f.getModificationTime(),
+        f.getAccessTime(),
+        f.getPermission(), f.getOwner(), f.getGroup(),
+        null,
+        (f.getFullPath(parent)).makeQualified(
+            getUri(), getWorkingDirectory()), // fully-qualify path
+        DFSUtil.locatedBlocks2Locations(f.getBlockLocations()));
+  }
+
   /**
    * List all the entries of a directory
    *
@@ -401,6 +417,68 @@ public class DistributedFileSystem exten
     return listing.toArray(new FileStatus[listing.size()]);
   }
 
+  @Override
+  protected RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path p,
+      final PathFilter filter)
+  throws IOException {
+    return new RemoteIterator<LocatedFileStatus>() {
+      private DirectoryListing thisListing;
+      private int i;
+      private String src;
+      private LocatedFileStatus curStat = null;
+
+      { // initializer
+        src = getPathName(p);
+        // fetch the first batch of entries in the directory
+        thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME, true);
+        statistics.incrementReadOps(1);
+        if (thisListing == null) { // the directory does not exist
+          throw new FileNotFoundException("File " + p + " does not exist.");
+        }
+      }
+
+      @Override
+      public boolean hasNext() throws IOException {
+        while (curStat == null && hasNextNoFilter()) {
+          LocatedFileStatus next = makeQualifiedLocated(
+              (HdfsLocatedFileStatus)thisListing.getPartialListing()[i++], p);
+          if (filter.accept(next.getPath())) {
+            curStat = next;
+          }
+        }
+        return curStat != null;
+      }
+      
+      /** Check if there is a next item before applying the given filter */
+      private boolean hasNextNoFilter() throws IOException {
+        if (thisListing == null) {
+          return false;
+        }
+        if (i>=thisListing.getPartialListing().length
+            && thisListing.hasMore()) { 
+          // current listing is exhausted & fetch a new listing
+          thisListing = dfs.listPaths(src, thisListing.getLastName(), true);
+          statistics.incrementReadOps(1);
+          if (thisListing == null) {
+            return false;
+          }
+          i = 0;
+        }
+        return (i<thisListing.getPartialListing().length);
+      }
+
+      @Override
+      public LocatedFileStatus next() throws IOException {
+        if (hasNext()) {
+          LocatedFileStatus tmp = curStat;
+          curStat = null;
+          return tmp;
+        } 
+        throw new java.util.NoSuchElementException("No more entry in " + p);
+      }
+    };
+  }
+  
   /**
    * Create a directory with given name and permission, only when
    * parent directory exists.
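
The iterator above buffers one filtered entry in curStat so hasNext() can apply the PathFilter and stay idempotent. A small driver sketch against the public entry point the tests use (fs.listLocatedStatus); the counting helper is hypothetical:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

class CountLocatedEntries {
  /** Count directory entries; hasNext() may page in more batches. */
  static int countEntries(FileSystem fs, Path dir) throws IOException {
    RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(dir);
    int count = 0;
    while (it.hasNext()) {
      it.next();
      count++;
    }
    return count;
  }
}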

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Wed Aug 11 19:55:24 2010
@@ -68,9 +68,9 @@ public interface ClientProtocol extends 
    * Compared to the previous version the following changes have been introduced:
    * (Only the latest change is reflected.
    * The log of historical changes can be retrieved from the svn).
-   * 61: HDFS-1081. Performance optimization on getBlocksLocation().
+   * 62: Allow iterative getListing to piggyback block locations.
    */
-  public static final long versionID = 61L;
+  public static final long versionID = 62L;
   
   ///////////////////////////////////////
   // File contents
@@ -473,6 +473,7 @@ public interface ClientProtocol extends 
    *
    * @param src the directory name
    * @param startAfter the name to start listing after encoded in java UTF8
+   * @param needLocation if the FileStatus should contain block locations
    *
    * @return a partial listing starting after startAfter
    *
@@ -481,7 +482,9 @@ public interface ClientProtocol extends 
    * @throws UnresolvedLinkException If <code>src</code> contains a symlink
    * @throws IOException If an I/O error occurred
    */
-  public DirectoryListing getListing(String src, byte[] startAfter)
+  public DirectoryListing getListing(String src,
+                                     byte[] startAfter,
+                                     boolean needLocation)
       throws AccessControlException, FileNotFoundException,
       UnresolvedLinkException, IOException;
 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java Wed Aug 11 19:55:24 2010
@@ -109,9 +109,16 @@ public class DirectoryListing implements
   public void readFields(DataInput in) throws IOException {
     int numEntries = in.readInt();
     partialListing = new HdfsFileStatus[numEntries];
-    for (int i=0; i<numEntries; i++) {
-      partialListing[i] = new HdfsFileStatus();
-      partialListing[i].readFields(in);
+    if (numEntries != 0) {
+      boolean hasLocation = in.readBoolean();
+      for (int i=0; i<numEntries; i++) {
+        if (hasLocation) {
+          partialListing[i] = new HdfsLocatedFileStatus();
+        } else {
+          partialListing[i] = new HdfsFileStatus();
+        }
+        partialListing[i].readFields(in);
+      }
     }
     remainingEntries = in.readInt();
   }
@@ -119,8 +126,15 @@ public class DirectoryListing implements
   @Override
   public void write(DataOutput out) throws IOException {
     out.writeInt(partialListing.length);
-    for (HdfsFileStatus fileStatus : partialListing) {
-      fileStatus.write(out);
+    if (partialListing.length != 0) {
+      if (partialListing[0] instanceof HdfsLocatedFileStatus) {
+        out.writeBoolean(true);
+      } else {
+        out.writeBoolean(false);
+      }
+      for (HdfsFileStatus fileStatus : partialListing) {
+        fileStatus.write(out);
+      }
     }
     out.writeInt(remainingEntries);
   }
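
Design note: the boolean flag is written once per batch, not once per entry, since all entries in a listing are homogeneous; empty listings skip the flag entirely, leaving their wire format unchanged. A round-trip sketch, assuming DirectoryListing keeps its usual no-arg Writable constructor:

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

class ListingWireFormatSketch {
  /** Serialize and deserialize a listing, exercising the new flag. */
  static DirectoryListing roundTrip(DirectoryListing listing)
      throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    listing.write(out);  // count, [hasLocation flag, entries...], remaining
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    DirectoryListing copy = new DirectoryListing();  // assumed no-arg ctor
    copy.readFields(in);
    return copy;
  }
}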

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java?rev=984561&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java Wed Aug 11 19:55:24 2010
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+/** 
+ * Interface that represents the over the wire information
+ * including block locations for a file.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class HdfsLocatedFileStatus extends HdfsFileStatus {
+  private LocatedBlocks locations;
+  
+  /**
+   * Default constructor
+   */
+  public HdfsLocatedFileStatus() {
+  }
+  
+  /**
+   * Constructor
+   * 
+   * @param length size
+   * @param isdir if this is directory
+   * @param block_replication the file's replication factor
+   * @param blocksize the file's block size
+   * @param modification_time most recent modification time
+   * @param access_time most recent access time
+   * @param permission permission
+   * @param owner owner
+   * @param group group
+   * @param symlink symbolic link
+   * @param path local path name in java UTF8 format 
+   * @param locations block locations
+   */
+  public HdfsLocatedFileStatus(long length, boolean isdir,
+      int block_replication,
+      long blocksize, long modification_time, long access_time,
+      FsPermission permission, String owner, String group,
+      byte[] symlink, byte[] path, LocatedBlocks locations) {
+    super(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path);
+    this.locations = locations;
+  }
+
+  public LocatedBlocks getBlockLocations() {
+    return locations;
+  }
+
+  //////////////////////////////////////////////////
+  // Writable
+  //////////////////////////////////////////////////
+  public void write(DataOutput out) throws IOException {
+    super.write(out);
+    if (!isDir() && !isSymlink()) {
+      locations.write(out);
+    }
+  }
+
+  public void readFields(DataInput in) throws IOException {
+    super.readFields(in);
+    if (!isDir() && !isSymlink()) {
+      locations = new LocatedBlocks();
+      locations.readFields(in);
+    }
+  }
+}

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java Wed Aug 11 19:55:24 2010
@@ -46,7 +46,7 @@ public class LocatedBlocks implements Wr
   private LocatedBlock lastLocatedBlock = null;
   private boolean isLastBlockComplete = false;
 
-  LocatedBlocks() {
+  public LocatedBlocks() {
     fileLength = 0;
     blocks = null;
     underConstruction = false;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed Aug 11 19:55:24 2010
@@ -40,6 +40,8 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
@@ -1083,10 +1085,11 @@ class FSDirectory implements Closeable {
    *
    * @param src the directory name
    * @param startAfter the name to start listing after
+   * @param needLocation if block locations are returned
    * @return a partial listing starting after startAfter
    */
-  DirectoryListing getListing(String src, byte[] startAfter)
-  throws UnresolvedLinkException {
+  DirectoryListing getListing(String src, byte[] startAfter,
+      boolean needLocation) throws UnresolvedLinkException, IOException {
     String srcs = normalizePath(src);
 
     synchronized (rootDir) {
@@ -1095,8 +1098,9 @@ class FSDirectory implements Closeable {
         return null;
       
       if (!targetNode.isDirectory()) {
-        return new DirectoryListing(new HdfsFileStatus[]{createFileStatus(
-            HdfsFileStatus.EMPTY_NAME, targetNode)}, 0);
+        return new DirectoryListing(
+            new HdfsFileStatus[]{createFileStatus(HdfsFileStatus.EMPTY_NAME,
+                targetNode, needLocation)}, 0);
       }
       INodeDirectory dirInode = (INodeDirectory)targetNode;
       List<INode> contents = dirInode.getChildren();
@@ -1106,7 +1110,7 @@ class FSDirectory implements Closeable {
       HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
       for (int i=0; i<numOfListing; i++) {
         INode cur = contents.get(startChild+i);
-        listing[i] = createFileStatus(cur.name, cur);
+        listing[i] = createFileStatus(cur.name, cur, needLocation);
       }
       return new DirectoryListing(
           listing, totalNumChildren-startChild-numOfListing);
@@ -1801,15 +1805,40 @@ class FSDirectory implements Closeable {
   }
 
   /**
+   * create an hdfs file status from an inode
+   * 
+   * @param path the local name
+   * @param node inode
+   * @param needLocation if block locations need to be included or not
+   * @return a file status
+   * @throws IOException if any error occurs
+   */
+  private HdfsFileStatus createFileStatus(byte[] path, INode node,
+      boolean needLocation) throws IOException {
+    if (needLocation) {
+      return createLocatedFileStatus(path, node);
+    } else {
+      return createFileStatus(path, node);
+    }
+  }
+  /**
    * Create FileStatus by file INode 
    */
-   private static HdfsFileStatus createFileStatus(byte[] path, INode node) {
-    // length is zero for directories
-    return new HdfsFileStatus(
-        node instanceof INodeFile ? ((INodeFile)node).computeFileSize(true) : 0, 
+   private HdfsFileStatus createFileStatus(byte[] path, INode node) {
+     long size = 0;     // length is zero for directories
+     short replication = 0;
+     long blocksize = 0;
+     if (node instanceof INodeFile) {
+       INodeFile fileNode = (INodeFile)node;
+       size = fileNode.computeFileSize(true);
+       replication = fileNode.getReplication();
+       blocksize = fileNode.getPreferredBlockSize();
+     }
+     return new HdfsFileStatus(
+        size, 
         node.isDirectory(), 
-        (node.isDirectory() || node.isLink()) ? 0 : ((INodeFile)node).getReplication(), 
-        (node.isDirectory() || node.isLink()) ? 0 : ((INodeFile)node).getPreferredBlockSize(),
+        replication, 
+        blocksize,
         node.getModificationTime(),
         node.getAccessTime(),
         node.getFsPermission(),
@@ -1819,6 +1848,42 @@ class FSDirectory implements Closeable {
         path);
   }
 
+   /**
+    * Create FileStatus with location info by file INode 
+    */
+    private HdfsLocatedFileStatus createLocatedFileStatus(
+        byte[] path, INode node) throws IOException {
+      long size = 0;     // length is zero for directories
+      short replication = 0;
+      long blocksize = 0;
+      LocatedBlocks loc = null;
+      if (node instanceof INodeFile) {
+        INodeFile fileNode = (INodeFile)node;
+        size = fileNode.computeFileSize(true);
+        replication = fileNode.getReplication();
+        blocksize = fileNode.getPreferredBlockSize();
+        loc = getFSNamesystem().getBlockLocationsInternal(
+            fileNode, 0L, size, false);
+        if (loc==null) {
+          loc = new LocatedBlocks();
+        }
+      }
+      return new HdfsLocatedFileStatus(
+          size, 
+          node.isDirectory(), 
+          replication, 
+          blocksize,
+          node.getModificationTime(),
+          node.getAccessTime(),
+          node.getFsPermission(),
+          node.getUserName(),
+          node.getGroupName(),
+          node.isLink() ? ((INodeSymlink)node).getSymlink() : null,
+          path,
+          loc);
+      }
+
+    
   /**
    * Add the given symbolic link to the fs. Record it in the edits log.
    */

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Aug 11 19:55:24 2010
@@ -774,6 +774,12 @@ public class FSNamesystem implements FSC
     if (doAccessTime && isAccessTimeSupported()) {
       dir.setTimes(src, inode, -1, now(), false);
     }
+    return getBlockLocationsInternal(inode, offset, length, needBlockToken);
+  }
+  
+  synchronized LocatedBlocks getBlockLocationsInternal(INodeFile inode,
+      long offset, long length, boolean needBlockToken)
+  throws IOException {
     final BlockInfo[] blocks = inode.getBlocks();
     if (LOG.isDebugEnabled()) {
       LOG.debug("blocks = " + java.util.Arrays.asList(blocks));
@@ -2255,13 +2261,15 @@ public class FSNamesystem implements FSC
    *
    * @param src the directory name
    * @param startAfter the name to start after
+   * @param needLocation if blockLocations need to be returned
    * @return a partial listing starting after startAfter
    * 
    * @throws AccessControlException if access is denied
    * @throws UnresolvedLinkException if symbolic link is encountered
    * @throws IOException if other I/O error occurred
    */
-  public DirectoryListing getListing(String src, byte[] startAfter) 
+  public DirectoryListing getListing(String src, byte[] startAfter,
+      boolean needLocation) 
     throws AccessControlException, UnresolvedLinkException, IOException {
     if (isPermissionEnabled) {
       if (dir.isDir(src)) {
@@ -2276,7 +2284,7 @@ public class FSNamesystem implements FSC
                     Server.getRemoteIp(),
                     "listStatus", src, null, null);
     }
-    return dir.getListing(src, startAfter);
+    return dir.getListing(src, startAfter, needLocation);
   }
 
   /////////////////////////////////////////////////////////

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java Wed Aug 11 19:55:24 2010
@@ -164,7 +164,7 @@ public class ListPathsServlet extends Df
               DirectoryListing thisListing;
               do {
                 assert lastReturnedName != null;
-                thisListing = nn.getListing(p, lastReturnedName);
+                thisListing = nn.getListing(p, lastReturnedName, false);
                 if (thisListing == null) {
                   if (lastReturnedName.length == 0) {
                     LOG

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Aug 11 19:55:24 2010
@@ -975,9 +975,11 @@ public class NameNode implements Namenod
   /**
    */
   @Override
-  public DirectoryListing getListing(String src, byte[] startAfter)
+  public DirectoryListing getListing(String src, byte[] startAfter,
+      boolean needLocation)
   throws IOException {
-    DirectoryListing files = namesystem.getListing(src, startAfter);
+    DirectoryListing files = namesystem.getListing(
+        src, startAfter, needLocation);
     if (files != null) {
       myMetrics.numGetListingOps.inc();
       myMetrics.numFilesInGetListingOps.inc(files.getPartialListing().length);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Wed Aug 11 19:55:24 2010
@@ -267,7 +267,7 @@ public class NamenodeFsck {
       res.totalDirs++;
       do {
         assert lastReturnedName != null;
-        thisListing = namenode.getListing(path, lastReturnedName);
+        thisListing = namenode.getListing(path, lastReturnedName, false);
         if (thisListing == null) {
           return;
         }

Modified: hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/fs/TestFiListPath.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/fs/TestFiListPath.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/fs/TestFiListPath.java (original)
+++ hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/fs/TestFiListPath.java Wed Aug 11 19:55:24 2010
@@ -18,10 +18,12 @@
 package org.apache.hadoop.fs;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Iterator;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -31,7 +33,9 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 /**
@@ -44,38 +48,58 @@ public class TestFiListPath {
   private static final Log LOG = LogFactory.getLog(TestFiListPath.class);
   private static final int LIST_LIMIT = 1;
   
-  private MiniDFSCluster cluster = null;
+  private static MiniDFSCluster cluster = null;
+  private static FileSystem fs;
+  private static Path TEST_PATH = new Path("/tmp");
 
-  @Before
-  public void setup() throws IOException {
+  @BeforeClass
+  public static void setup() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, LIST_LIMIT);
     cluster = new MiniDFSCluster(conf, 1, true, null);
     cluster.waitClusterUp();
+    fs = cluster.getFileSystem();
   }
 
-  @After
-  public void teardown() throws IOException {
+  @AfterClass
+  public static void teardown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  /** Remove the target directory after the getListing RPC */
-  @Test
-  public void testTargetDeletion() throws Exception {
-    FileSystem fs = cluster.getFileSystem();
-    Path parent = new Path("/tmp");
-    fs.mkdirs(parent);
+  @Before
+  public void prepare() throws IOException {
+    fs.mkdirs(TEST_PATH);
     for (int i=0; i<LIST_LIMIT+1; i++) {
-      fs.mkdirs(new Path(parent, "dir"+i));
+      fs.mkdirs(new Path(TEST_PATH, "dir"+i));
     }
+  }
+  
+  @After
+  public void cleanup() throws IOException {
+    fs.delete(TEST_PATH, true);
+  }
+  
+  /** Remove the target directory after the getListing RPC */
+  @Test
+  public void testTargetDeletionForListStatus() throws Exception {
+    LOG.info("Test Target Delete For listStatus");
     try {
-      fs.listStatus(parent);
+      fs.listStatus(TEST_PATH);
       fail("Test should fail with FileNotFoundException");
     } catch (FileNotFoundException e) {
-      assertEquals("File " + parent + " does not exist.", e.getMessage());
+      assertEquals("File " + TEST_PATH + " does not exist.", e.getMessage());
       LOG.info(StringUtils.stringifyException(e));
     }
   }
+  
+  /** Remove the target directory after the getListing RPC */
+  @Test
+  public void testTargetDeletionForListLocatedStatus() throws Exception {
+    LOG.info("Test Target Delete For listLocatedStatus");
+    RemoteIterator<LocatedFileStatus> itor = fs.listLocatedStatus(TEST_PATH);
+    itor.next();
+    assertFalse (itor.hasNext());
+  }
 }

Modified: hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/server/namenode/ListPathAspects.aj
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/server/namenode/ListPathAspects.aj?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/server/namenode/ListPathAspects.aj (original)
+++ hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/server/namenode/ListPathAspects.aj Wed Aug 11 19:55:24 2010
@@ -33,14 +33,15 @@ import org.apache.hadoop.hdfs.server.nam
 public privileged aspect ListPathAspects {
   public static final Log LOG = LogFactory.getLog(ListPathAspects.class);
 
-  /** When removeChild is called during rename, throw exception */
-  pointcut callGetListing(FSNamesystem fd, String src, byte[] startAfter) : 
-    call(DirectoryListing FSNamesystem.getListing(String, byte[]))
+  pointcut callGetListing(FSNamesystem fd, String src,
+                          byte[] startAfter, boolean needLocation) : 
+    call(DirectoryListing FSNamesystem.getListing(String, byte[], boolean))
     && target(fd)
-    && args(src, startAfter);
+    && args(src, startAfter, needLocation);
 
-  after(FSNamesystem fd, String src, byte[] startAfter) 
-    throws IOException, UnresolvedLinkException: callGetListing(fd, src, startAfter) {
+  after(FSNamesystem fd, String src, byte[] startAfter, boolean needLocation) 
+    throws IOException, UnresolvedLinkException: 
+      callGetListing(fd, src, startAfter, needLocation) {
     LOG.info("FI: callGetListing");
     fd.delete(src, true);
   }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java Wed Aug 11 19:55:24 2010
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.Iterator;
 import java.util.Random;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -29,6 +28,7 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -150,7 +150,7 @@ public class TestFileStatus {
         fs.getWorkingDirectory()).toString(), 
         status.getPath().toString());
     
-    Iterator<FileStatus> itor = fc.listStatus(file1);
+    RemoteIterator<FileStatus> itor = fc.listStatus(file1);
     status = itor.next();
     assertEquals(stats[0], status);
     assertFalse(file1 + " should be a file", status.isDirectory());
@@ -206,7 +206,7 @@ public class TestFileStatus {
     assertEquals(dir + " should be zero size using hftp",
         0, hftpfs.getContentSummary(dir).getLength());
     
-    Iterator<FileStatus> itor = fc.listStatus(dir);
+    RemoteIterator<FileStatus> itor = fc.listStatus(dir);
     assertFalse(dir + " should be empty", itor.hasNext());
 
     // create another file that is smaller than a block.

Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInDFS.java?rev=984561&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInDFS.java (added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInDFS.java Wed Aug 11 19:55:24 2010
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.TestListFiles;
+import org.apache.log4j.Level;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+/**
+ * This class tests the FileStatus API.
+ */
+public class TestListFilesInDFS extends TestListFiles {
+  {
+    ((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+
+  private static MiniDFSCluster cluster;
+
+  @BeforeClass
+  public static void testSetUp() throws Exception {
+    cluster = new MiniDFSCluster(conf, 1, true, null);
+    fs = cluster.getFileSystem();
+    fs.delete(TEST_DIR, true);
+  }
+  
+  @AfterClass
+  public static void testShutdown() throws Exception {
+    fs.close();
+    cluster.shutdown();
+  }
+  
+  protected static Path getTestDir() {
+    return new Path("/main_");
+  }
+}

Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInFileContext.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInFileContext.java?rev=984561&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInFileContext.java (added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInFileContext.java Wed Aug 11 19:55:24 2010
@@ -0,0 +1,202 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Random;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.log4j.Level;
+
+import static org.junit.Assert.*;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Test;
+import org.junit.BeforeClass;
+
+/**
+ * This class tests the FileStatus API.
+ */
+public class TestListFilesInFileContext {
+  {
+    ((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  static final long seed = 0xDEADBEEFL;
+
+  final private static Configuration conf = new Configuration();
+  private static MiniDFSCluster cluster;
+  private static FileContext fc;
+  final private static Path TEST_DIR = new Path("/main_");
+  final private static int FILE_LEN = 10;
+  final private static Path FILE1 = new Path(TEST_DIR, "file1");
+  final private static Path DIR1 = new Path(TEST_DIR, "dir1");
+  final private static Path FILE2 = new Path(DIR1, "file2");
+  final private static Path FILE3 = new Path(DIR1, "file3");
+
+  @BeforeClass
+  public static void testSetUp() throws Exception {
+    cluster = new MiniDFSCluster(conf, 1, true, null);
+    fc = FileContext.getFileContext(cluster.getConfiguration());
+    fc.delete(TEST_DIR, true);
+  }
+  
+  private static void writeFile(FileContext fc, Path name, int fileSize)
+  throws IOException {
+    // Create and write a file that contains three blocks of data
+    FSDataOutputStream stm = fc.create(name, EnumSet.of(CreateFlag.CREATE),
+        Options.CreateOpts.createParent());
+    byte[] buffer = new byte[fileSize];
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    stm.close();
+  }
+  
+  @AfterClass
+  public static void testShutdown() throws Exception {
+    cluster.shutdown();
+  }
+  /** Test when input path is a file */
+  @Test
+  public void testFile() throws IOException {
+    fc.mkdir(TEST_DIR, FsPermission.getDefault(), true);
+    writeFile(fc, FILE1, FILE_LEN);
+
+    RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(
+        FILE1, true);
+    LocatedFileStatus stat = itor.next();
+    assertFalse(itor.hasNext());
+    assertTrue(stat.isFile());
+    assertEquals(FILE_LEN, stat.getLen());
+    assertEquals(fc.makeQualified(FILE1), stat.getPath());
+    assertEquals(1, stat.getBlockLocations().length);
+    
+    itor = fc.util().listFiles(FILE1, false);
+    stat = itor.next();
+    assertFalse(itor.hasNext());
+    assertTrue(stat.isFile());
+    assertEquals(FILE_LEN, stat.getLen());
+    assertEquals(fc.makeQualified(FILE1), stat.getPath());
+    assertEquals(1, stat.getBlockLocations().length);
+  }
+
+  @After
+  public void cleanDir() throws IOException {
+    fc.delete(TEST_DIR, true);
+  }
+
+  /** Test when input path is a directory */
+  @Test
+  public void testDirectory() throws IOException {
+    fc.mkdir(DIR1, FsPermission.getDefault(), true);
+
+    // test empty directory
+    RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(
+        DIR1, true);
+    assertFalse(itor.hasNext());
+    itor = fc.util().listFiles(DIR1, false);
+    assertFalse(itor.hasNext());
+    
+    // testing directory with 1 file
+    writeFile(fc, FILE2, FILE_LEN);
+    
+    itor = fc.util().listFiles(DIR1, true);
+    LocatedFileStatus stat = itor.next();
+    assertFalse(itor.hasNext());
+    assertTrue(stat.isFile());
+    assertEquals(FILE_LEN, stat.getLen());
+    assertEquals(fc.makeQualified(FILE2), stat.getPath());
+    assertEquals(1, stat.getBlockLocations().length);
+    
+    itor = fc.util().listFiles(DIR1, false);
+    stat = itor.next();
+    assertFalse(itor.hasNext());
+    assertTrue(stat.isFile());
+    assertEquals(FILE_LEN, stat.getLen());
+    assertEquals(fc.makeQualified(FILE2), stat.getPath());
+    assertEquals(1, stat.getBlockLocations().length);
+
+    // test more complicated directory
+    writeFile(fc, FILE1, FILE_LEN);
+    writeFile(fc, FILE3, FILE_LEN);
+
+    itor = fc.util().listFiles(TEST_DIR, true);
+    stat = itor.next();
+    assertTrue(stat.isFile());
+    assertEquals(fc.makeQualified(FILE2), stat.getPath());
+    stat = itor.next();
+    assertTrue(stat.isFile());
+    assertEquals(fc.makeQualified(FILE3), stat.getPath());
+    stat = itor.next();
+    assertTrue(stat.isFile());
+    assertEquals(fc.makeQualified(FILE1), stat.getPath());
+    assertFalse(itor.hasNext());
+    
+    itor = fc.util().listFiles(TEST_DIR, false);
+    stat = itor.next();
+    assertTrue(stat.isFile());
+    assertEquals(fc.makeQualified(FILE1), stat.getPath());
+    assertFalse(itor.hasNext());
+  }
+
+  /** Test when input path has symbolic links as its children */
+  @Test
+  public void testSymbolicLinks() throws IOException {
+    writeFile(fc, FILE1, FILE_LEN);
+    writeFile(fc, FILE2, FILE_LEN);
+    writeFile(fc, FILE3, FILE_LEN);
+    
+    Path dir4 = new Path(TEST_DIR, "dir4");
+    Path dir5 = new Path(dir4, "dir5");
+    Path file4 = new Path(dir4, "file4");
+    
+    fc.createSymlink(DIR1, dir5, true);
+    fc.createSymlink(FILE1, file4, true);
+    
+    RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(dir4, true);
+    LocatedFileStatus stat = itor.next();
+    assertTrue(stat.isFile());
+    assertEquals(fc.makeQualified(FILE2), stat.getPath());
+    stat = itor.next();
+    assertTrue(stat.isFile());
+    assertEquals(fc.makeQualified(FILE3), stat.getPath());
+    stat = itor.next();
+    assertTrue(stat.isFile());
+    assertEquals(fc.makeQualified(FILE1), stat.getPath());
+    assertFalse(itor.hasNext());
+    
+    itor = fc.util().listFiles(dir4, false);
+    stat = itor.next();
+    assertTrue(stat.isFile());
+    assertEquals(fc.makeQualified(FILE1), stat.getPath());
+    assertFalse(itor.hasNext());
+  }
+}
\ No newline at end of file

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java?rev=984561&r1=984560&r2=984561&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java Wed Aug 11 19:55:24 2010
@@ -75,9 +75,9 @@ public class TestNNMetricFilesInGetListi
     createFile("/tmp1/t2", 3200, (short)3);
     createFile("/tmp2/t1", 3200, (short)3);
     createFile("/tmp2/t2", 3200, (short)3);
-    cluster.getNameNode().getListing("/tmp1", HdfsFileStatus.EMPTY_NAME);
+    cluster.getNameNode().getListing("/tmp1", HdfsFileStatus.EMPTY_NAME, false);
     assertEquals(2,nnMetrics.numFilesInGetListingOps.getCurrentIntervalValue());
-    cluster.getNameNode().getListing("/tmp2", HdfsFileStatus.EMPTY_NAME) ;
+    cluster.getNameNode().getListing("/tmp2", HdfsFileStatus.EMPTY_NAME, false) ;
     assertEquals(4,nnMetrics.numFilesInGetListingOps.getCurrentIntervalValue());
   }
 }


