hadoop-hdfs-commits mailing list archives

From hair...@apache.org
Subject svn commit: r939091 - in /hadoop/hdfs/trunk: CHANGES.txt src/java/org/apache/hadoop/fs/Hdfs.java src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
Date Wed, 28 Apr 2010 21:06:56 GMT
Author: hairong
Date: Wed Apr 28 21:06:56 2010
New Revision: 939091

URL: http://svn.apache.org/viewvc?rev=939091&view=rev
Log:
HDFS-1091. Implement listStatus that returns an iterator of FileStatus. Contributed by Hairong Kuang.
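
For reference, a minimal usage sketch (not part of this commit; the class name, path, and configuration are illustrative). With this change, FileContext.listStatus(Path) returns an Iterator<FileStatus> backed by the new Hdfs.listStatusIterator, so a large directory can be consumed entry by entry instead of materializing the full FileStatus[] up front:

    import java.util.Iterator;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;

    public class ListStatusExample {
      public static void main(String[] args) throws Exception {
        // Assumes the default file system in the Configuration points at HDFS.
        FileContext fc = FileContext.getFileContext(new Configuration());
        // Entries are fetched from the NameNode in partial listings as the
        // iterator advances, rather than all at once.
        Iterator<FileStatus> itor = fc.listStatus(new Path("/test/mkdirs"));
        while (itor.hasNext()) {
          FileStatus status = itor.next();
          System.out.println(status.getPath() + (status.isDir() ? " (dir)" : ""));
        }
      }
    }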

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=939091&r1=939090&r2=939091&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Wed Apr 28 21:06:56 2010
@@ -33,6 +33,9 @@ Trunk (unreleased changes)
     HDFS-1009. Support Kerberos authorization in HDFSProxy.  (Srikanth
     Sundarrajan via szetszwo)
 
+    HDFS-1091. Implement listStatus that returns an iterator of FileStatus.
+    (hairong)
+
   IMPROVEMENTS
     HDFS-968. Use StringBuilder instead of StringBuffer for better
     performance. (Kay Kay via suresh)

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java?rev=939091&r1=939090&r2=939091&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java Wed Apr 28 21:06:56 2010
@@ -25,6 +25,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.EnumSet;
+import java.util.Iterator;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
 
 public class Hdfs extends AbstractFileSystem {
@@ -147,6 +149,61 @@ public class Hdfs extends AbstractFileSy
   }
 
   @Override
+  protected Iterator<FileStatus> listStatusIterator(final Path f)
+    throws AccessControlException, FileNotFoundException,
+    UnresolvedLinkException, IOException {
+    return new Iterator<FileStatus>() {
+      private DirectoryListing thisListing;
+      private int i;
+      private String src;
+
+
+      { // initializer
+        src = getUriPath(f);
+        // fetch the first batch of entries in the directory
+        thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME);
+        if (thisListing == null) { // the directory does not exist
+          throw new FileNotFoundException("File " + f + " does not exist.");
+        }
+      }
+
+      @Override
+      public boolean hasNext() {
+        if (thisListing == null) {
+          return false;
+        }
+        try {
+          if (i>=thisListing.getPartialListing().length && thisListing.hasMore()) {
+            // current listing is exhausted & fetch a new listing
+            thisListing = dfs.listPaths(src, thisListing.getLastName());
+            if (thisListing == null) {
+              return false; // the directory is deleted
+            }
+            i = 0;
+          }
+          return (i<thisListing.getPartialListing().length);
+        } catch (IOException ioe) {
+          return false;
+        }
+      }
+
+      @Override
+      public FileStatus next() {
+        if (hasNext()) {
+          return makeQualified(thisListing.getPartialListing()[i++], f);
+        } 
+        throw new java.util.NoSuchElementException("No more entry in " + f);
+      }
+
+      @Override
+      public void remove() {
+        throw new UnsupportedOperationException("Remove is not supported");
+
+      }
+    };
+  }
+  
+  @Override
   protected FileStatus[] listStatus(Path f) 
       throws IOException, UnresolvedLinkException {
     String src = getUriPath(f);
@@ -184,7 +241,8 @@ public class Hdfs extends AbstractFileSy
       thisListing = dfs.listPaths(src, thisListing.getLastName());
  
       if (thisListing == null) {
-        break; // the directory is deleted
+        // the directory is deleted
+        throw new FileNotFoundException("File " + f + " does not exist.");
       }
  
       partialListing = thisListing.getPartialListing();

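Aside (a sketch, not part of the commit): the new iterator and the revised listStatus both page through a directory with the same protocol — fetch the first DirectoryListing with HdfsFileStatus.EMPTY_NAME, then keep passing the last name of the previous batch to listPaths until hasMore() is false. Stripped of the iterator plumbing, the pattern looks like this (ListingSketch and listAll are hypothetical names; the DFSClient calls are the ones used in Hdfs.java above):

    import java.io.IOException;

    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.protocol.DirectoryListing;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    public class ListingSketch {
      // Prints every entry of the directory at 'src', one batch at a time.
      static void listAll(DFSClient dfs, String src) throws IOException {
        DirectoryListing thisListing =
            dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME);
        while (thisListing != null) {
          for (HdfsFileStatus status : thisListing.getPartialListing()) {
            System.out.println(status.getLocalName());
          }
          if (!thisListing.hasMore()) {
            break; // no more batches to fetch
          }
          // resume after the last entry returned in the previous batch
          thisListing = dfs.listPaths(src, thisListing.getLastName());
        }
      }
    }
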
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java?rev=939091&r1=939090&r2=939091&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java Wed Apr 28 21:06:56 2010
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Iterator;
 import java.util.Random;
 
 import junit.framework.TestCase;
@@ -26,6 +27,7 @@ import junit.framework.TestCase;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -78,6 +80,7 @@ public class TestFileStatus extends Test
     FileSystem fs = cluster.getFileSystem();
     final HftpFileSystem hftpfs = cluster.getHftpFileSystem();
     final DFSClient dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
+    FileContext fc = FileContext.getFileContext(cluster.getURI(), conf);
     try {
 
       //
@@ -133,6 +136,12 @@ public class TestFileStatus extends Test
       assertEquals(file1.makeQualified(fs.getUri(), 
           fs.getWorkingDirectory()).toString(), 
           status.getPath().toString());
+      
+      Iterator<FileStatus> itor = fc.listStatus(file1);
+      status = itor.next();
+      assertEquals(stats[0], status);
+      assertTrue(file1 + " should be a file", 
+          status.isDir() == false);
 
       // test file status on a directory
       Path dir = new Path("/test/mkdirs");
@@ -144,6 +153,13 @@ public class TestFileStatus extends Test
       } catch (FileNotFoundException fe) {
         assertTrue(fe.getMessage().equals("File " + dir + " does not exist."));
       }
+      
+      try {
+        itor = fc.listStatus(dir);
+        fail("listStatus of non-existent path should fail");
+      } catch (FileNotFoundException fe) {
+        assertTrue(fe.getMessage().equals("File " + dir + " does not exist."));
+      }
       try {
         status = fs.getFileStatus(dir);
         fail("getFileStatus of non-existent path should fail");
@@ -171,6 +187,9 @@ public class TestFileStatus extends Test
           0, fs.getContentSummary(dir).getLength());
       assertEquals(dir + " should be zero size using hftp",
           0, hftpfs.getContentSummary(dir).getLength());
+      
+      itor = fc.listStatus(dir);
+      assertFalse(dir + " should be empty", itor.hasNext());
 
       // create another file that is smaller than a block.
       //
@@ -207,8 +226,13 @@ public class TestFileStatus extends Test
       // test listStatus on a non-empty directory
       stats = fs.listStatus(dir);
       assertEquals(dir + " should have two entries", 2, stats.length);
-       assertEquals(file2.toString(), stats[0].getPath().toString());
-       assertEquals(file3.toString(), stats[1].getPath().toString());
+      assertEquals(file2.toString(), stats[0].getPath().toString());
+      assertEquals(file3.toString(), stats[1].getPath().toString());
+
+      itor = fc.listStatus(dir);
+      assertEquals(file2.toString(), itor.next().getPath().toString());
+      assertEquals(file3.toString(), itor.next().getPath().toString());
+      assertFalse(itor.hasNext());
 
       // test iterative listing
       // now dir has 2 entries, create one more
@@ -221,6 +245,12 @@ public class TestFileStatus extends Test
       assertEquals(file2.toString(), stats[1].getPath().toString());
       assertEquals(file3.toString(), stats[2].getPath().toString());
 
+      itor = fc.listStatus(dir);
+      assertEquals(dir3.toString(), itor.next().getPath().toString());
+      assertEquals(file2.toString(), itor.next().getPath().toString());
+      assertEquals(file3.toString(), itor.next().getPath().toString());
+      assertFalse(itor.hasNext());
+
       // now dir has 3 entries, create two more
       Path dir4 = fs.makeQualified(new Path(dir, "dir4"));
       fs.mkdirs(dir4);
@@ -235,6 +265,14 @@ public class TestFileStatus extends Test
       assertEquals(dir5.toString(), stats[2].getPath().toString());
       assertEquals(file2.toString(), stats[3].getPath().toString());
       assertEquals(file3.toString(), stats[4].getPath().toString());
+      
+      itor = fc.listStatus(dir);
+      assertEquals(dir3.toString(), itor.next().getPath().toString());
+      assertEquals(dir4.toString(), itor.next().getPath().toString());
+      assertEquals(dir5.toString(), itor.next().getPath().toString());
+      assertEquals(file2.toString(), itor.next().getPath().toString());
+      assertEquals(file3.toString(), itor.next().getPath().toString());
+      assertFalse(itor.hasNext());      
     } finally {
       fs.close();
       cluster.shutdown();


