hadoop-hdfs-commits mailing list archives

From hair...@apache.org
Subject svn commit: r915089 - in /hadoop/hdfs/trunk: ./ src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ src/java/org/apache/hadoop/fs/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server...
Date Mon, 22 Feb 2010 22:13:54 GMT
Author: hairong
Date: Mon Feb 22 22:13:53 2010
New Revision: 915089

URL: http://svn.apache.org/viewvc?rev=915089&view=rev
Log:
HDFS-946. NameNode should not return full path name when listing a directory or getting the status of a file. Contributed by Hairong Kuang.
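
In practical terms: the NameNode now returns only each entry's local name, and the client prepends the parent path it already knows. A minimal illustrative sketch (not part of the commit; "/user/hairong" is an arbitrary directory and namenode is a ClientProtocol proxy, as in the diffs below):

    HdfsFileStatus[] listing = namenode.getListing("/user/hairong");
    for (HdfsFileStatus s : listing) {
      // each entry carries only its local name; rebuild the full path client-side
      String full = s.getFullName("/user/hairong");  // e.g. "/user/hairong/part-0"
    }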

Added:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Mon Feb 22 22:13:53 2010
@@ -72,6 +72,9 @@
 
   OPTIMIZATIONS
 
+    HDFS-946. NameNode should not return full path name when listing a
+    directory or getting the status of a file. (hairong)
+
   BUG FIXES
     HDFS-913. Rename fault injection test TestRename.java to TestFiRename.java
     to include it in tests run by ant target run-test-hdfs-fault-inject.

Modified: hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java (original)
+++ hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java Mon Feb 22 22:13:53 2010
@@ -25,9 +25,8 @@
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FileDataServlet;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -49,12 +48,12 @@
 
   /** {@inheritDoc} */
   @Override
-  protected URI createUri(FileStatus i, UserGroupInformation ugi,
+  protected URI createUri(String parent, HdfsFileStatus i, UserGroupInformation ugi,
       ClientProtocol nnproxy, HttpServletRequest request) throws IOException,
       URISyntaxException {
     return new URI(request.getScheme(), null, request.getServerName(), request
-        .getServerPort(), "/streamFile", "filename=" + i.getPath() + "&ugi="
-        + ugi.getShortUserName(), null);
+        .getServerPort(), "/streamFile", "filename=" + i.getFullName(parent) 
+        + "&ugi=" + ugi.getShortUserName(), null);
   }
 
   /** {@inheritDoc} */

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java Mon Feb 22 22:13:53 2010
@@ -29,6 +29,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.Progressable;
 
@@ -99,15 +100,24 @@
 
   @Override
   protected FileStatus getFileStatus(Path f) throws IOException {
-    FileStatus fi = dfs.getFileInfo(getUriPath(f));
+    HdfsFileStatus fi = dfs.getFileInfo(getUriPath(f));
     if (fi != null) {
-      fi.setPath(fi.getPath().makeQualified(getUri(), null));
-      return fi;
+      return makeQualified(fi, f);
     } else {
       throw new FileNotFoundException("File does not exist: " + f.toString());
     }
   }
 
+  private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
+    return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
+        f.getBlockSize(), f.getModificationTime(),
+        f.getAccessTime(),
+        f.getPermission(), f.getOwner(), f.getGroup(),
+        (f.getFullPath(parent)).makeQualified(
+            getUri(), null)); // fully-qualify path
+  }
+
+
   @Override
   protected FsStatus getFsStatus() throws IOException {
     return dfs.getDiskStatus();
@@ -120,14 +130,15 @@
 
   @Override
   protected FileStatus[] listStatus(Path f) throws IOException {
-    FileStatus[] infos = dfs.listPaths(getUriPath(f));
+    HdfsFileStatus[] infos = dfs.listPaths(getUriPath(f));
     if (infos == null)
       throw new FileNotFoundException("File " + f + " does not exist.");
 
+    FileStatus [] stats = new FileStatus[infos.length]; 
     for (int i = 0; i < infos.length; i++) {
-      infos[i].setPath(infos[i].getPath().makeQualified(getUri(), null));
+      stats[i] = makeQualified(infos[i], f);
     }
-    return infos;
+    return stats;
   }
 
   @Override

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java Mon Feb 22 22:13:53 2010
@@ -50,7 +50,6 @@
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
@@ -67,6 +66,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
@@ -649,7 +649,7 @@
   OutputStream append(String src, int buffersize, Progressable progress
       ) throws IOException {
     checkOpen();
-    FileStatus stat = null;
+    HdfsFileStatus stat = null;
     LocatedBlock lastBlock = null;
     try {
       stat = getFileInfo(src);
@@ -763,7 +763,7 @@
     return getFileInfo(src) != null;
   }
 
-  public FileStatus[] listPaths(String src) throws IOException {
+  public HdfsFileStatus[] listPaths(String src) throws IOException {
     checkOpen();
     try {
       return namenode.getListing(src);
@@ -772,7 +772,7 @@
     }
   }
 
-  public FileStatus getFileInfo(String src) throws IOException {
+  public HdfsFileStatus getFileInfo(String src) throws IOException {
     checkOpen();
     try {
       return namenode.getFileInfo(src);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java Mon Feb 22 22:13:53 2010
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
@@ -291,7 +292,7 @@
      * @param bytesPerChecksum number of bytes per checksum
      * @throws IOException if error occurs
      */
-    private DataStreamer(LocatedBlock lastBlock, FileStatus stat,
+    private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat,
         int bytesPerChecksum) throws IOException {
       stage = BlockConstructionStage.PIPELINE_SETUP_APPEND;
       block = lastBlock.getBlock();
@@ -1072,7 +1073,7 @@
    * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long)
    */
   DFSOutputStream(DFSClient dfsClient, String src, int buffersize, Progressable progress,
-      LocatedBlock lastBlock, FileStatus stat,
+      LocatedBlock lastBlock, HdfsFileStatus stat,
       int bytesPerChecksum) throws IOException {
     this(dfsClient, src, stat.getBlockSize(), progress, bytesPerChecksum);
     initialFileSize = stat.getLen(); // length of file when opened

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java Mon Feb 22 22:13:53 2010
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.io.UnsupportedEncodingException;
 import java.util.StringTokenizer;
 
 import org.apache.hadoop.conf.Configuration;
@@ -96,5 +97,29 @@
     String user = conf.get(userNameKey, System.getProperty("user.name"));
     UserGroupInformation.loginUserFromKeytab(user, keytabFilename);
   }
+
+  /**
+   * Converts a byte array to a string using UTF8 encoding.
+   */
+  public static String bytes2String(byte[] bytes) {
+    try {
+      return new String(bytes, "UTF8");
+    } catch(UnsupportedEncodingException e) {
+      assert false : "UTF8 encoding is not supported ";
+    }
+    return null;
+  }
+
+  /**
+   * Converts a string to a byte array using UTF8 encoding.
+   */
+  public static byte[] string2Bytes(String str) {
+    try {
+      return str.getBytes("UTF8");
+    } catch(UnsupportedEncodingException e) {
+      assert false : "UTF8 encoding is not supported ";
+    }
+    return null;
+  }
 }
 
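The moved helpers round-trip inode names between Java Strings and the raw UTF8 bytes that are kept in memory and sent over the wire. A quick sketch of their use (illustrative only; "part-0" is an arbitrary name):

    byte[] raw = DFSUtil.string2Bytes("part-0");  // UTF8 bytes, as stored in the INode
    String name = DFSUtil.bytes2String(raw);      // back to "part-0"
    assert "part-0".equals(name);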

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Mon Feb 22 22:13:53 2010
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
@@ -303,23 +304,24 @@
     dfs.setQuota(getPathName(src), namespaceQuota, diskspaceQuota);
   }
   
-  private FileStatus makeQualified(FileStatus f) {
+  private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
     return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
         f.getBlockSize(), f.getModificationTime(),
         f.getAccessTime(),
         f.getPermission(), f.getOwner(), f.getGroup(),
-        f.getPath().makeQualified(this)); // fully-qualify path
+        (f.getFullPath(parent)).makeQualified(
+            getUri(), getWorkingDirectory())); // fully-qualify path
   }
 
   @Override
   public FileStatus[] listStatus(Path p) throws IOException {
-    FileStatus[] infos = dfs.listPaths(getPathName(p));
+    HdfsFileStatus[] infos = dfs.listPaths(getPathName(p));
     if (infos == null) 
       throw new FileNotFoundException("File " + p + " does not exist.");
     
     FileStatus[] stats = new FileStatus[infos.length];
     for (int i = 0; i < infos.length; i++) {
-      stats[i] = makeQualified(infos[i]);
+      stats[i] = makeQualified(infos[i], p);
     }
     return stats;
   }
@@ -564,9 +566,9 @@
    */
   @Override
   public FileStatus getFileStatus(Path f) throws IOException {
-    FileStatus fi = dfs.getFileInfo(getPathName(f));
+    HdfsFileStatus fi = dfs.getFileInfo(getPathName(f));
     if (fi != null) {
-      return makeQualified(fi);
+      return makeQualified(fi, f);
     } else {
       throw new FileNotFoundException("File does not exist: " + f);
     }

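From an application's point of view the public API is unchanged: FileSystem still hands back fully qualified paths, now reassembled on the client from the parent path plus each returned local name. A hedged usage sketch (host and port are placeholders):

    FileSystem fs = FileSystem.get(conf);                   // a DistributedFileSystem
    FileStatus st = fs.getFileStatus(new Path("/foo/bar"));
    // the qualified path is rebuilt client-side by makeQualified(fi, f) above
    System.out.println(st.getPath());                       // e.g. hdfs://namenode:8020/foo/bar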
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Mon Feb 22 22:13:53 2010
@@ -21,8 +21,6 @@
 import java.io.IOException;
 
 import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options;
@@ -55,9 +53,11 @@
    * Compared to the previous version the following changes have been introduced:
    * (Only the latest change is reflected.
    * The log of historical changes can be retrieved from the svn).
-   * 55: Adding Delegation Token related APIs
+   * 57: getFileInfo returns HdfsFileStatus;
+   *     getListing returns HdfsFileStatus[].
+   * 
    */
-  public static final long versionID = 55L;
+  public static final long versionID = 57L;
   
   ///////////////////////////////////////
   // File contents
@@ -334,7 +334,7 @@
   /**
    * Get a listing of the indicated directory
    */
-  public FileStatus[] getListing(String src) throws IOException;
+  public HdfsFileStatus[] getListing(String src) throws IOException;
 
   ///////////////////////////////////////
   // System issues and management
@@ -511,7 +511,7 @@
    * @return object containing information regarding the file
    *         or null if file not found
    */
-  public FileStatus getFileInfo(String src) throws IOException;
+  public HdfsFileStatus getFileInfo(String src) throws IOException;
 
   /**
    * Get {@link ContentSummary} rooted at the specified directory.

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java?rev=915089&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java Mon Feb 22 22:13:53 2010
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+
+/** Interface that represents the over the wire information for a file.
+ */
+public class HdfsFileStatus implements Writable {
+
+  private byte[] path;  // local name of the inode that's encoded in java UTF8
+  private long length;
+  private boolean isdir;
+  private short block_replication;
+  private long blocksize;
+  private long modification_time;
+  private long access_time;
+  private FsPermission permission;
+  private String owner;
+  private String group;
+  
+  public static final byte[] EMPTY_NAME = new byte[0];
+
+  /**
+   * default constructor
+   */
+  public HdfsFileStatus() { this(0, false, 0, 0, 0, 0, null, null, null, null); }
+  
+  /**
+   * Constructor
+   * @param length the number of bytes the file has
+   * @param isdir if the path is a directory
+   * @param block_replication the replication factor
+   * @param blocksize the block size
+   * @param modification_time modification time
+   * @param access_time access time
+   * @param permission permission
+   * @param owner the owner of the path
+   * @param group the group of the path
+   * @param path the local name in java UTF8 encoding, the same as that in memory
+   */
+  public HdfsFileStatus(long length, boolean isdir, int block_replication,
+                    long blocksize, long modification_time, long access_time,
+                    FsPermission permission, String owner, String group, 
+                    byte[] path) {
+    this.length = length;
+    this.isdir = isdir;
+    this.block_replication = (short)block_replication;
+    this.blocksize = blocksize;
+    this.modification_time = modification_time;
+    this.access_time = access_time;
+    this.permission = (permission == null) ? 
+                      FsPermission.getDefault() : permission;
+    this.owner = (owner == null) ? "" : owner;
+    this.group = (group == null) ? "" : group;
+    this.path = path;
+  }
+
+  /**
+   * Get the length of this file, in bytes.
+   * @return the length of this file, in bytes.
+   */
+  final public long getLen() {
+    return length;
+  }
+
+  /**
+   * Is this a directory?
+   * @return true if this is a directory
+   */
+  final public boolean isDir() {
+    return isdir;
+  }
+
+  /**
+   * Get the block size of the file.
+   * @return the number of bytes
+   */
+  final public long getBlockSize() {
+    return blocksize;
+  }
+
+  /**
+   * Get the replication factor of a file.
+   * @return the replication factor of a file.
+   */
+  final public short getReplication() {
+    return block_replication;
+  }
+
+  /**
+   * Get the modification time of the file.
+   * @return the modification time of file in milliseconds since January 1, 1970 UTC.
+   */
+  final public long getModificationTime() {
+    return modification_time;
+  }
+
+  /**
+   * Get the access time of the file.
+   * @return the access time of file in milliseconds since January 1, 1970 UTC.
+   */
+  final public long getAccessTime() {
+    return access_time;
+  }
+
+  /**
+   * Get FsPermission associated with the file.
+   * @return permission
+   */
+  final public FsPermission getPermission() {
+    return permission;
+  }
+  
+  /**
+   * Get the owner of the file.
+   * @return owner of the file
+   */
+  final public String getOwner() {
+    return owner;
+  }
+  
+  /**
+   * Get the group associated with the file.
+   * @return group for the file. 
+   */
+  final public String getGroup() {
+    return group;
+  }
+  
+  /**
+   * Check if the local name is empty
+   * @return true if the name is empty
+   */
+  final public boolean isEmptyLocalName() {
+    return path.length == 0;
+  }
+
+  /**
+   * Get the string representation of the local name
+   * @return the local name in string
+   */
+  final public String getLocalName() {
+    return DFSUtil.bytes2String(path);
+  }
+  
+  /**
+   * Get the string representation of the full path name
+   * @param parent the parent path
+   * @return the full path in string
+   */
+  final public String getFullName(final String parent) {
+    if (isEmptyLocalName()) {
+      return parent;
+    }
+    
+    StringBuilder fullName = new StringBuilder(parent);
+    if (!parent.endsWith(Path.SEPARATOR)) {
+      fullName.append(Path.SEPARATOR);
+    }
+    fullName.append(getLocalName());
+    return fullName.toString();
+  }
+
+  /**
+   * Get the full path
+   * @param parent the parent path
+   * @return the full path
+   */
+  final public Path getFullPath(final Path parent) {
+    if (isEmptyLocalName()) {
+      return parent;
+    }
+    
+    return new Path(parent, getLocalName());
+  }
+
+  //////////////////////////////////////////////////
+  // Writable
+  //////////////////////////////////////////////////
+  public void write(DataOutput out) throws IOException {
+    out.writeInt(path.length);
+    out.write(path);
+    out.writeLong(length);
+    out.writeBoolean(isdir);
+    out.writeShort(block_replication);
+    out.writeLong(blocksize);
+    out.writeLong(modification_time);
+    out.writeLong(access_time);
+    permission.write(out);
+    Text.writeString(out, owner);
+    Text.writeString(out, group);
+  }
+
+  public void readFields(DataInput in) throws IOException {
+    int numOfBytes = in.readInt();
+    if (numOfBytes == 0) {
+      this.path = EMPTY_NAME;
+    } else {
+      this.path = new byte[numOfBytes];
+      in.readFully(path);
+    }
+    this.length = in.readLong();
+    this.isdir = in.readBoolean();
+    this.block_replication = in.readShort();
+    blocksize = in.readLong();
+    modification_time = in.readLong();
+    access_time = in.readLong();
+    permission.readFields(in);
+    owner = Text.readString(in);
+    group = Text.readString(in);
+  }
+}

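A minimal sketch of how the new status travels over the wire, using org.apache.hadoop.io.DataOutputBuffer and DataInputBuffer (test-style code, not part of this commit; the field values are arbitrary):

    HdfsFileStatus status = new HdfsFileStatus(1024L, false, 3, 64L * 1024 * 1024,
        0L, 0L, null, "hairong", "supergroup", DFSUtil.string2Bytes("part-0"));
    DataOutputBuffer out = new DataOutputBuffer();
    status.write(out);                          // serializes local name + attributes, no parent path
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    HdfsFileStatus copy = new HdfsFileStatus(); // default ctor leaves permission non-null
    copy.readFields(in);
    assert "/user/hairong/part-0".equals(copy.getFullName("/user/hairong"));
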
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Mon Feb 22 22:13:53 2010
@@ -29,11 +29,11 @@
 import javax.servlet.http.HttpServletResponse;
 import javax.servlet.jsp.JspWriter;
 
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.BlockAccessToken;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
@@ -59,7 +59,7 @@
     final DFSClient dfs = new DFSClient(datanode.getNameNodeAddr(),
         JspHelper.conf);
     String target = dir;
-    final FileStatus targetStatus = dfs.getFileInfo(target);
+    final HdfsFileStatus targetStatus = dfs.getFileInfo(target);
     if (targetStatus == null) { // not exists
       out.print("<h3>File or directory : " + target + " does not exist</h3>");
       JspHelper.printGotoForm(out, namenodeInfoPort, target);
@@ -95,7 +95,7 @@
         return;
       }
       // directory
-      FileStatus[] files = dfs.listPaths(target);
+      HdfsFileStatus[] files = dfs.listPaths(target);
       // generate a table and dump the info
       String[] headings = { "Name", "Type", "Size", "Replication",
           "Block Size", "Modification Time", "Permission", "Owner", "Group" };
@@ -120,8 +120,9 @@
         JspHelper.addTableRow(out, headings, row++);
         String cols[] = new String[headings.length];
         for (int i = 0; i < files.length; i++) {
+          String localFileName = files[i].getLocalName();
           // Get the location of the first block of the file
-          if (files[i].getPath().toString().endsWith(".crc"))
+          if (localFileName.endsWith(".crc"))
             continue;
           if (!files[i].isDir()) {
             cols[1] = "file";
@@ -135,10 +136,10 @@
             cols[4] = "";
           }
           String datanodeUrl = req.getRequestURL() + "?dir="
-              + URLEncoder.encode(files[i].getPath().toString(), "UTF-8")
+              + URLEncoder.encode(files[i].getFullName(target), "UTF-8")
               + "&namenodeInfoPort=" + namenodeInfoPort;
           cols[0] = "<a href=\"" + datanodeUrl + "\">"
-              + files[i].getPath().getName() + "</a>";
+              + localFileName + "</a>";
           cols[5] = FsShell.dateForm.format(new Date((files[i]
               .getModificationTime())));
           cols[6] = files[i].getPermission().toString();

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Mon Feb 22 22:13:53 2010
@@ -28,7 +28,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Options;
@@ -39,6 +38,7 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
@@ -1018,7 +1018,7 @@
    * This function is admittedly very inefficient right now.  We'll
    * make it better later.
    */
-  FileStatus[] getListing(String src) {
+  HdfsFileStatus[] getListing(String src) {
     String srcs = normalizePath(src);
 
     synchronized (rootDir) {
@@ -1026,15 +1026,14 @@
       if (targetNode == null)
         return null;
       if (!targetNode.isDirectory()) {
-        return new FileStatus[]{createFileStatus(srcs, targetNode)};
+        return new HdfsFileStatus[]{createFileStatus(
+            HdfsFileStatus.EMPTY_NAME, targetNode)};
       }
       List<INode> contents = ((INodeDirectory)targetNode).getChildren();
-      FileStatus listing[] = new FileStatus[contents.size()];
-      if(! srcs.endsWith(Path.SEPARATOR))
-        srcs += Path.SEPARATOR;
+      HdfsFileStatus listing[] = new HdfsFileStatus[contents.size()];
       int i = 0;
       for (INode cur : contents) {
-        listing[i] = createFileStatus(srcs+cur.getLocalName(), cur);
+        listing[i] = createFileStatus(cur.name, cur);
         i++;
       }
       return listing;
@@ -1046,7 +1045,7 @@
    * @return object containing information regarding the file
    *         or null if file not found
    */
-  FileStatus getFileInfo(String src) {
+  HdfsFileStatus getFileInfo(String src) {
     String srcs = normalizePath(src);
     synchronized (rootDir) {
       INode targetNode = rootDir.getNode(srcs);
@@ -1054,7 +1053,7 @@
         return null;
       }
       else {
-        return createFileStatus(srcs, targetNode);
+        return createFileStatus(HdfsFileStatus.EMPTY_NAME, targetNode);
       }
     }
   }
@@ -1708,9 +1707,10 @@
   /**
    * Create FileStatus by file INode 
    */
-   private static FileStatus createFileStatus(String path, INode node) {
+   private static HdfsFileStatus createFileStatus(byte[] path, INode node) {
     // length is zero for directories
-    return new FileStatus(node.isDirectory() ? 0 : node.computeContentSummary().getLength(), 
+    return new HdfsFileStatus(
+        node instanceof INodeFile ? ((INodeFile)node).computeFileSize(true) : 0, 
         node.isDirectory(), 
         node.isDirectory() ? 0 : ((INodeFile)node).getReplication(), 
         node.isDirectory() ? 0 : ((INodeFile)node).getPreferredBlockSize(),
@@ -1719,6 +1719,6 @@
         node.getFsPermission(),
         node.getUserName(),
         node.getGroupName(),
-        new Path(path));
+        path);
   }
 }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Mon Feb 22 22:13:53 2010
@@ -26,7 +26,6 @@
 import java.util.ArrayList;
 import java.util.Iterator;
 
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -35,6 +34,7 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
@@ -577,7 +577,7 @@
           String s = FSImage.readString(in);
           String d = FSImage.readString(in);
           timestamp = readLong(in);
-          FileStatus dinfo = fsDir.getFileInfo(d);
+          HdfsFileStatus dinfo = fsDir.getFileInfo(d);
           fsDir.unprotectedRenameTo(s, d, timestamp);
           fsNamesys.changeLease(s, d, dinfo);
           break;
@@ -714,7 +714,7 @@
           String d = FSImage.readString(in);
           timestamp = readLong(in);
           Rename[] options = readRenameOptions(in);
-          FileStatus dinfo = fsDir.getFileInfo(d);
+          HdfsFileStatus dinfo = fsDir.getFileInfo(d);
           fsDir.unprotectedRenameTo(s, d, timestamp, options);
           fsNamesys.changeLease(s, d, dinfo);
           break;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Mon Feb 22 22:13:53 2010
@@ -47,6 +47,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -149,7 +150,7 @@
    * Used for saving the image to disk
    */
   static private final FsPermission FILE_PERM = new FsPermission((short)0);
-  static private final byte[] PATH_SEPARATOR = INode.string2Bytes(Path.SEPARATOR);
+  static private final byte[] PATH_SEPARATOR = DFSUtil.string2Bytes(Path.SEPARATOR);
 
   /**
    */

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Feb 22 22:13:53 2010
@@ -62,7 +62,6 @@
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Options;
@@ -87,7 +86,6 @@
 import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
-import javax.security.auth.login.LoginException;
 
 /***************************************************
  * FSNamesystem does the actual bookkeeping work for the
@@ -120,7 +118,7 @@
 
   private static final void logAuditEvent(UserGroupInformation ugi,
       InetAddress addr, String cmd, String src, String dst,
-      FileStatus stat) {
+      HdfsFileStatus stat) {
     final Formatter fmt = auditFormatter.get();
     ((StringBuilder)fmt.out()).setLength(0);
     auditLog.info(fmt.format(AUDIT_FORMAT, ugi, addr, cmd, src, dst,
@@ -641,7 +639,7 @@
     dir.setPermission(src, permission);
     getEditLog().logSync();
     if (auditLog.isInfoEnabled()) {
-      final FileStatus stat = dir.getFileInfo(src);
+      final HdfsFileStatus stat = dir.getFileInfo(src);
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     Server.getRemoteIp(),
                     "setPermission", src, null, stat);
@@ -669,7 +667,7 @@
     dir.setOwner(src, username, group);
     getEditLog().logSync();
     if (auditLog.isInfoEnabled()) {
-      final FileStatus stat = dir.getFileInfo(src);
+      final HdfsFileStatus stat = dir.getFileInfo(src);
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     Server.getRemoteIp(),
                     "setOwner", src, null, stat);
@@ -728,7 +726,7 @@
                                                        ) throws IOException {
     INodeFile inode = dir.getFileINode(src);
     if (inode == null)
-      throw new FileNotFoundException();
+      throw new FileNotFoundException(src);
     if (doAccessTime && isAccessTimeSupported()) {
       dir.setTimes(src, inode, -1, now(), false);
     }
@@ -906,7 +904,7 @@
    
     
     if (auditLog.isInfoEnabled()) {
-      final FileStatus stat = dir.getFileInfo(target);
+      final HdfsFileStatus stat = dir.getFileInfo(target);
       logAuditEvent(UserGroupInformation.getLoginUser(),
                     Server.getRemoteIp(),
                     "concat", Arrays.toString(srcs), target, stat);
@@ -933,7 +931,7 @@
     if (inode != null) {
       dir.setTimes(src, inode, mtime, atime, true);
       if (auditLog.isInfoEnabled()) {
-        final FileStatus stat = dir.getFileInfo(src);
+        final HdfsFileStatus stat = dir.getFileInfo(src);
         logAuditEvent(UserGroupInformation.getCurrentUser(),
                       Server.getRemoteIp(),
                       "setTimes", src, null, stat);
@@ -1046,7 +1044,7 @@
         createParent, replication, blockSize);
     getEditLog().logSync();
     if (auditLog.isInfoEnabled()) {
-      final FileStatus stat = dir.getFileInfo(src);
+      final HdfsFileStatus stat = dir.getFileInfo(src);
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     Server.getRemoteIp(),
                     "create", src, null, stat);
@@ -1601,7 +1599,7 @@
     boolean status = renameToInternal(src, dst);
     getEditLog().logSync();
     if (status && auditLog.isInfoEnabled()) {
-      final FileStatus stat = dir.getFileInfo(dst);
+      final HdfsFileStatus stat = dir.getFileInfo(dst);
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     Server.getRemoteIp(),
                     "rename", src, dst, stat);
@@ -1629,7 +1627,7 @@
       checkAncestorAccess(actualdst, FsAction.WRITE);
     }
 
-    FileStatus dinfo = dir.getFileInfo(dst);
+    HdfsFileStatus dinfo = dir.getFileInfo(dst);
     if (dir.renameTo(src, dst)) {
       changeLease(src, dst, dinfo);     // update lease with new filename
       return true;
@@ -1648,7 +1646,7 @@
       for (Rename option : options) {
         cmd.append(option.value()).append(" ");
       }
-      final FileStatus stat = dir.getFileInfo(dst);
+      final HdfsFileStatus stat = dir.getFileInfo(dst);
       logAuditEvent(UserGroupInformation.getCurrentUser(), Server.getRemoteIp(),
                     cmd.toString(), src, dst, stat);
     }
@@ -1671,7 +1669,7 @@
       checkAncestorAccess(dst, FsAction.WRITE);
     }
 
-    FileStatus dinfo = dir.getFileInfo(dst);
+    HdfsFileStatus dinfo = dir.getFileInfo(dst);
     dir.renameTo(src, dst, options);
     changeLease(src, dst, dinfo); // update lease with new filename
   }
@@ -1770,7 +1768,7 @@
    * @return object containing information regarding the file
    *         or null if file not found
    */
-  FileStatus getFileInfo(String src) throws IOException {
+  HdfsFileStatus getFileInfo(String src) throws IOException {
     if (!DFSUtil.isValidName(src)) {
       throw new IOException("Invalid file name: " + src);
     }
@@ -1788,7 +1786,7 @@
     boolean status = mkdirsInternal(src, permissions, createParent);
     getEditLog().logSync();
     if (status && auditLog.isInfoEnabled()) {
-      final FileStatus stat = dir.getFileInfo(src);
+      final HdfsFileStatus stat = dir.getFileInfo(src);
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     Server.getRemoteIp(),
                     "mkdirs", src, null, stat);
@@ -2139,7 +2137,7 @@
    * Get a listing of all files at 'src'.  The Object[] array
    * exists so we can return file attributes (soon to be implemented)
    */
-  public FileStatus[] getListing(String src) throws IOException {
+  public HdfsFileStatus[] getListing(String src) throws IOException {
     if (isPermissionEnabled) {
       if (dir.isDir(src)) {
         checkPathAccess(src, FsAction.READ_EXECUTE);
@@ -4186,7 +4184,7 @@
   // rename was successful. If any part of the renamed subtree had
   // files that were being written to, update with new filename.
   //
-  void changeLease(String src, String dst, FileStatus dinfo) 
+  void changeLease(String src, String dst, HdfsFileStatus dinfo) 
                    throws IOException {
     String overwrite;
     String replaceBy;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java Mon Feb 22 22:13:53 2010
@@ -25,10 +25,11 @@
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
-import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -41,11 +42,11 @@
   private static final long serialVersionUID = 1L;
 
   /** Create a redirection URI */
-  protected URI createUri(FileStatus i, UserGroupInformation ugi,
+  protected URI createUri(String parent, HdfsFileStatus i, UserGroupInformation ugi,
       ClientProtocol nnproxy, HttpServletRequest request)
       throws IOException, URISyntaxException {
     String scheme = request.getScheme();
-    final DatanodeID host = pickSrcDatanode(i, nnproxy);
+    final DatanodeID host = pickSrcDatanode(parent, i, nnproxy);
     final String hostname;
     if (host instanceof DatanodeInfo) {
       hostname = ((DatanodeInfo)host).getHostName();
@@ -56,7 +57,7 @@
         "https".equals(scheme)
           ? (Integer)getServletContext().getAttribute("datanode.https.port")
           : host.getInfoPort(),
-            "/streamFile", "filename=" + i.getPath() + 
+            "/streamFile", "filename=" + i.getFullName(parent) + 
             "&ugi=" + ugi.getShortUserName(), null);
   }
 
@@ -64,10 +65,10 @@
    * Currently, this looks at no more than the first five blocks of a file,
    * selecting a datanode randomly from the most represented.
    */
-  private DatanodeID pickSrcDatanode(FileStatus i,
+  private DatanodeID pickSrcDatanode(String parent, HdfsFileStatus i,
       ClientProtocol nnproxy) throws IOException {
     final LocatedBlocks blks = nnproxy.getBlockLocations(
-        i.getPath().toUri().getPath(), 0, 1);
+        i.getFullPath(new Path(parent)).toUri().getPath(), 0, 1);
     if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
       // pick a random datanode
       NameNode nn = (NameNode)getServletContext().getAttribute("name.node");
@@ -98,9 +99,9 @@
 
       final String path = request.getPathInfo() != null ? 
                                                     request.getPathInfo() : "/";
-      FileStatus info = nnproxy.getFileInfo(path);
+      HdfsFileStatus info = nnproxy.getFileInfo(path);
       if ((info != null) && !info.isDir()) {
-        response.sendRedirect(createUri(info, ugi, nnproxy,
+        response.sendRedirect(createUri(path, info, ugi, nnproxy,
               request).toURL().toString());
       } else if (info == null){
         response.sendError(400, "cat: File not found " + path);

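The redirection servlet now threads the parent directory through so it can rebuild the absolute path before looking up block locations. A short sketch of the reconstructed lookup (mirrors pickSrcDatanode above; names are illustrative):

    // rebuild the absolute path from parent + local name, then fetch the first block
    String abs = i.getFullPath(new Path(parent)).toUri().getPath();
    LocatedBlocks blks = nnproxy.getBlockLocations(abs, 0, 1);
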
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INode.java Mon Feb 22 22:13:53 2010
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.UnsupportedEncodingException;
 import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.permission.*;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 
 /**
@@ -32,6 +32,13 @@
  * directory inodes.
  */
 abstract class INode implements Comparable<byte[]>, FSInodeInfo {
+  /*
+   *  The inode name is in java UTF8 encoding;
+   *  the name in HdfsFileStatus should keep the same encoding as this.
+   *  If this encoding is changed, getFileInfo and listStatus in
+   *  ClientProtocol are implicitly changed; the decoding at the client
+   *  side should change accordingly.
+   */
   protected byte[] name;
   protected INodeDirectory parent;
   protected long modificationTime;
@@ -219,7 +226,7 @@
    * @return local file name
    */
   String getLocalName() {
-    return bytes2String(name);
+    return DFSUtil.bytes2String(name);
   }
 
   /**
@@ -234,7 +241,7 @@
    * Set local file name
    */
   void setLocalName(String name) {
-    this.name = string2Bytes(name);
+    this.name = DFSUtil.string2Bytes(name);
   }
 
   /**
@@ -328,7 +335,7 @@
     }
     byte[][] bytes = new byte[strings.length][];
     for (int i = 0; i < strings.length; i++)
-      bytes[i] = string2Bytes(strings[i]);
+      bytes[i] = DFSUtil.string2Bytes(strings[i]);
     return bytes;
   }
 
@@ -397,28 +404,4 @@
     }
     return len1 - len2;
   }
-
-  /**
-   * Converts a byte array to a string using UTF8 encoding.
-   */
-  static String bytes2String(byte[] bytes) {
-    try {
-      return new String(bytes, "UTF8");
-    } catch(UnsupportedEncodingException e) {
-      assert false : "UTF8 encoding is not supported ";
-    }
-    return null;
-  }
-
-  /**
-   * Converts a string to a byte array using UTF8 encoding.
-   */
-  static byte[] string2Bytes(String str) {
-    try {
-      return str.getBytes("UTF8");
-    } catch(UnsupportedEncodingException e) {
-      assert false : "UTF8 encoding is not supported ";
-    }
-    return null;
-  }
 }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Mon Feb 22 22:13:53 2010
@@ -25,6 +25,7 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 
 /**
@@ -95,7 +96,7 @@
   }
   
   INode getChild(String name) {
-    return getChildINode(string2Bytes(name));
+    return getChildINode(DFSUtil.string2Bytes(name));
   }
 
   private INode getChildINode(byte[] name) {
@@ -161,7 +162,7 @@
   int getExistingPathINodes(byte[][] components, INode[] existing) {
     assert compareBytes(this.name, components[0]) == 0 :
       "Incorrect name " + getLocalName() + " expected " + 
-      bytes2String(components[0]);
+      DFSUtil.bytes2String(components[0]);
 
     INode curNode = this;
     int count = 0;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java Mon Feb 22 22:13:53 2010
@@ -18,8 +18,10 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.VersionInfo;
 
@@ -59,10 +61,10 @@
    * Node information includes path, modification, permission, owner and group.
    * For files, it also includes size, replication and block-size. 
    */
-  static void writeInfo(FileStatus i, XMLOutputter doc) throws IOException {
+  static void writeInfo(String parent, HdfsFileStatus i, XMLOutputter doc) throws IOException {
     final SimpleDateFormat ldf = df.get();
     doc.startTag(i.isDir() ? "directory" : "file");
-    doc.attribute("path", i.getPath().toUri().getPath());
+    doc.attribute("path", i.getFullPath(new Path(parent)).toUri().getPath());
     doc.attribute("modified", ldf.format(new Date(i.getModificationTime())));
     doc.attribute("accesstime", ldf.format(new Date(i.getAccessTime())));
     if (!i.isDir()) {
@@ -148,9 +150,9 @@
         doc.attribute(m.getKey(), m.getValue());
       }
 
-      FileStatus base = nnproxy.getFileInfo(path);
+      HdfsFileStatus base = nnproxy.getFileInfo(path);
       if ((base != null) && base.isDir()) {
-        writeInfo(base, doc);
+        writeInfo(path, base, doc);
       }
 
       Stack<String> pathstack = new Stack<String>();
@@ -158,20 +160,21 @@
       while (!pathstack.empty()) {
         String p = pathstack.pop();
         try {
-          FileStatus[] listing = nnproxy.getListing(p);
+          HdfsFileStatus[] listing = nnproxy.getListing(p);
           if (listing == null) {
             LOG.warn("ListPathsServlet - Path " + p + " does not exist");
             continue;
           }
-          for (FileStatus i : listing) {
-            if (exclude.matcher(i.getPath().getName()).matches()
-                || !filter.matcher(i.getPath().getName()).matches()) {
+          for (HdfsFileStatus i : listing) {
+            String localName = i.getLocalName();
+            if (exclude.matcher(localName).matches()
+                || !filter.matcher(localName).matches()) {
               continue;
             }
             if (recur && i.isDir()) {
-              pathstack.push(i.getPath().toUri().getPath());
+              pathstack.push(new Path(p, localName).toUri().getPath());
             }
-            writeInfo(i, doc);
+            writeInfo(p, i, doc);
           }
         }
         catch(RemoteException re) {re.writeXml(p, doc);}

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Mon Feb 22 22:13:53 2010
@@ -31,7 +31,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
@@ -46,6 +45,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
@@ -828,8 +828,8 @@
 
   /**
    */
-  public FileStatus[] getListing(String src) throws IOException {
-    FileStatus[] files = namesystem.getListing(src);
+  public HdfsFileStatus[] getListing(String src) throws IOException {
+    HdfsFileStatus[] files = namesystem.getListing(src);
     if (files != null) {
       myMetrics.numGetListingOps.inc();
       myMetrics.numFilesInGetListingOps.inc(files.length);
@@ -844,7 +844,7 @@
    * @return object containing information regarding the file
    *         or null if file not found
    */
-  public FileStatus getFileInfo(String src)  throws IOException {
+  public HdfsFileStatus getFileInfo(String src)  throws IOException {
     myMetrics.numFileInfoOps.inc();
     return namesystem.getFileInfo(src);
   }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Mon Feb 22 22:13:53 2010
@@ -32,12 +32,12 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
@@ -141,10 +141,10 @@
     try {
       Result res = new Result(conf);
 
-      final FileStatus[] files = namenode.getListing(path);
+      final HdfsFileStatus[] files = namenode.getListing(path);
       if (files != null) {
         for (int i = 0; i < files.length; i++) {
-          check(files[i], res);
+          check(path, files[i], res);
         }
         out.println(res);
         out.println(" Number of data-nodes:\t\t" + totalDatanodes);
@@ -171,12 +171,12 @@
     }
   }
   
-  private void check(FileStatus file, Result res) throws IOException {
-    String path = file.getPath().toString();
+  private void check(String parent, HdfsFileStatus file, Result res) throws IOException {
+    String path = file.getFullName(parent);
     boolean isOpen = false;
 
     if (file.isDir()) {
-      final FileStatus[] files = namenode.getListing(path);
+      final HdfsFileStatus[] files = namenode.getListing(path);
       if (files == null) {
         return;
       }
@@ -185,7 +185,7 @@
       }
       res.totalDirs++;
       for (int i = 0; i < files.length; i++) {
-        check(files[i], res);
+        check(path, files[i], res);
       }
       return;
     }
@@ -304,7 +304,7 @@
         break;
       case FIXING_MOVE:
         if (!isOpen)
-          lostFoundMove(file, blocks);
+          lostFoundMove(parent, file, blocks);
         break;
       case FIXING_DELETE:
         if (!isOpen)
@@ -323,7 +323,7 @@
     }
   }
   
-  private void lostFoundMove(FileStatus file, LocatedBlocks blocks)
+  private void lostFoundMove(String parent, HdfsFileStatus file, LocatedBlocks blocks)
     throws IOException {
     final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
     try {
@@ -333,8 +333,9 @@
     if (!lfInitedOk) {
       return;
     }
-    String target = lostFound + file.getPath();
-    String errmsg = "Failed to move " + file.getPath() + " to /lost+found";
+    String fullName = file.getFullName(parent);
+    String target = lostFound + fullName;
+    String errmsg = "Failed to move " + fullName + " to /lost+found";
     try {
       if (!namenode.mkdirs(target, file.getPermission(), true)) {
         LOG.warn(errmsg);
@@ -378,8 +379,8 @@
         }
       }
       if (fos != null) fos.close();
-      LOG.warn("\n - moved corrupted file " + file.getPath() + " to /lost+found");
-      dfs.delete(file.getPath().toString(), true);
+      LOG.warn("\n - moved corrupted file " + fullName + " to /lost+found");
+      dfs.delete(fullName, true);
     }  catch (Exception e) {
       e.printStackTrace();
       LOG.warn(errmsg + ": " + e.getMessage());
@@ -500,7 +501,7 @@
     try {
       String lfName = "/lost+found";
       
-      final FileStatus lfStatus = dfs.getFileInfo(lfName);
+      final HdfsFileStatus lfStatus = dfs.getFileInfo(lfName);
       if (lfStatus == null) { // not exists
         lfInitedOk = dfs.mkdirs(lfName, null, true);
         lostFound = lfName;
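
The fsck changes above pass the parent path into check() and lostFoundMove() and rely on HdfsFileStatus.getFullName(parent) to rebuild the absolute path. The method itself lives in the newly added HdfsFileStatus.java, which this message does not include; inferred from the call sites, it plausibly amounts to a join like this stand-alone sketch (the empty-local-name case and the helper shape are assumptions, not the committed code):

public class FullNameSketch {
  // Stand-alone approximation of HdfsFileStatus.getFullName(parent);
  // 'local' stands in for the entry's stored local name.
  static String fullName(String parent, String local) {
    if (local.isEmpty()) {
      return parent; // the status describes the parent path itself
    }
    return parent.endsWith("/") ? parent + local : parent + "/" + local;
  }

  public static void main(String[] args) {
    // prints /test/mkdirs/filestatus2.dat
    System.out.println(fullName("/test/mkdirs", "filestatus2.dat"));
  }
}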

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java Mon Feb 22 22:13:53 2010
@@ -224,7 +224,7 @@
 
     public boolean mkdirs(String src, FsPermission masked, boolean createParent) throws IOException { return false; }
 
-    public FileStatus[] getListing(String src) throws IOException { return null; }
+    public HdfsFileStatus[] getListing(String src) throws IOException { return null; }
 
     public void renewLease(String clientName) throws IOException {}
 
@@ -248,7 +248,7 @@
 
     public void metaSave(String filename) throws IOException {}
 
-    public FileStatus getFileInfo(String src) throws IOException { return null; }
+    public HdfsFileStatus getFileInfo(String src) throws IOException { return null; }
 
     public ContentSummary getContentSummary(String path) throws IOException { return null; }
 

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Mon Feb 22 22:13:53 2010
@@ -28,9 +28,9 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSInputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -130,11 +130,11 @@
   private void verifyDir(DFSClient client, String dir) 
                                            throws IOException {
     
-    FileStatus[] fileArr = client.listPaths(dir);
+    HdfsFileStatus[] fileArr = client.listPaths(dir);
     TreeMap<String, Boolean> fileMap = new TreeMap<String, Boolean>();
     
-    for(FileStatus file : fileArr) {
-      String path = file.getPath().toString();
+    for(HdfsFileStatus file : fileArr) {
+      String path = file.getFullName(dir);
       fileMap.put(path, Boolean.valueOf(file.isDir()));
     }
     

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java Mon Feb 22 22:13:53 2010
@@ -28,6 +28,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.RemoteException;
@@ -88,7 +89,7 @@
                  fs.getFileStatus(path).isDir() == true);
       
       // make sure getFileInfo returns null for files which do not exist
-      FileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
+      HdfsFileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
       assertTrue(fileInfo == null);
 
       // make sure getFileInfo throws the appropriate exception for non-relative
@@ -107,43 +108,74 @@
       System.out.println("Created file filestatus.dat with one "
                          + " replicas.");
       checkFile(fs, file1, 1);
-      assertTrue(file1 + " should be a file", 
-                  fs.getFileStatus(file1).isDir() == false);
-      assertTrue(fs.getFileStatus(file1).getBlockSize() == blockSize);
-      assertTrue(fs.getFileStatus(file1).getReplication() == 1);
-      assertTrue(fs.getFileStatus(file1).getLen() == fileSize);
       System.out.println("Path : \"" + file1 + "\"");
+      
+      // test getFileStatus on a file
+      FileStatus status = fs.getFileStatus(file1);
+      assertTrue(file1 + " should be a file", 
+                  status.isDir() == false);
+      assertTrue(status.getBlockSize() == blockSize);
+      assertTrue(status.getReplication() == 1);
+      assertTrue(status.getLen() == fileSize);
+      assertEquals(file1.makeQualified(fs.getUri(), 
+          fs.getWorkingDirectory()).toString(), 
+          status.getPath().toString());
+      
+      // test listStatus on a file
+      FileStatus[] stats = fs.listStatus(file1);
+      assertEquals(1, stats.length);
+      status = stats[0];
+      assertTrue(file1 + " should be a file", 
+          status.isDir() == false);
+      assertTrue(status.getBlockSize() == blockSize);
+      assertTrue(status.getReplication() == 1);
+      assertTrue(status.getLen() == fileSize);
+      assertEquals(file1.makeQualified(fs.getUri(), 
+          fs.getWorkingDirectory()).toString(), 
+          status.getPath().toString());
 
       // create an empty directory
       //
       Path dir = new Path("/test/mkdirs");
       assertTrue(fs.mkdirs(dir));
       assertTrue(fs.exists(dir));
-      assertTrue(dir + " should be a directory", 
-                 fs.getFileStatus(path).isDir() == true);
+      System.out.println("Dir : \"" + dir + "\"");
+      
+      // test getFileStatus on an empty directory
+      status = fs.getFileStatus(dir);
+      assertTrue(dir + " should be a directory", status.isDir());
+      assertTrue(dir + " should be zero size ", status.getLen() == 0);
+      assertEquals(dir.makeQualified(fs.getUri(), 
+          fs.getWorkingDirectory()).toString(), 
+          status.getPath().toString());
+      
+      // test listStatus on an empty directory
+      stats = fs.listStatus(dir);
+      assertEquals(dir + " should be empty", 0, stats.length);
       assertEquals(dir + " should be zero size ",
           0, fs.getContentSummary(dir).getLength());
       assertEquals(dir + " should be zero size using hftp",
           0, hftpfs.getContentSummary(dir).getLength());
-      assertTrue(dir + " should be zero size ",
-                 fs.getFileStatus(dir).getLen() == 0);
-      System.out.println("Dir : \"" + dir + "\"");
 
       // create another file that is smaller than a block.
       //
-      Path file2 = new Path("/test/mkdirs/filestatus2.dat");
+      Path file2 = new Path(dir, "filestatus2.dat");
       writeFile(fs, file2, 1, blockSize/4, blockSize);
       System.out.println("Created file filestatus2.dat with one "
                          + " replicas.");
       checkFile(fs, file2, 1);
       System.out.println("Path : \"" + file2 + "\"");
-
+      
       // verify file attributes
-      assertTrue(fs.getFileStatus(file2).getBlockSize() == blockSize);
-      assertTrue(fs.getFileStatus(file2).getReplication() == 1);
+      status = fs.getFileStatus(file2);
+      assertTrue(status.getBlockSize() == blockSize);
+      assertTrue(status.getReplication() == 1);
+      assertEquals(file2.makeQualified(
+          fs.getUri(), fs.getWorkingDirectory()).toString(), 
+          status.getPath().toString());
 
       // create another file in the same directory
-      Path file3 = new Path("/test/mkdirs/filestatus3.dat");
+      Path file3 = new Path(dir, "filestatus3.dat");
       writeFile(fs, file3, 1, blockSize/4, blockSize);
       System.out.println("Created file filestatus3.dat with one "
                          + " replicas.");
@@ -156,6 +188,19 @@
           expected, fs.getContentSummary(dir).getLength());
       assertEquals(dir + " size should be " + expected + " using hftp", 
           expected, hftpfs.getContentSummary(dir).getLength());
+      
+      // test listStatus on a non-empty directory
+      stats = fs.listStatus(dir);
+      assertEquals(dir + " should have two entries", 2, stats.length);
+      String qualifiedFile2 = file2.makeQualified(fs.getUri(), 
+          fs.getWorkingDirectory()).toString();
+      String qualifiedFile3 = file3.makeQualified(fs.getUri(), 
+          fs.getWorkingDirectory()).toString();
+      for(FileStatus stat:stats) {
+        String statusFullName = stat.getPath().toString();
+        assertTrue(qualifiedFile2.equals(statusFullName)
+          || qualifiedFile3.toString().equals(statusFullName));
+      }
     } finally {
       fs.close();
       cluster.shutdown();
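
The rewritten assertions compare against Path.makeQualified(fs.getUri(), fs.getWorkingDirectory()) because, although the NameNode now ships only local names, the client-side FileStatus still exposes a fully qualified path. A small illustration with assumed values (the test itself takes the URI and working directory from the running MiniDFSCluster):

import java.net.URI;
import org.apache.hadoop.fs.Path;

public class QualifySketch {
  public static void main(String[] args) {
    // Illustrative values only; not taken from the test cluster.
    URI fsUri = URI.create("hdfs://localhost:9000");
    Path working = new Path("/user/test");
    Path file2 = new Path("/test/mkdirs/filestatus2.dat");
    // An absolute path just gains scheme and authority:
    // hdfs://localhost:9000/test/mkdirs/filestatus2.dat
    System.out.println(file2.makeQualified(fsUri, working));
  }
}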

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java?rev=915089&r1=915088&r2=915089&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java Mon Feb 22 22:13:53 2010
@@ -32,12 +32,11 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -105,7 +104,7 @@
   public void testConcat() throws IOException, InterruptedException {
     final int numFiles = 10;
     long fileLen = blockSize*3;
-    FileStatus fStatus;
+    HdfsFileStatus fStatus;
     FSDataInputStream stm;
     
     String trg = new String("/trg");
@@ -252,7 +251,7 @@
     Path filePath1 = new Path(name1);
     DFSTestUtil.createFile(dfs, filePath1, trgFileLen, REPL_FACTOR, 1);
     
-    FileStatus fStatus = cluster.getNameNode().getFileInfo(name1);
+    HdfsFileStatus fStatus = cluster.getNameNode().getFileInfo(name1);
     long fileLen = fStatus.getLen();
     assertEquals(fileLen, trgFileLen);
     


