hadoop-hdfs-commits mailing list archives

From: w...@apache.org
Subject: svn commit: r1590766 [3/4] - in /hadoop/common/branches/HDFS-2006/hadoop-hdfs-project: hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/ hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/ hadoop-hdfs/src/main/bin/...
Date: Mon, 28 Apr 2014 19:40:15 GMT
Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java Mon Apr 28 19:40:06 2014
@@ -206,7 +206,7 @@ public class INodeDirectorySnapshottable
     return i < 0? null: snapshotsByNames.get(i);
   }
   
-  Snapshot getSnapshotById(int sid) {
+  public Snapshot getSnapshotById(int sid) {
     for (Snapshot s : snapshotsByNames) {
       if (s.getId() == sid) {
         return s;

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Mon Apr 28 19:40:06 2014
@@ -162,8 +162,16 @@ public class NamenodeWebHdfsMethods {
 
     //clear content type
     response.setContentType(null);
+    
+    // set the remote address; if the request came in via a trusted proxy
+    // server, the address will be that of the proxied client
+    REMOTE_ADDRESS.set(JspHelper.getRemoteAddr(request));
   }
 
+  private void reset() {
+    REMOTE_ADDRESS.set(null);
+  }
+  
   private static NamenodeProtocols getRPCServer(NameNode namenode)
       throws IOException {
      final NamenodeProtocols np = namenode.getRpcServer();
@@ -394,7 +402,6 @@ public class NamenodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
-        REMOTE_ADDRESS.set(request.getRemoteAddr());
         try {
           return put(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, destination, owner, group,
@@ -402,7 +409,7 @@ public class NamenodeWebHdfsMethods {
               modificationTime, accessTime, renameOptions, createParent,
               delegationTokenArgument,aclPermission);
         } finally {
-          REMOTE_ADDRESS.set(null);
+          reset();
         }
       }
     });
@@ -583,12 +590,11 @@ public class NamenodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
-        REMOTE_ADDRESS.set(request.getRemoteAddr());
         try {
           return post(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, concatSrcs, bufferSize);
         } finally {
-          REMOTE_ADDRESS.set(null);
+          reset();
         }
       }
     });
@@ -681,12 +687,11 @@ public class NamenodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
-        REMOTE_ADDRESS.set(request.getRemoteAddr());
         try {
           return get(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, offset, length, renewer, bufferSize);
         } finally {
-          REMOTE_ADDRESS.set(null);
+          reset();
         }
       }
     });
@@ -889,12 +894,11 @@ public class NamenodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException {
-        REMOTE_ADDRESS.set(request.getRemoteAddr());
         try {
           return delete(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, recursive);
         } finally {
-          REMOTE_ADDRESS.set(null);
+          reset();
         }
       }
     });
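
The hunks above amount to a small request-lifecycle pattern: init() seeds a ThreadLocal with the (possibly proxied) client address, and every handler now clears it through the shared reset() in its finally block. A minimal standalone sketch of that pattern, where only REMOTE_ADDRESS mirrors the class above and the handler names are illustrative:

    // Sketch of the request-scoped ThreadLocal pattern; not the actual
    // NamenodeWebHdfsMethods code.
    public class RemoteAddressSketch {
      private static final ThreadLocal<String> REMOTE_ADDRESS =
          new ThreadLocal<String>();

      void handleRequest(String clientAddr) {
        REMOTE_ADDRESS.set(clientAddr);  // like init(): record per request
        try {
          doWork();
        } finally {
          REMOTE_ADDRESS.set(null);      // like reset(): always clear
        }
      }

      private void doWork() {
        // Downstream code (e.g. audit logging) can read the address
        // without threading it through every method signature.
        System.out.println("request from " + REMOTE_ADDRESS.get());
      }
    }

Clearing the slot in finally avoids leaking one request's address into the next request served by the same pooled thread.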

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockIdCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockIdCommand.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockIdCommand.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockIdCommand.java Mon Apr 28 19:40:06 2014
@@ -32,7 +32,6 @@ public class BlockIdCommand extends Data
 
   /**
    * Create BlockCommand for the given action
-   * @param blocks blocks related to the action
    */
   public BlockIdCommand(int action, String poolId, long[] blockIds) {
     super(action);

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Mon Apr 28 19:40:06 2014
@@ -119,9 +119,9 @@ public interface DatanodeProtocol {
    * and should be deleted.  This function is meant to upload *all*
    * the locally-stored blocks.  It's invoked upon startup and then
    * infrequently afterwards.
-   * @param registration
-   * @param poolId - the block pool ID for the blocks
-   * @param reports - report of blocks per storage
+   * @param registration datanode registration
+   * @param poolId the block pool ID for the blocks
+   * @param reports report of blocks per storage
    *     Each finalized block is represented as 3 longs. Each under-
    *     construction replica is represented as 4 longs.
    *     This is done instead of Block[] to reduce memory used by block reports.

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java Mon Apr 28 19:40:06 2014
@@ -48,8 +48,6 @@ public class DatanodeStorage {
 
   /**
    * Create a storage with {@link State#NORMAL} and {@link StorageType#DEFAULT}.
-   *
-   * @param storageID
    */
   public DatanodeStorage(String storageID) {
     this(storageID, State.NORMAL, StorageType.DEFAULT);

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java Mon Apr 28 19:40:06 2014
@@ -39,7 +39,7 @@ public abstract class ServerCommand {
    * 
    * @see DatanodeProtocol
    * @see NamenodeProtocol
-   * @param action
+   * @param action the protocol-specific action
    */
   public ServerCommand(int action) {
     this.action = action;

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java Mon Apr 28 19:40:06 2014
@@ -130,9 +130,6 @@ public class DFSck extends Configured im
     out.println(USAGE + "\n");
     ToolRunner.printGenericCommandUsage(out);
   }
-  /**
-   * @param args
-   */
   @Override
   public int run(final String[] args) throws IOException {
     if (args.length == 0) {

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java Mon Apr 28 19:40:06 2014
@@ -167,8 +167,7 @@ public class GetConf extends Configured 
     }
 
     
-    /** Method to be overridden by sub classes for specific behavior 
-     * @param args */
+    /** Method to be overridden by sub classes for specific behavior */
     int doWorkInternal(GetConf tool, String[] args) throws Exception {
 
       String value = tool.getConf().getTrimmed(key);

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/HDFSConcat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/HDFSConcat.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/HDFSConcat.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/HDFSConcat.java Mon Apr 28 19:40:06 2014
@@ -30,9 +30,7 @@ import org.apache.hadoop.hdfs.Distribute
 @InterfaceAudience.Private
 public class HDFSConcat {
   private final static String def_uri = "hdfs://localhost:9000";
-  /**
-   * @param args
-   */
+
   public static void main(String... args) throws IOException {
 
     if(args.length < 2) {

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java Mon Apr 28 19:40:06 2014
@@ -217,7 +217,7 @@ public class JMXGet {
   }
 
   /**
-   * @param msg
+   * @param msg error message
    */
   private static void err(String msg) {
     System.err.println(msg);
@@ -274,13 +274,7 @@ public class JMXGet {
     return commandLine;
   }
 
-  /**
-   * main
-   * 
-   * @param args
-   */
   public static void main(String[] args) {
-
     int res = -1;
 
     // parse arguments

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java Mon Apr 28 19:40:06 2014
@@ -37,8 +37,7 @@ public class BinaryEditsVisitor implemen
 
   /**
    * Create a processor that writes to a given file
-   *
-   * @param filename Name of file to write output to
+   * @param outputName Name of file to write output to
    */
   public BinaryEditsVisitor(String outputName) throws IOException {
     this.elfos = new EditLogFileOutputStream(new Configuration(),

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java Mon Apr 28 19:40:06 2014
@@ -104,6 +104,8 @@ public class FSImageHandler extends Simp
         content = loader.getFileStatus(path);
       } else if (op.equals("LISTSTATUS")) {
         content = loader.listStatus(path);
+      } else if (op.equals("GETACLSTATUS")) {
+        content = loader.getAclStatus(path);
       } else {
         response.setStatus(HttpResponseStatus.BAD_REQUEST);
       }

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java Mon Apr 28 19:40:06 2014
@@ -31,6 +31,7 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
@@ -271,6 +272,81 @@ class FSImageLoader {
   }
 
   /**
+   * Return the JSON formatted ACL status of the specified file.
+   * @param path a path that specifies a file
+   * @return JSON formatted AclStatus
+   * @throws IOException if the ACL status fails to serialize to JSON.
+   */
+  String getAclStatus(String path) throws IOException {
+    StringBuilder sb = new StringBuilder();
+    List<AclEntry> aclEntryList = getAclEntryList(path);
+    PermissionStatus p = getPermissionStatus(path);
+    sb.append("{\"AclStatus\":{\"entries\":[");
+    int i = 0;
+    for (AclEntry aclEntry : aclEntryList) {
+      if (i++ != 0) {
+        sb.append(',');
+      }
+      sb.append('"');
+      sb.append(aclEntry.toString());
+      sb.append('"');
+    }
+    sb.append("],\"group\": \"");
+    sb.append(p.getGroupName());
+    sb.append("\",\"owner\": \"");
+    sb.append(p.getUserName());
+    sb.append("\",\"stickyBit\": ");
+    sb.append(p.getPermission().getStickyBit());
+    sb.append("}}\n");
+    return sb.toString();
+  }
+
+  private List<AclEntry> getAclEntryList(String path) {
+    long id = getINodeId(path);
+    FsImageProto.INodeSection.INode inode = inodes.get(id);
+    switch (inode.getType()) {
+      case FILE: {
+        FsImageProto.INodeSection.INodeFile f = inode.getFile();
+        return FSImageFormatPBINode.Loader.loadAclEntries(
+            f.getAcl(), stringTable);
+      }
+      case DIRECTORY: {
+        FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
+        return FSImageFormatPBINode.Loader.loadAclEntries(
+            d.getAcl(), stringTable);
+      }
+      default: {
+        return new ArrayList<AclEntry>();
+      }
+    }
+  }
+
+  private PermissionStatus getPermissionStatus(String path) {
+    long id = getINodeId(path);
+    FsImageProto.INodeSection.INode inode = inodes.get(id);
+    switch (inode.getType()) {
+      case FILE: {
+        FsImageProto.INodeSection.INodeFile f = inode.getFile();
+        return FSImageFormatPBINode.Loader.loadPermission(
+            f.getPermission(), stringTable);
+      }
+      case DIRECTORY: {
+        FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
+        return FSImageFormatPBINode.Loader.loadPermission(
+            d.getPermission(), stringTable);
+      }
+      case SYMLINK: {
+        FsImageProto.INodeSection.INodeSymlink s = inode.getSymlink();
+        return FSImageFormatPBINode.Loader.loadPermission(
+            s.getPermission(), stringTable);
+      }
+      default: {
+        return null;
+      }
+    }
+  }
+
+  /**
    * Return the INodeId of the specified path.
    */
   private long getINodeId(String strPath) {
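
Together with the GETACLSTATUS branch added to FSImageHandler above, getAclStatus() lets the WebImageViewer answer ACL queries. Against a running viewer, a request and the JSON shape produced by the StringBuilder above would look roughly like this (host, path, owner, and entries are illustrative):

    bash$ curl -i "http://127.0.0.1:5978/webhdfs/v1/user?op=GETACLSTATUS"
    HTTP/1.1 200 OK

    {"AclStatus":{"entries":["user:foo:rw-","group::r--"],"group": "supergroup","owner": "hdfs","stickyBit": false}}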

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java Mon Apr 28 19:40:06 2014
@@ -63,8 +63,6 @@ public class DataTransferThrottler {
   /**
   * Sets throttle bandwidth. This takes effect by the end of the current
   * period at the latest.
-   * 
-   * @param bytesPerSecond 
    */
   public synchronized void setBandwidth(long bytesPerSecond) {
     if ( bytesPerSecond <= 0 ) {

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Mon Apr 28 19:40:06 2014
@@ -490,8 +490,25 @@ public class WebHdfsFileSystem extends F
 
     private void connect(boolean doOutput) throws IOException {
       conn.setRequestMethod(op.getType().toString());
-      conn.setDoOutput(doOutput);
       conn.setInstanceFollowRedirects(false);
+      switch (op.getType()) {
+        // if not sending a message body for a POST or PUT operation, we
+        // need to ensure the server/proxy knows this
+        case POST:
+        case PUT: {
+          conn.setDoOutput(true);
+          if (!doOutput) {
+            // explicitly setting Content-Length to 0 breaks SPNEGO;
+            // opening and closing the stream will send "Content-Length: 0"
+            conn.getOutputStream().close();
+          }
+          break;
+        }
+        default: {
+          conn.setDoOutput(doOutput);
+          break;
+        }
+      }
       conn.connect();
     }
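
The switch above works around an HttpURLConnection subtlety: for a bodiless POST or PUT, enabling output and then opening and closing the stream makes the client send an explicit "Content-Length: 0" header, which SPNEGO-authenticating servers and proxies accept. A self-contained sketch of the same trick, with an illustrative URL:

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class EmptyBodyPut {
      public static void main(String[] args) throws IOException {
        // Illustrative endpoint; substitute a real WebHDFS URL.
        URL url = new URL("http://localhost:50070/webhdfs/v1/tmp/d?op=MKDIRS");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        conn.setInstanceFollowRedirects(false);
        conn.setDoOutput(true);
        // No body to send: opening and closing the output stream emits
        // "Content-Length: 0" rather than leaving the length unset.
        conn.getOutputStream().close();
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }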
 

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java Mon Apr 28 19:40:06 2014
@@ -60,10 +60,7 @@ public class AclPermissionParam extends 
   }
 
   /**
-   * Parse the list of AclEntry and returns aclspec.
-   * 
-   * @param List <AclEntry>
-   * @return String
+   * @return the aclspec string parsed from {@code aclEntry}
    */
   private static String parseAclSpec(List<AclEntry> aclEntry) {
     return StringUtils.join(aclEntry, ",");

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java Mon Apr 28 19:40:06 2014
@@ -71,8 +71,8 @@ public abstract class HttpOpParam<E exte
         GetOpParam.Op.GETFILECHECKSUM);
     
     static final List<TemporaryRedirectOp> values
-        = Collections.unmodifiableList(Arrays.asList(
-            new TemporaryRedirectOp[]{CREATE, APPEND, OPEN, GETFILECHECKSUM}));
+        = Collections.unmodifiableList(Arrays.asList(CREATE, APPEND, OPEN,
+                                       GETFILECHECKSUM));
 
     /** Get an object for the given op. */
     public static TemporaryRedirectOp valueOf(final Op op) {

Propchange: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1588992-1590763

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Mon Apr 28 19:40:06 2014
@@ -55,11 +55,11 @@
   <name>dfs.namenode.rpc-bind-host</name>
   <value></value>
   <description>
-    The actual address the server will bind to. If this optional address is
-    set, the RPC server will bind to this address and the port specified in
-    dfs.namenode.rpc-address for the RPC server. It can also be specified
-    per name node or name service for HA/Federation. This is most useful for
-    making name node listen to all interfaces by setting to 0.0.0.0.
+    The actual address the RPC server will bind to. If this optional address is
+    set, it overrides only the hostname portion of dfs.namenode.rpc-address.
+    It can also be specified per name node or name service for HA/Federation.
+    This is useful for making the name node listen on all interfaces by
+    setting it to 0.0.0.0.
   </description>
 </property>
 
@@ -80,11 +80,11 @@
   <name>dfs.namenode.servicerpc-bind-host</name>
   <value></value>
   <description>
-    The actual address the server will bind to. If this optional address is
-    set, the service RPC server will bind to this address and the port 
-    specified in dfs.namenode.servicerpc-address. It can also be specified
-    per name node or name service for HA/Federation. This is most useful for
-    making name node listen to all interfaces by setting to 0.0.0.0.
+    The actual address the service RPC server will bind to. If this optional address is
+    set, it overrides only the hostname portion of dfs.namenode.servicerpc-address.
+    It can also be specified per name node or name service for HA/Federation.
+    This is useful for making the name node listen on all interfaces by
+    setting it to 0.0.0.0.
   </description>
 </property>
 
@@ -143,6 +143,18 @@
 </property>
 
 <property>
+  <name>dfs.namenode.http-bind-host</name>
+  <value></value>
+  <description>
+    The actual address the HTTP server will bind to. If this optional address
+    is set, it overrides only the hostname portion of dfs.namenode.http-address.
+    It can also be specified per name node or name service for HA/Federation.
+    This is useful for making the name node HTTP server listen on all
+    interfaces by setting it to 0.0.0.0.
+  </description>
+</property>
+
+<property>
   <name>dfs.https.enable</name>
   <value>false</value>
   <description>
@@ -207,6 +219,18 @@
   <description>The namenode secure http server address and port.</description>
 </property>
 
+<property>
+  <name>dfs.namenode.https-bind-host</name>
+  <value></value>
+  <description>
+    The actual address the HTTPS server will bind to. If this optional address
+    is set, it overrides only the hostname portion of dfs.namenode.https-address.
+    It can also be specified per name node or name service for HA/Federation.
+    This is useful for making the name node HTTPS server listen on all
+    interfaces by setting it to 0.0.0.0.
+  </description>
+</property>
+
  <property>
   <name>dfs.datanode.dns.interface</name>
   <value>default</value>
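
As a usage sketch of the new *-bind-host keys (hostname and port are illustrative): clients keep connecting to the advertised rpc-address while the NameNode itself listens on all interfaces.

    <!-- hdfs-site.xml: advertise nn1.example.com, bind to all interfaces -->
    <property>
      <name>dfs.namenode.rpc-address</name>
      <value>nn1.example.com:8020</value>
    </property>
    <property>
      <name>dfs.namenode.rpc-bind-host</name>
      <value>0.0.0.0</value>
    </property>
    <property>
      <name>dfs.namenode.http-bind-host</name>
      <value>0.0.0.0</value>
    </property>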

Propchange: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1588992-1590763

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html Mon Apr 28 19:40:06 2014
@@ -1,3 +1,5 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -14,22 +16,46 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<meta HTTP-EQUIV="REFRESH" content="0;url=dataNodeHome.jsp"/>
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
 <head>
-<title>Hadoop Administration</title>
+<link rel="stylesheet" type="text/css" href="/static/bootstrap-3.0.2/css/bootstrap.min.css" />
+<link rel="stylesheet" type="text/css" href="/static/hadoop.css" />
+<title>DataNode Information</title>
 </head>
-
 <body>
 
-<h1>Hadoop Administration</h1>
-
-<ul>
-
-<li><a href="dataNodeHome.jsp">DataNode Home</a></li>
-
-</ul>
-
+<header class="navbar navbar-inverse bs-docs-nav" role="banner">
+<div class="container">
+  <div class="navbar-header">
+    <div class="navbar-brand">Hadoop</div>
+  </div>
+
+  <ul class="nav navbar-nav" id="ui-tabs">
+    <li><a>Overview</a></li>
+  </ul>
+</div>
+</header>
+
+<div class="container">
+
+<div class="tab-content">
+  <div class="tab-pane" id="tab-overview">
+    <div class="page-header"><h1>DataNode on <small><div id="authority" style="display: inline-block"></div></small></h1></div>
+  </div>
+</div>
+
+<div class="row">
+  <hr />
+  <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+</div>
+</div>
+
+<script type="text/javascript" src="/static/jquery-1.10.2.min.js">
+</script><script type="text/javascript" src="/static/bootstrap-3.0.2/js/bootstrap.min.js">
+</script>
+<script type="text/javascript">
+$('#authority').html(window.location.host);
+$('#tab-overview').addClass('active');
+</script>
 </body>
-
 </html>

Propchange: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1588992-1590763

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html Mon Apr 28 19:40:06 2014
@@ -351,7 +351,7 @@
 </script><script type="text/javascript" src="/static/bootstrap-3.0.2/js/bootstrap.min.js">
 </script><script type="text/javascript" src="/static/dust-full-2.0.0.min.js">
 </script><script type="text/javascript" src="/static/dust-helpers-1.1.1.min.js">
-</script><script type="text/javascript" src="dfs-dust.js">
+</script><script type="text/javascript" src="/static/dfs-dust.js">
 </script><script type="text/javascript" src="dfshealth.js">
 </script>
 </body>

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html Mon Apr 28 19:40:06 2014
@@ -122,7 +122,7 @@
     </script><script type="text/javascript" src="/static/bootstrap-3.0.2/js/bootstrap.min.js">
     </script><script type="text/javascript" src="/static/dust-full-2.0.0.min.js">
     </script><script type="text/javascript" src="/static/dust-helpers-1.1.1.min.js">
-    </script><script type="text/javascript" src="dfs-dust.js">
+    </script><script type="text/javascript" src="/static/dfs-dust.js">
     </script><script type="text/javascript" src="explorer.js">
     </script>
     <hr />

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js Mon Apr 28 19:40:06 2014
@@ -75,8 +75,7 @@
   }
 
   function get_response_err_msg(data) {
-    var msg = data.RemoteException !== undefined ? data.RemoteException.message : "";
-    return msg;
+    return data.RemoteException !== undefined ? data.RemoteException.message : "";
   }
 
   function view_file_details(path, abs_path) {

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html Mon Apr 28 19:40:06 2014
@@ -1,5 +1,5 @@
-<meta HTTP-EQUIV="REFRESH" content="0;url=journalstatus.jsp"/>
-<html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -16,14 +16,46 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<head><title>Hadoop Administration</title></head>
-
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<link rel="stylesheet" type="text/css" href="/static/bootstrap-3.0.2/css/bootstrap.min.css" />
+<link rel="stylesheet" type="text/css" href="/static/hadoop.css" />
+<title>JournalNode Information</title>
+</head>
 <body>
-<h1>Hadoop Administration</h1>
 
-<ul> 
-  <li><a href="journalstatus.jsp">Status</a></li> 
-</ul>
+<header class="navbar navbar-inverse bs-docs-nav" role="banner">
+<div class="container">
+  <div class="navbar-header">
+    <div class="navbar-brand">Hadoop</div>
+  </div>
+
+  <ul class="nav navbar-nav" id="ui-tabs">
+    <li><a>Overview</a></li>
+  </ul>
+</div>
+</header>
+
+<div class="container">
+
+<div class="tab-content">
+  <div class="tab-pane" id="tab-overview">
+    <div class="page-header"><h1>JournalNode on <small><div id="authority" style="display: inline-block"></div></small></h1></div>
+  </div>
+</div>
+
+<div class="row">
+  <hr />
+  <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+</div>
+</div>
 
-</body> 
+<script type="text/javascript" src="/static/jquery-1.10.2.min.js">
+</script><script type="text/javascript" src="/static/bootstrap-3.0.2/js/bootstrap.min.js">
+</script>
+<script type="text/javascript">
+$('#authority').html(window.location.host);
+$('#tab-overview').addClass('active');
+</script>
+</body>
 </html>

Propchange: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1588992-1590763

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html Mon Apr 28 19:40:06 2014
@@ -1,5 +1,3 @@
-<meta HTTP-EQUIV="REFRESH" content="0;url=status.jsp"/>
-<html>
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -16,14 +14,22 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<head><title>Hadoop Administration</title></head>
-
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+  <meta http-equiv="REFRESH" content="0;url=status.html" />
+  <title>Hadoop Administration</title>
+</head>
 <body>
+<script type="text/javascript">
+//<![CDATA[
+window.location.href='status.html';
+//]]>
+</script>
 <h1>Hadoop Administration</h1>
-
-<ul> 
-  <li><a href="status.jsp">Status</a></li> 
+<ul>
+  <li><a href="status.jsp">Status</a></li>
 </ul>
-
-</body> 
-</html>
+</body>
+</html>
\ No newline at end of file

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsImageViewer.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsImageViewer.apt.vm?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsImageViewer.apt.vm (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsImageViewer.apt.vm Mon Apr 28 19:40:06 2014
@@ -23,56 +23,29 @@ Offline Image Viewer Guide
 * Overview
 
    The Offline Image Viewer is a tool to dump the contents of hdfs fsimage
-   files to human-readable formats in order to allow offline analysis and
-   examination of an Hadoop cluster's namespace. The tool is able to
-   process very large image files relatively quickly, converting them to
-   one of several output formats. The tool handles the layout formats that
-   were included with Hadoop versions 16 and up. If the tool is not able
-   to process an image file, it will exit cleanly. The Offline Image
-   Viewer does not require an Hadoop cluster to be running; it is entirely
-   offline in its operation.
+   files to a human-readable format and to provide a read-only WebHDFS API
+   in order to allow offline analysis and examination of a Hadoop cluster's
+   namespace. The tool is able to process very large image files relatively
+   quickly. The tool handles the layout formats that were included with Hadoop
+   versions 2.4 and up. If you want to handle older layout formats, you can
+   use the Offline Image Viewer of Hadoop 2.3.
+   If the tool is not able to process an image file, it will exit cleanly.
+   The Offline Image Viewer does not require a Hadoop cluster to be running;
+   it is entirely offline in its operation.
 
    The Offline Image Viewer provides several output processors:
 
-   [[1]] Ls is the default output processor. It closely mimics the format of
-      the lsr command. It includes the same fields, in the same order, as
-      lsr : directory or file flag, permissions, replication, owner,
-      group, file size, modification date, and full path. Unlike the lsr
-      command, the root path is included. One important difference
-      between the output of the lsr command this processor, is that this
-      output is not sorted by directory name and contents. Rather, the
-      files are listed in the order in which they are stored in the
-      fsimage file. Therefore, it is not possible to directly compare the
-      output of the lsr command this this tool. The Ls processor uses
-      information contained within the Inode blocks to calculate file
-      sizes and ignores the -skipBlocks option.
-
-   [[2]] Indented provides a more complete view of the fsimage's contents,
-      including all of the information included in the image, such as
-      image version, generation stamp and inode- and block-specific
-      listings. This processor uses indentation to organize the output
-      into a hierarchal manner. The lsr format is suitable for easy human
-      comprehension.
-
-   [[3]] Delimited provides one file per line consisting of the path,
-      replication, modification time, access time, block size, number of
-      blocks, file size, namespace quota, diskspace quota, permissions,
-      username and group name. If run against an fsimage that does not
-      contain any of these fields, the field's column will be included,
-      but no data recorded. The default record delimiter is a tab, but
-      this may be changed via the -delimiter command line argument. This
-      processor is designed to create output that is easily analyzed by
-      other tools, such as {{{http://pig.apache.org}Apache Pig}}. See
-      the {{Analyzing Results}} section for further information on using
-      this processor to analyze the contents of fsimage files.
+   [[1]] Web is the default output processor. It launches an HTTP server
+      that exposes a read-only WebHDFS API. Users can investigate the
+      namespace interactively using the HTTP REST API.
 
-   [[4]] XML creates an XML document of the fsimage and includes all of the
+   [[2]] XML creates an XML document of the fsimage and includes all of the
       information within the fsimage, similar to the lsr processor. The
       output of this processor is amenable to automated processing and
       analysis with XML tools. Due to the verbosity of the XML syntax,
       this processor will also generate the largest amount of output.
 
-   [[5]] FileDistribution is the tool for analyzing file sizes in the
+   [[3]] FileDistribution is the tool for analyzing file sizes in the
       namespace image. In order to run the tool one should define a range
       of integers [0, maxSize] by specifying maxSize and a step. The
       range of integers is divided into segments of size step: [0, s[1],
@@ -86,105 +59,93 @@ Offline Image Viewer Guide
 
 * Usage
 
-** Basic
+** Web Processor
 
-   The simplest usage of the Offline Image Viewer is to provide just an
-   input and output file, via the -i and -o command-line switches:
+   The Web processor launches an HTTP server that exposes a read-only
+   WebHDFS API. Users can specify the address to listen on with the -addr
+   option (localhost:5978 by default).
 
 ----
-   bash$ bin/hdfs oiv -i fsimage -o fsimage.txt
+   bash$ bin/hdfs oiv -i fsimage
+   14/04/07 13:25:14 INFO offlineImageViewer.WebImageViewer: WebImageViewer
+   started. Listening on /127.0.0.1:5978. Press Ctrl+C to stop the viewer.
 ----
 
-   This will create a file named fsimage.txt in the current directory
-   using the Ls output processor. For very large image files, this process
-   may take several minutes.
+   Users can access the viewer and get information from the fsimage with
+   the following shell command:
 
-   One can specify which output processor via the command-line switch -p.
-   For instance:
+----
+   bash$ bin/hdfs dfs -ls webhdfs://127.0.0.1:5978/
+   Found 2 items
+   drwxrwx---   - root supergroup          0 2014-03-26 20:16 webhdfs://127.0.0.1:5978/tmp
+   drwxr-xr-x   - root supergroup          0 2014-03-31 14:08 webhdfs://127.0.0.1:5978/user
+----
+
+   To get information on all of the files and directories, you can simply
+   use the following command:
 
 ----
-   bash$ bin/hdfs oiv -i fsimage -o fsimage.xml -p XML
+   bash$ bin/hdfs dfs -ls -R webhdfs://127.0.0.1:5978/
 ----
 
-   or
+   Users can also get JSON formatted FileStatuses via the HTTP REST API.
 
 ----
-   bash$ bin/hdfs oiv -i fsimage -o fsimage.txt -p Indented
+   bash$ curl -i http://127.0.0.1:5978/webhdfs/v1/?op=liststatus
+   HTTP/1.1 200 OK
+   Content-Type: application/json
+   Content-Length: 252
+
+   {"FileStatuses":{"FileStatus":[
+   {"fileId":16386,"accessTime":0,"replication":0,"owner":"theuser","length":0,"permission":"755","blockSize":0,"modificationTime":1392772497282,"type":"DIRECTORY","group":"supergroup","childrenNum":1,"pathSuffix":"user"}
+   ]}}
 ----
 
-   This will run the tool using either the XML or Indented output
-   processor, respectively.
+   The Web processor now supports the following operations:
 
-   One command-line option worth considering is -skipBlocks, which
-   prevents the tool from explicitly enumerating all of the blocks that
-   make up a file in the namespace. This is useful for file systems that
-   have very large files. Enabling this option can significantly decrease
-   the size of the resulting output, as individual blocks are not
-   included. Note, however, that the Ls processor needs to enumerate the
-   blocks and so overrides this option.
+   * {{{./WebHDFS.html#List_a_Directory}LISTSTATUS}}
 
-Example
+   * {{{./WebHDFS.html#Status_of_a_FileDirectory}GETFILESTATUS}}
 
-   Consider the following contrived namespace:
+   * {{{./WebHDFS.html#Get_ACL_Status}GETACLSTATUS}}
 
-----
-   drwxr-xr-x   - theuser supergroup          0 2009-03-16 21:17 /anotherDir
-   -rw-r--r--   3 theuser supergroup  286631664 2009-03-16 21:15 /anotherDir/biggerfile
-   -rw-r--r--   3 theuser supergroup       8754 2009-03-16 21:17 /anotherDir/smallFile
-   drwxr-xr-x   - theuser supergroup          0 2009-03-16 21:11 /mapredsystem
-   drwxr-xr-x   - theuser supergroup          0 2009-03-16 21:11 /mapredsystem/theuser
-   drwxr-xr-x   - theuser supergroup          0 2009-03-16 21:11 /mapredsystem/theuser/mapredsystem
-   drwx-wx-wx   - theuser supergroup          0 2009-03-16 21:11 /mapredsystem/theuser/mapredsystem/ip.redacted.com
-   drwxr-xr-x   - theuser supergroup          0 2009-03-16 21:12 /one
-   drwxr-xr-x   - theuser supergroup          0 2009-03-16 21:12 /one/two
-   drwxr-xr-x   - theuser supergroup          0 2009-03-16 21:16 /user
-   drwxr-xr-x   - theuser supergroup          0 2009-03-16 21:19 /user/theuser
-----
+** XML Processor
 
-   Applying the Offline Image Processor against this file with default
-   options would result in the following output:
+   The XML processor dumps the entire contents of the fsimage. Users can
+   specify the input and output files via the -i and -o command-line options.
 
 ----
-   machine:hadoop-0.21.0-dev theuser$ bin/hdfs oiv -i fsimagedemo -o fsimage.txt
-
-   drwxr-xr-x  -   theuser supergroup            0 2009-03-16 14:16 /
-   drwxr-xr-x  -   theuser supergroup            0 2009-03-16 14:17 /anotherDir
-   drwxr-xr-x  -   theuser supergroup            0 2009-03-16 14:11 /mapredsystem
-   drwxr-xr-x  -   theuser supergroup            0 2009-03-16 14:12 /one
-   drwxr-xr-x  -   theuser supergroup            0 2009-03-16 14:16 /user
-   -rw-r--r--  3   theuser supergroup    286631664 2009-03-16 14:15 /anotherDir/biggerfile
-   -rw-r--r--  3   theuser supergroup         8754 2009-03-16 14:17 /anotherDir/smallFile
-   drwxr-xr-x  -   theuser supergroup            0 2009-03-16 14:11 /mapredsystem/theuser
-   drwxr-xr-x  -   theuser supergroup            0 2009-03-16 14:11 /mapredsystem/theuser/mapredsystem
-   drwx-wx-wx  -   theuser supergroup            0 2009-03-16 14:11 /mapredsystem/theuser/mapredsystem/ip.redacted.com
-   drwxr-xr-x  -   theuser supergroup            0 2009-03-16 14:12 /one/two
-   drwxr-xr-x  -   theuser supergroup            0 2009-03-16 14:19 /user/theuser
+   bash$ bin/hdfs oiv -p XML -i fsimage -o fsimage.xml
 ----
 
-   Similarly, applying the Indented processor would generate output that
-   begins with:
+   This will create a file named fsimage.xml containing all the information
+   in the fsimage. For very large image files, this process may take several
+   minutes.
 
-----
-   machine:hadoop-0.21.0-dev theuser$ bin/hdfs oiv -i fsimagedemo -p Indented -o fsimage.txt
+   Applying the Offline Image Viewer with the XML processor would result in
+   the following output:
 
-   FSImage
-     ImageVersion = -19
-     NamespaceID = 2109123098
-     GenerationStamp = 1003
-     INodes [NumInodes = 12]
-       Inode
-         INodePath =
-         Replication = 0
-         ModificationTime = 2009-03-16 14:16
-         AccessTime = 1969-12-31 16:00
-         BlockSize = 0
-         Blocks [NumBlocks = -1]
-         NSQuota = 2147483647
-         DSQuota = -1
-         Permissions
-           Username = theuser
-           GroupName = supergroup
-           PermString = rwxr-xr-x
+----
+   <?xml version="1.0"?>
+   <fsimage>
+   <NameSection>
+     <genstampV1>1000</genstampV1>
+     <genstampV2>1002</genstampV2>
+     <genstampV1Limit>0</genstampV1Limit>
+     <lastAllocatedBlockId>1073741826</lastAllocatedBlockId>
+     <txid>37</txid>
+   </NameSection>
+   <INodeSection>
+     <lastInodeId>16400</lastInodeId>
+     <inode>
+       <id>16385</id>
+       <type>DIRECTORY</type>
+       <name></name>
+       <mtime>1392772497282</mtime>
+       <permission>theuser:supergroup:rwxr-xr-x</permission>
+       <nsquota>9223372036854775807</nsquota>
+       <dsquota>-1</dsquota>
+     </inode>
    ...remaining output omitted...
 ----
 
@@ -193,30 +154,32 @@ Example
 *-----------------------:-----------------------------------+
 | <<Flag>>              | <<Description>>                   |
 *-----------------------:-----------------------------------+
-| <<<-i>>>\|<<<--inputFile>>> <input file> | Specify the input fsimage file to
-|                       | process. Required.
+| <<<-i>>>\|<<<--inputFile>>> <input file> | Specify the input fsimage file
+|                       | to process. Required.
 *-----------------------:-----------------------------------+
-| <<<-o>>>\|<<<--outputFile>>> <output file> | Specify the output filename, if the
-|                       | specified output processor generates one. If the specified file already
-|                       | exists, it is silently overwritten. Required.
+| <<<-o>>>\|<<<--outputFile>>> <output file> | Specify the output filename,
+|                       | if the specified output processor generates one. If
+|                       | the specified file already exists, it is silently
+|                       | overwritten. (output to stdout by default)
+*-----------------------:-----------------------------------+
+| <<<-p>>>\|<<<--processor>>> <processor> | Specify the image processor to
+|                       | apply against the image file. Currently valid options
+|                       | are Web (default), XML and FileDistribution.
+*-----------------------:-----------------------------------+
+| <<<-addr>>> <address> | Specify the address (host:port) to listen on
+|                       | (localhost:5978 by default). This option is used
+|                       | with the Web processor.
+*-----------------------:-----------------------------------+
+| <<<-maxSize>>> <size> | Specify the range [0, maxSize] of file sizes to be
+|                       | analyzed in bytes (128GB by default). This option is
+|                       | used with the FileDistribution processor.
+*-----------------------:-----------------------------------+
+| <<<-step>>> <size>    | Specify the granularity of the distribution in bytes
+|                       | (2MB by default). This option is used with the
+|                       | FileDistribution processor.
 *-----------------------:-----------------------------------+
-| <<<-p>>>\|<<<--processor>>> <processor> | Specify the image processor to apply
-|                       | against the image file. Currently valid options are Ls (default), XML
-|                       | and Indented..
-*-----------------------:-----------------------------------+
-| <<<-skipBlocks>>>     | Do not enumerate individual blocks within files. This may
-|                       | save processing time and outfile file space on namespaces with very
-|                       | large files. The Ls processor reads the blocks to correctly determine
-|                       | file sizes and ignores this option.
-*-----------------------:-----------------------------------+
-| <<<-printToScreen>>>  | Pipe output of processor to console as well as specified
-|                       | file. On extremely large namespaces, this may increase processing time
-|                       | by an order of magnitude.
-*-----------------------:-----------------------------------+
-| <<<-delimiter>>> <arg>| When used in conjunction with the Delimited processor,
-|                       | replaces the default tab delimiter with the string specified by arg.
-*-----------------------:-----------------------------------+
-| <<<-h>>>\|<<<--help>>>| Display the tool usage and help information and exit.
+| <<<-h>>>\|<<<--help>>>| Display the tool usage and help information and
+|                       | exit.
 *-----------------------:-----------------------------------+
 
 * Analyzing Results
@@ -224,193 +187,4 @@ Example
    The Offline Image Viewer makes it easy to gather large amounts of data
    about the hdfs namespace. This information can then be used to explore
    file system usage patterns or find specific files that match arbitrary
-   criteria, along with other types of namespace analysis. The Delimited
-   image processor in particular creates output that is amenable to
-   further processing by tools such as [38]Apache Pig. Pig provides a
-   particularly good choice for analyzing these data as it is able to deal
-   with the output generated from a small fsimage but also scales up to
-   consume data from extremely large file systems.
-
-   The Delimited image processor generates lines of text separated, by
-   default, by tabs and includes all of the fields that are common between
-   constructed files and files that were still under constructed when the
-   fsimage was generated. Examples scripts are provided demonstrating how
-   to use this output to accomplish three tasks: determine the number of
-   files each user has created on the file system, find files were created
-   but have not accessed, and find probable duplicates of large files by
-   comparing the size of each file.
-
-   Each of the following scripts assumes you have used the Delimited
-   processor to generate an output file named foo, and that the results
-   of the Pig analysis will be stored in a file named results.
-
-** Total Number of Files for Each User
-
-   This script processes each path within the namespace, groups them by
-   the file owner and determines the total number of files each user owns.
-
-----
-      numFilesOfEachUser.pig:
-   -- This script determines the total number of files each user has in
-   -- the namespace. Its output is of the form:
-   --   username, totalNumFiles
-
-   -- Load all of the fields from the file
-   A = LOAD '$inputFile' USING PigStorage('\t') AS (path:chararray,
-                                                    replication:int,
-                                                    modTime:chararray,
-                                                    accessTime:chararray,
-                                                    blockSize:long,
-                                                    numBlocks:int,
-                                                    fileSize:long,
-                                                    NamespaceQuota:int,
-                                                    DiskspaceQuota:int,
-                                                    perms:chararray,
-                                                    username:chararray,
-                                                    groupname:chararray);
-
-
-   -- Grab just the path and username
-   B = FOREACH A GENERATE path, username;
-
-   -- Generate the sum of the number of paths for each user
-   C = FOREACH (GROUP B BY username) GENERATE group, COUNT(B.path);
-
-   -- Save results
-   STORE C INTO '$outputFile';
-----
-
-   This script can be run with Pig using the following command:
-
-----
-   bin/pig -x local -param inputFile=../foo -param outputFile=../results ../numFilesOfEachUser.pig
-----
-
-   The output file's content will be similar to that below:
-
-----
-   bart 1
-   lisa 16
-   homer 28
-   marge 2456
-----
-
-** Files That Have Never Been Accessed
-
-   This script finds files that were created but whose access times were
-   never changed, meaning they were never opened or viewed.
-
-----
-      neverAccessed.pig:
-   -- This script generates a list of files that were created but never
-   -- accessed, based on their AccessTime
-
-   -- Load all of the fields from the file
-   A = LOAD '$inputFile' USING PigStorage('\t') AS (path:chararray,
-                                                    replication:int,
-                                                    modTime:chararray,
-                                                    accessTime:chararray,
-                                                    blockSize:long,
-                                                    numBlocks:int,
-                                                    fileSize:long,
-                                                    NamespaceQuota:int,
-                                                    DiskspaceQuota:int,
-                                                    perms:chararray,
-                                                    username:chararray,
-                                                    groupname:chararray);
-
-   -- Grab just the path and last time the file was accessed
-   B = FOREACH A GENERATE path, accessTime;
-
-   -- Drop all the paths that don't have the default assigned last-access time
-   C = FILTER B BY accessTime == '1969-12-31 16:00';
-
-   -- Drop the accessTimes, since they're all the same
-   D = FOREACH C GENERATE path;
-
-   -- Save results
-   STORE D INTO '$outputFile';
-----
-
-   This script can be run with Pig using the following command; the
-   output file's content will be a list of files that were created but
-   never viewed afterwards.
-
-----
-   bin/pig -x local -param inputFile=../foo -param outputFile=../results ../neverAccessed.pig
-----
-
-** Probable Duplicated Files Based on File Size
-
-   This script groups files together based on their size, drops any that
-   are less than 100 MB, and returns a list of the file size, the number
-   of files found, and a tuple of the file paths. This can be used to
-   find likely duplicates within the filesystem namespace.
-
-----
-      probableDuplicates.pig:
-   -- This script finds probable duplicate files greater than 100 MB by
-   -- grouping together files based on their byte size. Files of this size
-   -- with exactly the same number of bytes can be considered probable
-   -- duplicates, but should be checked further, either by comparing the
-   -- contents directly or by another proxy, such as a hash of the contents.
-   -- The script's output is of the form:
-   --    fileSize numProbableDuplicates {(probableDup1), (probableDup2)}
-
-   -- Load all of the fields from the file
-   A = LOAD '$inputFile' USING PigStorage('\t') AS (path:chararray,
-                                                    replication:int,
-                                                    modTime:chararray,
-                                                    accessTime:chararray,
-                                                    blockSize:long,
-                                                    numBlocks:int,
-                                                    fileSize:long,
-                                                    NamespaceQuota:int,
-                                                    DiskspaceQuota:int,
-                                                    perms:chararray,
-                                                    username:chararray,
-                                                    groupname:chararray);
-
-   -- Grab the pathname and filesize
-   B = FOREACH A generate path, fileSize;
-
-   -- Drop files smaller than 100 MB
-   C = FILTER B by fileSize > 100L  * 1024L * 1024L;
-
-   -- Gather all the files of the same byte size
-   D = GROUP C by fileSize;
-
-   -- Generate path, num of duplicates, list of duplicates
-   E = FOREACH D generate group AS fileSize, COUNT(C) as numDupes, C.path AS files;
-
-   -- Drop all the file sizes for which there is only one file
-   F = FILTER E by numDupes > 1L;
-
-   -- Sort by the size of the files
-   G = ORDER F by fileSize;
-
-   -- Save results
-   STORE G INTO '$outputFile';
-----
-
-   This script can be run with Pig using the following command:
-
-----
-   bin/pig -x local -param inputFile=../foo -param outputFile=../results ../probableDuplicates.pig
-----
-
-   The output file's content will be similar to that below:
-
-----
-   1077288632 2 {(/user/tennant/work1/part-00501),(/user/tennant/work1/part-00993)}
-   1077288664 4 {(/user/tennant/work0/part-00567),(/user/tennant/work0/part-03980),(/user/tennant/work1/part-00725),(/user/eccelston/output/part-03395)}
-   1077288668 3 {(/user/tennant/work0/part-03705),(/user/tennant/work0/part-04242),(/user/tennant/work1/part-03839)}
-   1077288698 2 {(/user/tennant/work0/part-00435),(/user/eccelston/output/part-01382)}
-   1077288702 2 {(/user/tennant/work0/part-03864),(/user/eccelston/output/part-03234)}
-----
-
-   Each line includes the file size in bytes that was found to be
-   duplicated, the number of duplicates found, and a list of the
-   duplicated paths. Files smaller than 100 MB are ignored; at these
-   sizes, files with exactly the same number of bytes are reasonably
-   likely to be duplicates.
+   criteria, along with other types of namespace analysis.

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm Mon Apr 28 19:40:06 2014
@@ -243,6 +243,19 @@ HDFS NFS Gateway
       hadoop-daemon.sh stop portmap
 -------------------------
 
+  Optionally, you can forgo running the Hadoop-provided portmap daemon and
+  instead use the system portmap daemon on any operating system, provided you
+  start the NFS Gateway as root. This allows the HDFS NFS Gateway to work
+  around the aforementioned bug and still register with the system portmap
+  daemon. To do so, start the NFS gateway daemon as you normally would, but
+  make sure to do so as the "root" user, and also set the
+  "HADOOP_PRIVILEGED_NFS_USER" environment variable to an unprivileged user.
+  In this mode the NFS Gateway starts as root to perform its initial
+  registration with the system portmap, and then drops privileges to the user
+  specified by HADOOP_PRIVILEGED_NFS_USER for the remainder of the NFS
+  Gateway process's lifetime. Note that if you choose this route, you should
+  skip steps 1 and 2 above.
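+
+  For example, a minimal sketch (the unprivileged user name "nfsserver" is
+  illustrative; substitute any unprivileged account):
+
+-------------------------
+  # run as root; privileges drop to $HADOOP_PRIVILEGED_NFS_USER after the
+  # gateway registers with the system portmap
+  export HADOOP_PRIVILEGED_NFS_USER=nfsserver
+  hadoop-daemon.sh start nfs3
+-------------------------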
+
 
 * {Verify validity of NFS related services}
 

Propchange: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1588992-1590763

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java Mon Apr 28 19:40:06 2014
@@ -229,7 +229,7 @@ public class BenchmarkThroughput extends
   }
 
   /**
-   * @param args
+   * @param args command line arguments
    */
   public static void main(String[] args) throws Exception {
     int res = ToolRunner.run(new HdfsConfiguration(),

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Mon Apr 28 19:40:06 2014
@@ -82,6 +82,7 @@ import java.util.concurrent.TimeoutExcep
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 /** Utilities for HDFS tests */
 public class DFSTestUtil {
@@ -1192,7 +1193,20 @@ public class DFSTestUtil {
     long c = (val + factor - 1) / factor;
     return c * factor;
   }
-  
+
+  public static void checkComponentsEquals(byte[][] expected, byte[][] actual) {
+    assertEquals("expected: " + DFSUtil.byteArray2PathString(expected)
+        + ", actual: " + DFSUtil.byteArray2PathString(actual), expected.length,
+        actual.length);
+    int i = 0;
+    for (byte[] e : expected) {
+      byte[] actualComponent = actual[i++];
+      assertTrue("expected: " + DFSUtil.bytes2String(e) + ", actual: "
+          + DFSUtil.bytes2String(actualComponent),
+          Arrays.equals(e, actualComponent));
+    }
+  }
+
   /**
    * A short-circuit test context which makes it easier to get a short-circuit
    * configuration and set everything up.

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Mon Apr 28 19:40:06 2014
@@ -1384,8 +1384,8 @@ public class MiniDFSCluster {
   /**
    * Finalize cluster for the namenode at the given index 
    * @see MiniDFSCluster#finalizeCluster(Configuration)
-   * @param nnIndex
-   * @param conf
+   * @param nnIndex index of the namenode
+   * @param conf configuration
    * @throws Exception
    */
   public void finalizeCluster(int nnIndex, Configuration conf) throws Exception {
@@ -2216,7 +2216,7 @@ public class MiniDFSCluster {
    * to determine the location of the storage of a DN instance in the mini cluster
    * @param dnIndex datanode index
    * @param dirIndex directory index.
-   * @return
+   * @return storage directory path
    */
   private static String getStorageDirPath(int dnIndex, int dirIndex) {
     return "data/data" + (2 * dnIndex + 1 + dirIndex);
@@ -2242,8 +2242,8 @@ public class MiniDFSCluster {
   }
   /**
    * Get directory relative to block pool directory in the datanode
-   * @param storageDir
-   * @return current directory
+   * @param storageDir storage directory
+   * @return current directory in the given storage directory
    */
   public static String getBPDir(File storageDir, String bpid, String dirName) {
     return getBPDir(storageDir, bpid) + dirName + "/";

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java Mon Apr 28 19:40:06 2014
@@ -51,6 +51,8 @@ public class TestDatanodeConfig {
   public static void setUp() throws Exception {
     clearBaseDir();
     Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY, 0);
+    conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "localhost:0");
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
     cluster.waitActive();
   }
@@ -100,8 +102,14 @@ public class TestDatanodeConfig {
     String dnDir3 = dataDir.getAbsolutePath() + "3";
     conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
                 dnDir1 + "," + dnDir2 + "," + dnDir3);
-    cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
-    assertTrue("Data-node should startup.", cluster.isDataNodeUp());
+    try {
+      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
+      assertTrue("Data-node should startup.", cluster.isDataNodeUp());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdownDataNodes();
+      }
+    }
   }
 
   private static String makeURI(String scheme, String host, String path)

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java Mon Apr 28 19:40:06 2014
@@ -42,7 +42,9 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.PathUtils;
@@ -202,10 +204,11 @@ public class TestDecommission {
   }
 
   /*
-   * decommission one random node and wait for each to reach the
-   * given {@code waitForState}.
+   * Decommission the DN with the given datanodeUuid, or one random node
+   * if datanodeUuid is null, and wait for the node to reach the given
+   * {@code waitForState}.
    */
   private DatanodeInfo decommissionNode(int nnIndex,
+                                  String datanodeUuid,
                                   ArrayList<DatanodeInfo>decommissionedNodes,
                                   AdminStates waitForState)
     throws IOException {
@@ -213,14 +216,26 @@ public class TestDecommission {
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
 
     //
-    // pick one datanode randomly.
+    // pick one datanode randomly unless the caller specifies one.
     //
     int index = 0;
-    boolean found = false;
-    while (!found) {
-      index = myrand.nextInt(info.length);
-      if (!info[index].isDecommissioned()) {
-        found = true;
+    if (datanodeUuid == null) {
+      boolean found = false;
+      while (!found) {
+        index = myrand.nextInt(info.length);
+        if (!info[index].isDecommissioned()) {
+          found = true;
+        }
+      }
+    } else {
+      // The caller specifies a DN
+      for (; index < info.length; index++) {
+        if (info[index].getDatanodeUuid().equals(datanodeUuid)) {
+          break;
+        }
+      }
+      if (index == info.length) {
+        throw new IOException("invalid datanodeUuid " + datanodeUuid);
       }
     }
     String nodename = info[index].getXferAddr();
@@ -242,11 +257,13 @@ public class TestDecommission {
     return ret;
   }
 
-  /* stop decommission of the datanode and wait for each to reach the NORMAL state */
-  private void recomissionNode(DatanodeInfo decommissionedNode) throws IOException {
+  /* Ask a specific NN to stop decommissioning the datanode and wait for it
+   * to reach the NORMAL state.
+   */
+  private void recomissionNode(int nnIndex, DatanodeInfo decommissionedNode) throws IOException {
     LOG.info("Recommissioning node: " + decommissionedNode);
     writeConfigFile(excludeFile, null);
-    refreshNodes(cluster.getNamesystem(), conf);
+    refreshNodes(cluster.getNamesystem(nnIndex), conf);
     waitNodeState(decommissionedNode, AdminStates.NORMAL);
 
   }
@@ -367,7 +384,7 @@ public class TestDecommission {
     int liveDecomissioned = ns.getNumDecomLiveDataNodes();
 
     // Decommission one node. Verify that node is decommissioned.
-    DatanodeInfo decomNode = decommissionNode(0, decommissionedNodes,
+    DatanodeInfo decomNode = decommissionNode(0, null, decommissionedNodes,
         AdminStates.DECOMMISSIONED);
     decommissionedNodes.add(decomNode);
     assertEquals(deadDecomissioned, ns.getNumDecomDeadDataNodes());
@@ -403,7 +420,130 @@ public class TestDecommission {
   public void testDecommissionFederation() throws IOException {
     testDecommission(2, 2);
   }
-  
+
+  /**
+   * Test decommission process on standby NN.
+   * Verify that admins can run "dfsadmin -refreshNodes" on the SBN and
+   * that the decommission process can finish as long as admins run
+   * "dfsadmin -refreshNodes" on the active NN.
+   * The SBN used to mark an excess replica upon recommission. The SBN's
+   * pick for the excess replica could be different from the one picked
+   * by the ANN. That created an inconsistent state and prevented the SBN
+   * from finishing decommission.
+   */
+  @Test(timeout=360000)
+  public void testDecommissionOnStandby() throws Exception {
+    Configuration hdfsConf = new HdfsConfiguration(conf);
+    hdfsConf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    hdfsConf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 30000);
+    hdfsConf.setInt(DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY, 2);
+
+    // The time to wait so that the slow DN's heartbeat is considered old
+    // by BlockPlacementPolicyDefault and thus will choose that DN for
+    // excess replica.
+    long slowHeartbeatDNwaitTime =
+        hdfsConf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000 * (hdfsConf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY,
+        DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT) + 1);
+
+    cluster = new MiniDFSCluster.Builder(hdfsConf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
+
+    cluster.transitionToActive(0);
+    cluster.waitActive();
+
+
+    // Step 1, create a cluster with 4 DNs. Blocks are stored on the first 3 DNs.
+    // The last DN is empty. Also configure the last DN to have slow heartbeat
+    // so that it will be chosen as excess replica candidate during recommission.
+
+    // Step 1.a, copy blocks to the first 3 DNs. Given the replica count is the
+    // same as # of DNs, each DN will have a replica for any block.
+    Path file1 = new Path("testDecommissionHA.dat");
+    int replicas = 3;
+    FileSystem activeFileSys = cluster.getFileSystem(0);
+    writeFile(activeFileSys, file1, replicas);
+
+    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
+        cluster.getNameNode(1));
+
+    // Step 1.b, start a DN with slow heartbeat, so that we can know for sure it
+    // will be chosen as the target of excess replica during recommission.
+    hdfsConf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30);
+    cluster.startDataNodes(hdfsConf, 1, true, null, null, null);
+
+    // Step 2, decommission the first DN at both ANN and SBN.
+    DataNode firstDN = cluster.getDataNodes().get(0);
+
+    // Step 2.a, ask ANN to decomm the first DN
+    DatanodeInfo decommissionedNodeFromANN = decommissionNode(
+        0, firstDN.getDatanodeUuid(), null, AdminStates.DECOMMISSIONED);
+
+    // Step 2.b, ask SBN to decomm the first DN
+    DatanodeInfo decomNodeFromSBN = decommissionNode(1, firstDN.getDatanodeUuid(), null,
+        AdminStates.DECOMMISSIONED);
+
+    // Step 3, recommission the first DN on SBN and ANN to create excess replica
+    // It recommissions the node on the SBN first to create a potentially
+    // inconsistent state. In a production cluster, such inconsistent state
+    // can happen even if the recommission command was issued on the ANN
+    // first, given the async nature of the system.
+
+    // Step 3.a, ask SBN to recomm the first DN.
+    // SBN has been fixed so that it no longer invalidates excess replica during
+    // recommission.
+    // Before the fix, SBN could get into the following state.
+    //    1. the last DN would have been chosen as excess replica, given its
+    //    heartbeat is considered old.
+    //    Please refer to BlockPlacementPolicyDefault#chooseReplicaToDelete
+    //    2. After recomissionNode finishes, SBN has 3 live replicas ( 0, 1, 2 )
+    //    and one excess replica ( 3 )
+    // After the fix,
+    //    After recomissionNode finishes, SBN has 4 live replicas ( 0, 1, 2, 3 )
+    Thread.sleep(slowHeartbeatDNwaitTime);
+    recomissionNode(1, decomNodeFromSBN);
+
+    // Step 3.b, ask ANN to recommission the first DN.
+    // To verify the fix, the test makes sure the excess replica picked by ANN
+    // is different from the one picked by SBN before the fix.
+    // To achieve that, we make sure next-to-last DN is chosen as excess replica
+    // by ANN.
+    // 1. restore lastDNProp's heartbeat interval.
+    // 2. Make next-to-last DN's heartbeat slow.
+    MiniDFSCluster.DataNodeProperties lastDNProp = cluster.stopDataNode(3);
+    lastDNProp.conf.setLong(
+        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
+    cluster.restartDataNode(lastDNProp);
+
+    MiniDFSCluster.DataNodeProperties nextToLastDNprop = cluster.stopDataNode(2);
+    nextToLastDNprop.conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30);
+    cluster.restartDataNode(nextToLastDNprop);
+    cluster.waitActive();
+    Thread.sleep(slowHeartbeatDNwaitTime);
+    recomissionNode(0, decommissionedNodeFromANN);
+
+    // Step 3.c, make sure the DN has deleted the block and report to NNs
+    cluster.triggerHeartbeats();
+    HATestUtil.waitForDNDeletions(cluster);
+    cluster.triggerDeletionReports();
+
+    // Step 4, decommission the first DN on both ANN and SBN
+    // With the fix to make sure SBN no longer marks excess replica
+    // during recommission, SBN's decommission can finish properly
+    decommissionNode(0, firstDN.getDatanodeUuid(), null,
+        AdminStates.DECOMMISSIONED);
+
+    // Ask SBN to decomm the first DN
+    decommissionNode(1, firstDN.getDatanodeUuid(), null,
+        AdminStates.DECOMMISSIONED);
+
+    cluster.shutdown();
+
+  }
+
   private void testDecommission(int numNamenodes, int numDatanodes)
       throws IOException {
     LOG.info("Starting test testDecommission");
@@ -430,7 +570,7 @@ public class TestDecommission {
         int liveDecomissioned = ns.getNumDecomLiveDataNodes();
 
         // Decommission one node. Verify that node is decommissioned.
-        DatanodeInfo decomNode = decommissionNode(i, decommissionedNodes,
+        DatanodeInfo decomNode = decommissionNode(i, null, decommissionedNodes,
             AdminStates.DECOMMISSIONED);
         decommissionedNodes.add(decomNode);
         assertEquals(deadDecomissioned, ns.getNumDecomDeadDataNodes());
@@ -458,7 +598,7 @@ public class TestDecommission {
       }
     }
 
-    // Restart the cluster and ensure recommissioned datanodes
+    // Restart the cluster and ensure decommissioned datanodes
     // are allowed to register with the namenode
     cluster.shutdown();
     startCluster(numNamenodes, numDatanodes, conf);
@@ -486,7 +626,7 @@ public class TestDecommission {
       writeFile(fileSys, file1, replicas);
         
       // Decommission one node. Verify that node is decommissioned.
-      DatanodeInfo decomNode = decommissionNode(i, decommissionedNodes,
+      DatanodeInfo decomNode = decommissionNode(i, null, decommissionedNodes,
           AdminStates.DECOMMISSIONED);
       decommissionedNodes.add(decomNode);
         
@@ -510,7 +650,7 @@ public class TestDecommission {
           + tries + " times.", tries < 20);
 
       // stop decommission and check if the new replicas are removed
-      recomissionNode(decomNode);
+      recomissionNode(0, decomNode);
       // wait for the block to be deleted
       tries = 0;
       while (tries++ < 20) {
@@ -561,7 +701,7 @@ public class TestDecommission {
       
       FSNamesystem fsn = cluster.getNamesystem(i);
       NameNode namenode = cluster.getNameNode(i);
-      DatanodeInfo downnode = decommissionNode(i, null,
+      DatanodeInfo downnode = decommissionNode(i, null, null,
           AdminStates.DECOMMISSION_INPROGRESS);
       // Check namenode stats for multiple datanode heartbeats
       verifyStats(namenode, fsn, downnode, true);
@@ -744,4 +884,76 @@ public class TestDecommission {
     startCluster(numNamenodes, numDatanodes, conf);
     cluster.shutdown();
   }
+
+  /**
+   * Test using a "registration name" in a host include file.
+   *
+   * Registration names are DataNode names specified in the configuration by
+   * dfs.datanode.hostname.  The DataNode will send this name to the NameNode
+   * as part of its registration.  Registration names are helpful when you
+   * want to override the normal first result of DNS resolution on the
+   * NameNode.  For example, a given datanode IP may map to two hostnames,
+   * and you may want to choose which hostname is used internally in the
+   * cluster.
+   *
+   * It is not recommended to use a registration name which is not also a
+   * valid DNS hostname for the DataNode.  See HDFS-5237 for background.
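+   *
+   * For illustration only, a hypothetical hdfs-site.xml entry setting a
+   * registration name (the host name is made up):
+   * <pre>
+   *   &lt;property&gt;
+   *     &lt;name&gt;dfs.datanode.hostname&lt;/name&gt;
+   *     &lt;value&gt;dn1.internal.example.com&lt;/value&gt;
+   *   &lt;/property&gt;
+   * </pre>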
+   */
+  @Test(timeout=360000)
+  public void testIncludeByRegistrationName() throws IOException,
+      InterruptedException {
+    Configuration hdfsConf = new Configuration(conf);
+    // Any IPv4 address starting with 127 functions as a "loopback" address
+    // which is connected to the current host.  So by choosing 127.0.0.100
+    // as our registration name, we have chosen a name which is also a valid
+    // way of reaching the local DataNode we're going to start.
+    // Typically, a registration name would be a hostname, but we don't want
+    // to deal with DNS in this test.
+    final String registrationName = "127.0.0.100";
+    final String nonExistentDn = "127.0.0.10";
+    hdfsConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, registrationName);
+    cluster = new MiniDFSCluster.Builder(hdfsConf)
+        .numDataNodes(1).checkDataNodeHostConfig(true)
+        .setupHostsFile(true).build();
+    cluster.waitActive();
+
+    // Set up an includes file that doesn't have our datanode.
+    ArrayList<String> nodes = new ArrayList<String>();
+    nodes.add(nonExistentDn);
+    writeConfigFile(hostsFile,  nodes);
+    refreshNodes(cluster.getNamesystem(0), hdfsConf);
+
+    // Wait for the DN to be marked dead.
+    DFSClient client = getDfsClient(cluster.getNameNode(0), hdfsConf);
+    while (true) {
+      DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.DEAD);
+      if (info.length == 1) {
+        break;
+      }
+      LOG.info("Waiting for datanode to be marked dead");
+      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
+    }
+
+    // Use a non-empty include file with our registration name.
+    // It should work.
+    int dnPort = cluster.getDataNodes().get(0).getXferPort();
+    nodes = new ArrayList<String>();
+    nodes.add(registrationName + ":" + dnPort);
+    writeConfigFile(hostsFile,  nodes);
+    refreshNodes(cluster.getNamesystem(0), hdfsConf);
+    cluster.restartDataNode(0);
+
+    // Wait for the DN to come back.
+    while (true) {
+      DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
+      if (info.length == 1) {
+        Assert.assertFalse(info[0].isDecommissioned());
+        Assert.assertFalse(info[0].isDecommissionInProgress());
+        assertEquals(registrationName, info[0].getHostName());
+        break;
+      }
+      LOG.info("Waiting for datanode to come back");
+      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java?rev=1590766&r1=1590765&r2=1590766&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java Mon Apr 28 19:40:06 2014
@@ -345,7 +345,6 @@ public class TestFileAppend{
       throws IOException, InterruptedException {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     //Set small soft-limit for lease
     final long softLimit = 1L;
     final long hardLimit = 9999999L;


