hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1337645 [2/3] - in /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs: ./ src/contrib/fuse-dfs/src/ src/main/bin/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/security/token/block...
Date: Sat, 12 May 2012 20:52:45 GMT
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Sat May 12 20:52:34 2012
@@ -203,6 +203,10 @@ public abstract class FSEditLogOp {
     }
 
     <T extends AddCloseOp> T setBlocks(Block[] blocks) {
+      if (blocks.length > MAX_BLOCKS) {
+        throw new RuntimeException("Can't have more than " + MAX_BLOCKS +
+            " blocks in an AddCloseOp.");
+      }
       this.blocks = blocks;
       return (T)this;
     }
@@ -296,10 +300,18 @@ public abstract class FSEditLogOp {
       }
     }
 
+    public static final int MAX_BLOCKS = 1024 * 1024 * 64;
+
     private static Block[] readBlocks(
         DataInputStream in,
         int logVersion) throws IOException {
       int numBlocks = in.readInt();
+      if (numBlocks < 0) {
+        throw new IOException("invalid negative number of blocks");
+      } else if (numBlocks > MAX_BLOCKS) {
+        throw new IOException("invalid number of blocks: " + numBlocks +
+            ".  The maximum number of blocks per file is " + MAX_BLOCKS);
+      }
       Block[] blocks = new Block[numBlocks];
       for (int i = 0; i < numBlocks; i++) {
         Block blk = new Block();
@@ -579,6 +591,7 @@ public abstract class FSEditLogOp {
     String trg;
     String[] srcs;
     long timestamp;
+    public static final int MAX_CONCAT_SRC = 1024 * 1024;
 
     private ConcatDeleteOp() {
       super(OP_CONCAT_DELETE);
@@ -594,7 +607,12 @@ public abstract class FSEditLogOp {
     }
 
     ConcatDeleteOp setSources(String[] srcs) {
+      if (srcs.length > MAX_CONCAT_SRC) {
+        throw new RuntimeException("ConcatDeleteOp can have at most " +
+            MAX_CONCAT_SRC + " sources.");
+      }
       this.srcs = srcs;
       return this;
     }
 
@@ -624,8 +642,8 @@ public abstract class FSEditLogOp {
       if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
         this.length = in.readInt();
         if (length < 3) { // trg, srcs.., timestamp
-          throw new IOException("Incorrect data format. "
-              + "Concat delete operation.");
+          throw new IOException("Incorrect data format " +
+              "for ConcatDeleteOp.");
         }
       }
       this.trg = FSImageSerialization.readString(in);
@@ -635,6 +653,15 @@ public abstract class FSEditLogOp {
       } else {
         srcSize = this.length - 1 - 1; // trg and timestamp
       }
+      if (srcSize < 0) {
+        throw new IOException("Incorrect data format. "
+            + "ConcatDeleteOp cannot have a negative number of data sources.");
+      } else if (srcSize > MAX_CONCAT_SRC) {
+        throw new IOException("Incorrect data format. "
+            + "ConcatDeleteOp can have at most " + MAX_CONCAT_SRC
+            + " sources, but this one has " + srcSize + " sources.");
+      }
       this.srcs = new String [srcSize];
       for(int i=0; i<srcSize;i++) {
         srcs[i]= FSImageSerialization.readString(in);
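
For context, the readBlocks() and setSources() changes above share one pattern: a length read from the edit log is validated against a fixed cap before any array of that size is allocated, so a corrupt or hostile log cannot force a huge allocation. A minimal stand-alone sketch of the pattern, using illustrative names that are not part of HDFS:

    import java.io.DataInputStream;
    import java.io.IOException;

    class BoundedArrayReader {
        /** Cap supplied by the caller; plays the role of MAX_BLOCKS above. */
        static long[] readLongs(DataInputStream in, int maxEntries) throws IOException {
            int n = in.readInt();
            if (n < 0) {
                throw new IOException("invalid negative entry count: " + n);
            } else if (n > maxEntries) {
                throw new IOException("entry count " + n + " exceeds cap " + maxEntries);
            }
            long[] out = new long[n];  // safe: n is now known to be in [0, maxEntries]
            for (int i = 0; i < n; i++) {
                out[i] = in.readLong();
            }
            return out;
        }
    }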

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sat May 12 20:52:34 2012
@@ -1783,24 +1783,21 @@ public class FSNamesystem implements Nam
                 "Failed to close file " + src +
                 ". Lease recovery is in progress. Try again later.");
         } else {
-          BlockInfoUnderConstruction lastBlock=pendingFile.getLastBlock();
-          if(lastBlock != null && lastBlock.getBlockUCState() ==
-            BlockUCState.UNDER_RECOVERY) {
-            throw new RecoveryInProgressException(
-              "Recovery in progress, file [" + src + "], " +
-              "lease owner [" + lease.getHolder() + "]");
-            } else {
-              throw new AlreadyBeingCreatedException(
-                "Failed to create file [" + src + "] for [" + holder +
-                "] on client [" + clientMachine +
-                "], because this file is already being created by [" +
-                pendingFile.getClientName() + "] on [" +
-                pendingFile.getClientMachine() + "]");
-            }
-         }
+          final BlockInfo lastBlock = pendingFile.getLastBlock();
+          if (lastBlock != null
+              && lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
+            throw new RecoveryInProgressException("Recovery in progress, file ["
+                + src + "], " + "lease owner [" + lease.getHolder() + "]");
+          } else {
+            throw new AlreadyBeingCreatedException("Failed to create file ["
+                + src + "] for [" + holder + "] on client [" + clientMachine
+                + "], because this file is already being created by ["
+                + pendingFile.getClientName() + "] on ["
+                + pendingFile.getClientMachine() + "]");
+          }
+        }
       }
     }
-
   }
 
   /**
@@ -2840,7 +2837,7 @@ public class FSNamesystem implements Nam
       if (storedBlock == null) {
         throw new IOException("Block (=" + lastblock + ") not found");
       }
-      INodeFile iFile = storedBlock.getINode();
+      INodeFile iFile = (INodeFile) storedBlock.getBlockCollection();
       if (!iFile.isUnderConstruction() || storedBlock.isComplete()) {
         throw new IOException("Unexpected block (=" + lastblock
                               + ") since the file (=" + iFile.getLocalName()
@@ -4135,7 +4132,7 @@ public class FSNamesystem implements Nam
    * Returns whether the given block is one pointed-to by a file.
    */
   private boolean isValidBlock(Block b) {
-    return (blockManager.getINode(b) != null);
+    return (blockManager.getBlockCollection(b) != null);
   }
 
   // Distributed upgrade manager
@@ -4394,7 +4391,7 @@ public class FSNamesystem implements Nam
     }
     
     // check file inode
-    INodeFile file = storedBlock.getINode();
+    INodeFile file = (INodeFile) storedBlock.getBlockCollection();
     if (file==null || !file.isUnderConstruction()) {
       throw new IOException("The file " + storedBlock + 
           " belonged to does not exist or it is not under construction.");
@@ -4556,7 +4553,7 @@ public class FSNamesystem implements Nam
     if (destinationExisted && dinfo.isDir()) {
       Path spath = new Path(src);
       Path parent = spath.getParent();
-      if (isRoot(parent)) {
+      if (parent.isRoot()) {
         overwrite = parent.toString();
       } else {
         overwrite = parent.toString() + Path.SEPARATOR;
@@ -4569,10 +4566,6 @@ public class FSNamesystem implements Nam
 
     leaseManager.changeLease(src, dst, overwrite, replaceBy);
   }
-           
-  private boolean isRoot(Path path) {
-    return path.getParent() == null;
-  }
 
   /**
    * Serializes leases. 
@@ -4710,7 +4703,7 @@ public class FSNamesystem implements Nam
 
       while (blkIterator.hasNext()) {
         Block blk = blkIterator.next();
-        INode inode = blockManager.getINode(blk);
+        INode inode = (INodeFile) blockManager.getBlockCollection(blk);
         skip++;
         if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
           String src = FSDirectory.getFullPathName(inode);
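
A side note on the lease-path hunk above: the private isRoot() helper is dropped in favor of the equivalent check on org.apache.hadoop.fs.Path itself, where a path is the root exactly when it has no parent. A tiny demonstration, assuming only that Path.isRoot() matches the deleted helper's behavior:

    import org.apache.hadoop.fs.Path;

    public class RootCheckDemo {
        public static void main(String[] args) {
            Path parent = new Path("/a/b").getParent();           // "/a"
            System.out.println(parent.isRoot());                  // false
            System.out.println(parent.getParent().isRoot());      // true: "/" has no parent
        }
    }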

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java Sat May 12 20:52:34 2012
@@ -27,6 +27,8 @@ import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.security.SecurityUtil;
 
 import org.apache.commons.logging.Log;
@@ -34,7 +36,6 @@ import org.apache.commons.logging.LogFac
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
@@ -83,11 +84,11 @@ public class GetImageServlet extends Htt
         (Configuration)getServletContext().getAttribute(JspHelper.CURRENT_CONF);
       
       if(UserGroupInformation.isSecurityEnabled() && 
-          !isValidRequestor(request.getRemoteUser(), conf)) {
+          !isValidRequestor(request.getUserPrincipal().getName(), conf)) {
         response.sendError(HttpServletResponse.SC_FORBIDDEN, 
             "Only Namenode and Secondary Namenode may access this servlet");
         LOG.warn("Received non-NN/SNN request for image or edits from " 
-            + request.getRemoteHost());
+            + request.getUserPrincipal().getName() + " at " + request.getRemoteHost());
         return;
       }
       
@@ -156,15 +157,10 @@ public class GetImageServlet extends Htt
               }
               
               // issue a HTTP get request to download the new fsimage 
-              MD5Hash downloadImageDigest = reloginIfNecessary().doAs(
-                  new PrivilegedExceptionAction<MD5Hash>() {
-                  @Override
-                  public MD5Hash run() throws Exception {
-                    return TransferFsImage.downloadImageToStorage(
+              MD5Hash downloadImageDigest =
+                TransferFsImage.downloadImageToStorage(
                         parsedParams.getInfoServer(), txid,
                         nnImage.getStorage(), true);
-                    }
-              });
               nnImage.saveDigestAndRenameCheckpointImage(txid, downloadImageDigest);
               
               // Now that we have a new checkpoint, we might be able to
@@ -176,18 +172,6 @@ public class GetImageServlet extends Htt
           }
           return null;
         }
-        
-        // We may have lost our ticket since the last time we tried to open
-        // an http connection, so log in just in case.
-        private UserGroupInformation reloginIfNecessary() throws IOException {
-          // This method is only called on the NN, therefore it is safe to
-          // use these key values.
-          return UserGroupInformation.loginUserFromKeytabAndReturnUGI(
-                  SecurityUtil.getServerPrincipal(conf
-                      .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
-                      NameNode.getAddress(conf).getHostName()),
-              conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
-        }       
       });
       
     } catch (Throwable t) {
@@ -234,18 +218,10 @@ public class GetImageServlet extends Htt
 
     validRequestors.add(
         SecurityUtil.getServerPrincipal(conf
-            .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), NameNode
-            .getAddress(conf).getHostName()));
-    validRequestors.add(
-        SecurityUtil.getServerPrincipal(conf
             .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), NameNode
             .getAddress(conf).getHostName()));
     validRequestors.add(
         SecurityUtil.getServerPrincipal(conf
-            .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
-            SecondaryNameNode.getHttpAddress(conf).getHostName()));
-    validRequestors.add(
-        SecurityUtil.getServerPrincipal(conf
             .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY),
             SecondaryNameNode.getHttpAddress(conf).getHostName()));
 
@@ -253,21 +229,17 @@ public class GetImageServlet extends Htt
       Configuration otherNnConf = HAUtil.getConfForOtherNode(conf);
       validRequestors.add(
           SecurityUtil.getServerPrincipal(otherNnConf
-              .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
-              NameNode.getAddress(otherNnConf).getHostName()));
-      validRequestors.add(
-          SecurityUtil.getServerPrincipal(otherNnConf
               .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
               NameNode.getAddress(otherNnConf).getHostName()));
     }
 
     for(String v : validRequestors) {
       if(v != null && v.equals(remoteUser)) {
-        if(LOG.isDebugEnabled()) LOG.debug("isValidRequestor is allowing: " + remoteUser);
+        if(LOG.isInfoEnabled()) LOG.info("GetImageServlet allowing: " + remoteUser);
         return true;
       }
     }
-    if(LOG.isDebugEnabled()) LOG.debug("isValidRequestor is rejecting: " + remoteUser);
+    if(LOG.isInfoEnabled()) LOG.info("GetImageServlet rejecting: " + remoteUser);
     return false;
   }
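
The requestor check in this servlet reduces to an allow-list of Kerberos server principals matched against the authenticated caller. A stripped-down sketch of that logic (the principal strings are placeholders, not real configuration):

    import java.util.HashSet;
    import java.util.Set;

    public class RequestorCheck {
        public static void main(String[] args) {
            Set<String> validRequestors = new HashSet<String>();
            validRequestors.add("nn/nn-host@EXAMPLE.COM");      // placeholder NN principal
            validRequestors.add("sn/snn-host@EXAMPLE.COM");     // placeholder 2NN principal

            // In the servlet this comes from request.getUserPrincipal().getName().
            String remoteUser = "nn/nn-host@EXAMPLE.COM";
            System.out.println(validRequestors.contains(remoteUser));  // true: allowed
        }
    }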
   

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Sat May 12 20:52:34 2012
@@ -30,13 +30,15 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.primitives.SignedBytes;
+
 /**
  * We keep an in-memory representation of the file/block hierarchy.
  * This is a base INode class containing common fields for file and 
  * directory inodes.
  */
 @InterfaceAudience.Private
-abstract class INode implements Comparable<byte[]>, FSInodeInfo {
+abstract class INode implements Comparable<byte[]> {
   /*
    *  The inode name is in java UTF8 encoding; 
    *  The name in HdfsFileStatus should keep the same encoding as this.
@@ -143,8 +145,7 @@ abstract class INode implements Comparab
   protected PermissionStatus getPermissionStatus() {
     return new PermissionStatus(getUserName(),getGroupName(),getFsPermission());
   }
-  private synchronized void updatePermissionStatus(
-      PermissionStatusFormat f, long n) {
+  private void updatePermissionStatus(PermissionStatusFormat f, long n) {
     permission = f.combine(n, permission);
   }
   /** Get user name */
@@ -263,7 +264,6 @@ abstract class INode implements Comparab
     this.name = name;
   }
 
-  @Override
   public String getFullPathName() {
     // Get the full path name of this inode.
     return FSDirectory.getFullPathName(this);
@@ -400,48 +400,30 @@ abstract class INode implements Comparab
     }
   }
 
-  //
-  // Comparable interface
-  //
-  public int compareTo(byte[] o) {
-    return compareBytes(name, o);
+  private static final byte[] EMPTY_BYTES = {};
+
+  @Override
+  public final int compareTo(byte[] bytes) {
+    final byte[] left = name == null? EMPTY_BYTES: name;
+    final byte[] right = bytes == null? EMPTY_BYTES: bytes;
+    return SignedBytes.lexicographicalComparator().compare(left, right);
   }
 
-  public boolean equals(Object o) {
-    if (!(o instanceof INode)) {
+  @Override
+  public final boolean equals(Object that) {
+    if (this == that) {
+      return true;
+    }
+    if (that == null || !(that instanceof INode)) {
       return false;
     }
-    return Arrays.equals(this.name, ((INode)o).name);
+    return Arrays.equals(this.name, ((INode)that).name);
   }
 
-  public int hashCode() {
+  @Override
+  public final int hashCode() {
     return Arrays.hashCode(this.name);
   }
-
-  //
-  // static methods
-  //
-  /**
-   * Compare two byte arrays.
-   * 
-   * @return a negative integer, zero, or a positive integer 
-   * as defined by {@link #compareTo(byte[])}.
-   */
-  static int compareBytes(byte[] a1, byte[] a2) {
-    if (a1==a2)
-        return 0;
-    int len1 = (a1==null ? 0 : a1.length);
-    int len2 = (a2==null ? 0 : a2.length);
-    int n = Math.min(len1, len2);
-    byte b1, b2;
-    for (int i=0; i<n; i++) {
-      b1 = a1[i];
-      b2 = a2[i];
-      if (b1 != b2)
-        return b1 - b2;
-    }
-    return len1 - len2;
-  }
   
   /**
    * Create an INode; the inode's name is not set yet
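
The hand-rolled compareBytes() removed above compared individual bytes as signed values (b1 - b2), with the shorter array sorting first on a common prefix. The replacement keeps those semantics: Guava's SignedBytes.lexicographicalComparator() is also a signed, lexicographic byte-array comparator. A quick demonstration:

    import java.util.Comparator;
    import com.google.common.primitives.SignedBytes;

    public class CompareDemo {
        public static void main(String[] args) {
            Comparator<byte[]> cmp = SignedBytes.lexicographicalComparator();
            System.out.println(cmp.compare(new byte[]{1, 2}, new byte[]{1, 3}) < 0);  // true
            System.out.println(cmp.compare(new byte[]{1}, new byte[]{1, 0}) < 0);     // true: prefix first
            System.out.println(cmp.compare(new byte[]{-1}, new byte[]{1}) < 0);       // true: signed order
        }
    }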

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Sat May 12 20:52:34 2012
@@ -173,9 +173,9 @@ class INodeDirectory extends INode {
    */
   int getExistingPathINodes(byte[][] components, INode[] existing, 
       boolean resolveLink) throws UnresolvedLinkException {
-    assert compareBytes(this.name, components[0]) == 0 :
-      "Incorrect name " + getLocalName() + " expected " + 
-      DFSUtil.bytes2String(components[0]);
+    assert this.compareTo(components[0]) == 0 :
+        "Incorrect name " + getLocalName() + " expected "
+        + (components[0] == null? null: DFSUtil.bytes2String(components[0]));
 
     INode curNode = this;
     int count = 0;
@@ -317,8 +317,7 @@ class INodeDirectory extends INode {
                               INode newNode,
                               INodeDirectory parent,
                               boolean propagateModTime
-                              ) throws FileNotFoundException, 
-                                       UnresolvedLinkException {
+                              ) throws FileNotFoundException {
     // insert into the parent children list
     newNode.name = localname;
     if(parent.addChild(newNode, propagateModTime) == null)

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Sat May 12 20:52:34 2012
@@ -20,15 +20,18 @@ package org.apache.hadoop.hdfs.server.na
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 
 /** I-node for closed file. */
-public class INodeFile extends INode {
+@InterfaceAudience.Private
+public class INodeFile extends INode implements BlockCollection {
   static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
 
   //Number of bits for Block size
@@ -128,7 +131,7 @@ public class INodeFile extends INode {
     }
     
     for(BlockInfo bi: newlist) {
-      bi.setINode(this);
+      bi.setBlockCollection(this);
     }
     this.blocks = newlist;
   }
@@ -161,12 +164,18 @@ public class INodeFile extends INode {
     if(blocks != null && v != null) {
       for (BlockInfo blk : blocks) {
         v.add(blk);
-        blk.setINode(null);
+        blk.setBlockCollection(null);
       }
     }
     blocks = null;
     return 1;
   }
+  
+  public String getName() {
+    // Get the full path name of this inode.
+    return getFullPathName();
+  }
+
 
   @Override
   long[] computeContentSummary(long[] summary) {
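
Throughout this commit, setINode()/getINode() become setBlockCollection()/getBlockCollection(): the block-management layer now reaches the file owning a block through the BlockCollection interface rather than the concrete INodeFile class. A toy sketch of that dependency inversion (all type names below are invented for illustration):

    // What the block layer sees: an abstraction over "the thing that owns blocks".
    interface BlockCollectionSketch {
        String getName();
    }

    // Supplied by the namespace layer, much as INodeFile.getName() delegates
    // to getFullPathName() above.
    class FileNodeSketch implements BlockCollectionSketch {
        public String getName() { return "/demo/file"; }
    }

    // Stand-in for BlockInfo: it stores only the interface type.
    class BlockRecordSketch {
        private BlockCollectionSketch owner;
        void setBlockCollection(BlockCollectionSketch bc) { this.owner = bc; }
        BlockCollectionSketch getBlockCollection() { return owner; }
    }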

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Sat May 12 20:52:34 2012
@@ -25,13 +25,15 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
 
 import com.google.common.base.Joiner;
 
 /**
  * I-node for file being written.
  */
-public class INodeFileUnderConstruction extends INodeFile {
+public class INodeFileUnderConstruction extends INodeFile 
+                                        implements MutableBlockCollection {
   private  String clientName;         // lease holder
   private final String clientMachine;
   private final DatanodeDescriptor clientNode; // if client is a cluster node too.
@@ -154,7 +156,7 @@ public class INodeFileUnderConstruction 
     BlockInfoUnderConstruction ucBlock =
       lastBlock.convertToBlockUnderConstruction(
           BlockUCState.UNDER_CONSTRUCTION, targets);
-    ucBlock.setINode(this);
+    ucBlock.setBlockCollection(this);
     setBlock(numBlocks()-1, ucBlock);
     return ucBlock;
   }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Sat May 12 20:52:34 2012
@@ -174,10 +174,8 @@ public class NameNode {
     DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
     DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
     DFS_NAMENODE_HTTP_ADDRESS_KEY,
-    DFS_NAMENODE_HTTPS_ADDRESS_KEY,
     DFS_NAMENODE_KEYTAB_FILE_KEY,
     DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
-    DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
     DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
     DFS_NAMENODE_BACKUP_ADDRESS_KEY,
     DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
@@ -229,6 +227,7 @@ public class NameNode {
   private final boolean haEnabled;
   private final HAContext haContext;
   protected boolean allowStaleStandbyReads;
+  private Runtime runtime = Runtime.getRuntime();
 
   
   /** httpServer */
@@ -382,8 +381,9 @@ public class NameNode {
   }
   
   protected void setHttpServerAddress(Configuration conf) {
-    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY,
-        NetUtils.getHostPortString(getHttpAddress()));
+    String hostPort = NetUtils.getHostPortString(getHttpAddress());
+    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, hostPort);
+    LOG.info("Web-server up at: " + hostPort);
   }
 
   protected void loadNamesystem(Configuration conf) throws IOException {
@@ -503,11 +503,16 @@ public class NameNode {
   }
   
   private void startTrashEmptier(Configuration conf) throws IOException {
-    long trashInterval 
-      = conf.getLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, 
-                     CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
-    if(trashInterval == 0)
+    long trashInterval = conf.getLong(
+        CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY,
+        CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
+    if (trashInterval == 0) {
       return;
+    } else if (trashInterval < 0) {
+      throw new IOException("Cannot start trash emptier with a negative interval."
+          + " Set " + CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY + " to a"
+          + " positive value.");
+    }
     this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
     this.emptier.setDaemon(true);
     this.emptier.start();
@@ -1151,23 +1156,21 @@ public class NameNode {
    */
   public static void initializeGenericKeys(Configuration conf,
       String nameserviceId, String namenodeId) {
-    if ((nameserviceId == null || nameserviceId.isEmpty()) && 
-        (namenodeId == null || namenodeId.isEmpty())) {
-      return;
-    }
-    
-    if (nameserviceId != null) {
-      conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
-    }
-    if (namenodeId != null) {
-      conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId);
+    if ((nameserviceId != null && !nameserviceId.isEmpty()) || 
+        (namenodeId != null && !namenodeId.isEmpty())) {
+      if (nameserviceId != null) {
+        conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
+      }
+      if (namenodeId != null) {
+        conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId);
+      }
+      
+      DFSUtil.setGenericConf(conf, nameserviceId, namenodeId,
+          NAMENODE_SPECIFIC_KEYS);
+      DFSUtil.setGenericConf(conf, nameserviceId, null,
+          NAMESERVICE_SPECIFIC_KEYS);
     }
     
-    DFSUtil.setGenericConf(conf, nameserviceId, namenodeId,
-        NAMENODE_SPECIFIC_KEYS);
-    DFSUtil.setGenericConf(conf, nameserviceId, null,
-        NAMESERVICE_SPECIFIC_KEYS);
-    
     if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
       URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
           + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
@@ -1262,14 +1265,37 @@ public class NameNode {
     }
     return state.getServiceState();
   }
+  
+  @VisibleForTesting
+  public synchronized void setRuntimeForTesting(Runtime runtime) {
+    this.runtime = runtime;
+  }
 
   /**
-   * Class used as expose {@link NameNode} as context to {@link HAState}
+   * Shutdown the NN immediately in an ungraceful way. Used when it would be
+   * unsafe for the NN to continue operating, e.g. during a failed HA state
+   * transition.
    * 
-   * TODO(HA):
-   * When entering and exiting state, on failing to start services,
-   * appropriate action is needed todo either shutdown the node or recover
-   * from failure.
+   * @param t exception which warrants the shutdown. Printed to the NN log
+   *          before exit.
+   * @throws ServiceFailedException thrown only for testing.
+   */
+  private synchronized void doImmediateShutdown(Throwable t)
+      throws ServiceFailedException {
+    String message = "Error encountered requiring NN shutdown. " +
+        "Shutting down immediately.";
+    try {
+      LOG.fatal(message, t);
+    } catch (Throwable ignored) {
+      // This is unlikely to happen, but there's nothing we can do if it does.
+    }
+    runtime.exit(1);
+    // This code is only reached during testing, when runtime is stubbed out.
+    throw new ServiceFailedException(message, t);
+  }
+  
+  /**
+   * Class used to expose {@link NameNode} as context to {@link HAState}
    */
   protected class NameNodeHAContext implements HAContext {
     @Override
@@ -1284,32 +1310,52 @@ public class NameNode {
 
     @Override
     public void startActiveServices() throws IOException {
-      namesystem.startActiveServices();
-      startTrashEmptier(conf);
+      try {
+        namesystem.startActiveServices();
+        startTrashEmptier(conf);
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
+      }
     }
 
     @Override
     public void stopActiveServices() throws IOException {
-      if (namesystem != null) {
-        namesystem.stopActiveServices();
+      try {
+        if (namesystem != null) {
+          namesystem.stopActiveServices();
+        }
+        stopTrashEmptier();
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
       }
-      stopTrashEmptier();
     }
 
     @Override
     public void startStandbyServices() throws IOException {
-      namesystem.startStandbyServices(conf);
+      try {
+        namesystem.startStandbyServices(conf);
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
+      }
     }
 
     @Override
     public void prepareToStopStandbyServices() throws ServiceFailedException {
-      namesystem.prepareToStopStandbyServices();
+      try {
+        namesystem.prepareToStopStandbyServices();
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
+      }
     }
     
     @Override
     public void stopStandbyServices() throws IOException {
-      if (namesystem != null) {
-        namesystem.stopStandbyServices();
+      try {
+        if (namesystem != null) {
+          namesystem.stopStandbyServices();
+        }
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
       }
     }
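
doImmediateShutdown() above is written to be testable: the exit goes through an injected Runtime field rather than a direct Runtime.getRuntime().exit() call, and setRuntimeForTesting() lets a test swap in a mock (Runtime has a private constructor, so tests typically inject one built with a mocking library). A minimal sketch of the same pattern, with illustrative names:

    public class FatalExiter {
        private Runtime runtime = Runtime.getRuntime();

        /** Test hook: inject a stub so exit(1) does not kill the test JVM. */
        void setRuntimeForTesting(Runtime r) { this.runtime = r; }

        void shutdownOn(Throwable t) {
            System.err.println("Error encountered requiring shutdown: " + t);
            runtime.exit(1);
            // Reached only when a test stubbed out exit(); lets the test observe the call.
            throw new IllegalStateException("shutdown requested", t);
        }
    }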
     

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java Sat May 12 20:52:34 2012
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT;
@@ -43,6 +44,7 @@ import org.apache.hadoop.http.HttpServer
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
 
 /**
@@ -78,127 +80,101 @@ public class NameNodeHttpServer {
         conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
         nn.getNameNodeAddress().getHostName());
   }
-  
+
   public void start() throws IOException {
     final String infoHost = bindAddress.getHostName();
-    
-    if(UserGroupInformation.isSecurityEnabled()) {
-      String httpsUser = SecurityUtil.getServerPrincipal(conf
-          .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), infoHost);
-      if (httpsUser == null) {
-        LOG.warn(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY
-            + " not defined in config. Starting http server as "
-            + getDefaultServerPrincipal()
-            + ": Kerberized SSL may be not function correctly.");
-      } else {
-        // Kerberized SSL servers must be run from the host principal...
-        LOG.info("Logging in as " + httpsUser + " to start http server.");
-        SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
-            DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY, infoHost);
-      }
-    }
+    int infoPort = bindAddress.getPort();
 
-    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
-    try {
-      this.httpServer = ugi.doAs(new PrivilegedExceptionAction<HttpServer>() {
-        @Override
-        public HttpServer run() throws IOException, InterruptedException {
-          int infoPort = bindAddress.getPort();
-          httpServer = new HttpServer("hdfs", infoHost, infoPort,
-              infoPort == 0, conf, 
-              new AccessControlList(conf.get(DFSConfigKeys.DFS_ADMIN, " "))) {
-            {
-              if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
-                //add SPNEGO authentication filter for webhdfs
-                final String name = "SPNEGO";
-                final String classname =  AuthFilter.class.getName();
-                final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
-                Map<String, String> params = getAuthFilterParams(conf);
-                defineFilter(webAppContext, name, classname, params,
-                    new String[]{pathSpec});
-                LOG.info("Added filter '" + name + "' (class=" + classname + ")");
-
-                // add webhdfs packages
-                addJerseyResourcePackage(
-                    NamenodeWebHdfsMethods.class.getPackage().getName()
-                    + ";" + Param.class.getPackage().getName(), pathSpec);
-              }
+    httpServer = new HttpServer("hdfs", infoHost, infoPort,
+                                infoPort == 0, conf,
+                                new AccessControlList(conf.get(DFS_ADMIN, " "))) {
+      {
+        // Add SPNEGO support to NameNode
+        if (UserGroupInformation.isSecurityEnabled()) {
+          Map<String, String> params = new HashMap<String, String>();
+          String principalInConf = conf.get(
+            DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY);
+          if (principalInConf != null && !principalInConf.isEmpty()) {
+            params.put("kerberos.principal",
+                       SecurityUtil.getServerPrincipal(principalInConf, infoHost));
+            String httpKeytab = conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
+            if (httpKeytab != null && !httpKeytab.isEmpty()) {
+              params.put("kerberos.keytab", httpKeytab);
             }
 
-            private Map<String, String> getAuthFilterParams(Configuration conf)
-                throws IOException {
-              Map<String, String> params = new HashMap<String, String>();
-              String principalInConf = conf
-                  .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
-              if (principalInConf != null && !principalInConf.isEmpty()) {
-                params
-                    .put(
-                        DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
-                        SecurityUtil.getServerPrincipal(principalInConf,
-                            infoHost));
-              }
-              String httpKeytab = conf
-                  .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
-              if (httpKeytab != null && !httpKeytab.isEmpty()) {
-                params.put(
-                    DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
-                    httpKeytab);
-              }
-              return params;
-            }
-          };
+            params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
 
-          boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
-          boolean useKrb = UserGroupInformation.isSecurityEnabled();
-          if (certSSL || useKrb) {
-            boolean needClientAuth = conf.getBoolean(
-                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
-                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
-            InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf
-                .get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
-                    DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
-            Configuration sslConf = new HdfsConfiguration(false);
-            if (certSSL) {
-              sslConf.addResource(conf.get(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-                  DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
-            }
-            httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth,
-                useKrb);
-            // assume same ssl port for all datanodes
-            InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(
-                conf.get(DFS_DATANODE_HTTPS_ADDRESS_KEY,
-                    infoHost + ":" + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT));
-            httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,
-                datanodeSslPort.getPort());
+            defineFilter(webAppContext, SPNEGO_FILTER,
+                         AuthenticationFilter.class.getName(), params, null);
           }
-          httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
-          httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY,
-              nn.getNameNodeAddress());
-          httpServer.setAttribute(FSIMAGE_ATTRIBUTE_KEY, nn.getFSImage());
-          httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
-          setupServlets(httpServer, conf);
-          httpServer.start();
-
-          // The web-server port can be ephemeral... ensure we have the correct
-          // info
-          infoPort = httpServer.getPort();
-          httpAddress = new InetSocketAddress(infoHost, infoPort);
-          LOG.info(nn.getRole() + " Web-server up at: " + httpAddress);
-          return httpServer;
         }
-      });
-    } catch (InterruptedException e) {
-      throw new IOException(e);
-    } finally {
-      if(UserGroupInformation.isSecurityEnabled() && 
-          conf.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY) != null) {
-        // Go back to being the correct Namenode principal
-        LOG.info("Logging back in as NameNode user following http server start");
-        nn.loginAsNameNodeUser(conf);
+        if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
+          //add SPNEGO authentication filter for webhdfs
+          final String name = "SPNEGO";
+          final String classname = AuthFilter.class.getName();
+          final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
+          Map<String, String> params = getAuthFilterParams(conf);
+          defineFilter(webAppContext, name, classname, params,
+                       new String[]{pathSpec});
+          LOG.info("Added filter '" + name + "' (class=" + classname + ")");
+
+          // add webhdfs packages
+          addJerseyResourcePackage(
+            NamenodeWebHdfsMethods.class.getPackage().getName()
+            + ";" + Param.class.getPackage().getName(), pathSpec);
+        }
+      }
+
+      private Map<String, String> getAuthFilterParams(Configuration conf)
+        throws IOException {
+        Map<String, String> params = new HashMap<String, String>();
+        String principalInConf = conf
+          .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
+        if (principalInConf != null && !principalInConf.isEmpty()) {
+          params
+            .put(
+              DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+              SecurityUtil.getServerPrincipal(principalInConf,
+                                              bindAddress.getHostName()));
+        }
+        String httpKeytab = conf
+          .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
+        if (httpKeytab != null && !httpKeytab.isEmpty()) {
+          params.put(
+            DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
+            httpKeytab);
+        }
+        return params;
+      }
+    };
+
+    boolean certSSL = conf.getBoolean("dfs.https.enable", false);
+    if (certSSL) {
+      boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
+      InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(infoHost + ":" + conf.get(
+        "dfs.https.port", infoHost + ":" + 0));
+      Configuration sslConf = new Configuration(false);
+      if (certSSL) {
+        sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
+                                     "ssl-server.xml"));
       }
+      httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
+      // assume same ssl port for all datanodes
+      InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
+        "dfs.datanode.https.address", infoHost + ":" + 50475));
+      httpServer.setAttribute("datanode.https.port", datanodeSslPort
+        .getPort());
     }
+    httpServer.setAttribute("name.node", nn);
+    httpServer.setAttribute("name.node.address", bindAddress);
+    httpServer.setAttribute("name.system.image", nn.getFSImage());
+    httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
+    setupServlets(httpServer, conf);
+    httpServer.start();
+    httpAddress = new InetSocketAddress(bindAddress.getAddress(), httpServer.getPort());
   }
-  
+
+
   public void stop() throws Exception {
     if (httpServer != null) {
       httpServer.stop();
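
Both the NameNode and SecondaryNameNode web servers in this commit use the same Java idiom: an anonymous HttpServer subclass whose instance-initializer block registers the SPNEGO filter during construction, replacing the old doAs() wrapper. A generic sketch of the idiom (Server and defineFilter are invented stand-ins):

    class Server {
        void defineFilter(String name) {
            System.out.println("registered filter " + name);
        }
    }

    public class InitializerIdiomDemo {
        public static void main(String[] args) {
            Server s = new Server() {
                {   // instance initializer: runs as part of construction
                    defineFilter("SPNEGO");
                }
            };
        }
    }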

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Sat May 12 20:52:34 2012
@@ -734,7 +734,7 @@ class NamenodeJspHelper {
         this.inode = null;
       } else {
         this.block = new Block(blockId);
-        this.inode = blockManager.getINode(block);
+        this.inode = (INodeFile) blockManager.getBlockCollection(block);
       }
     }
 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Sat May 12 20:52:34 2012
@@ -25,8 +25,10 @@ import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.Date;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
@@ -44,6 +46,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.NameNodeProxies;
@@ -63,9 +66,9 @@ import org.apache.hadoop.ipc.RemoteExcep
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
 
 import org.apache.hadoop.util.Daemon;
@@ -108,7 +111,6 @@ public class SecondaryNameNode implement
   private volatile boolean shouldRun;
   private HttpServer infoServer;
   private int infoPort;
-  private int imagePort;
   private String infoBindAddress;
 
   private Collection<URI> checkpointDirs;
@@ -229,63 +231,47 @@ public class SecondaryNameNode implement
 
     // Initialize other scheduling parameters from the configuration
     checkpointConf = new CheckpointConf(conf);
-    
+
     // initialize the webserver for uploading files.
-    // Kerberized SSL servers must be run from the host principal...
-    UserGroupInformation httpUGI = 
-      UserGroupInformation.loginUserFromKeytabAndReturnUGI(
-          SecurityUtil.getServerPrincipal(conf
-              .get(DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
-              infoBindAddress),
-          conf.get(DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
-    try {
-      infoServer = httpUGI.doAs(new PrivilegedExceptionAction<HttpServer>() {
-        @Override
-        public HttpServer run() throws IOException, InterruptedException {
-          LOG.info("Starting web server as: " +
-              UserGroupInformation.getCurrentUser().getUserName());
-
-          int tmpInfoPort = infoSocAddr.getPort();
-          infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
-              tmpInfoPort == 0, conf, 
-              new AccessControlList(conf.get(DFS_ADMIN, " ")));
-          
-          if(UserGroupInformation.isSecurityEnabled()) {
-            SecurityUtil.initKrb5CipherSuites();
-            InetSocketAddress secInfoSocAddr = 
-              NetUtils.createSocketAddr(infoBindAddress + ":"+ conf.getInt(
-                DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
-                DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT));
-            imagePort = secInfoSocAddr.getPort();
-            infoServer.addSslListener(secInfoSocAddr, conf, false, true);
+    int tmpInfoPort = infoSocAddr.getPort();
+    infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
+                                tmpInfoPort == 0, conf,
+                                new AccessControlList(conf.get(DFS_ADMIN, " "))) {
+      {
+        if (UserGroupInformation.isSecurityEnabled()) {
+          Map<String, String> params = new HashMap<String, String>();
+          String principalInConf = conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY);
+          if (principalInConf != null && !principalInConf.isEmpty()) {
+            params.put("kerberos.principal",
+                       SecurityUtil.getServerPrincipal(principalInConf, infoSocAddr.getHostName()));
           }
-          
-          infoServer.setAttribute("secondary.name.node", SecondaryNameNode.this);
-          infoServer.setAttribute("name.system.image", checkpointImage);
-          infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
-          infoServer.addInternalServlet("getimage", "/getimage",
-              GetImageServlet.class, true);
-          infoServer.start();
-          return infoServer;
+          String httpKeytab = conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
+          if (httpKeytab != null && !httpKeytab.isEmpty()) {
+            params.put("kerberos.keytab", httpKeytab);
+          }
+          params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
+
+          defineFilter(webAppContext, SPNEGO_FILTER, AuthenticationFilter.class.getName(),
+                       params, null);
         }
-      });
-    } catch (InterruptedException e) {
-      throw new RuntimeException(e);
-    } 
-    
+      }
+    };
+    infoServer.setAttribute("secondary.name.node", this);
+    infoServer.setAttribute("name.system.image", checkpointImage);
+    infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
+    infoServer.addInternalServlet("getimage", "/getimage",
+                                  GetImageServlet.class, true);
+    infoServer.start();
+
     LOG.info("Web server init done");
 
     // The web-server port can be ephemeral... ensure we have the correct info
     infoPort = infoServer.getPort();
-    if (!UserGroupInformation.isSecurityEnabled()) {
-      imagePort = infoPort;
-    }
-    
-    conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort); 
-    LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
-    LOG.info("Secondary image servlet up at: " + infoBindAddress + ":" + imagePort);
+
+    conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" + infoPort);
+    LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
     LOG.info("Checkpoint Period   :" + checkpointConf.getPeriod() + " secs " +
-             "(" + checkpointConf.getPeriod()/60 + " min)");
+             "(" + checkpointConf.getPeriod() / 60 + " min)");
     LOG.info("Log Size Trigger    :" + checkpointConf.getTxnCount() + " txns");
   }
 
@@ -434,7 +420,7 @@ public class SecondaryNameNode implement
       throw new IOException("This is not a DFS");
     }
 
-    String configuredAddress = DFSUtil.getInfoServer(null, conf, true);
+    String configuredAddress = DFSUtil.getInfoServer(null, conf, false);
     String address = DFSUtil.substituteForWildcardAddress(configuredAddress,
         fsName.getHost());
     LOG.debug("Will connect to NameNode at HTTP address: " + address);
@@ -446,7 +432,7 @@ public class SecondaryNameNode implement
    * for image transfers
    */
   private InetSocketAddress getImageListenAddress() {
-    return new InetSocketAddress(infoBindAddress, imagePort);
+    return new InetSocketAddress(infoBindAddress, infoPort);
   }
 
   /**
@@ -507,7 +493,7 @@ public class SecondaryNameNode implement
   
   
   /**
-   * @param argv The parameters passed to this program.
+   * @param opts The parameters passed to this program.
    * @exception Exception if the filesystem does not exist.
    * @return 0 on success, non zero on error.
    */
@@ -709,7 +695,7 @@ public class SecondaryNameNode implement
      * Construct a checkpoint image.
      * @param conf Node configuration.
      * @param imageDirs URIs of storage for image.
-     * @param editDirs URIs of storage for edit logs.
+     * @param editsDirs URIs of storage for edit logs.
     * @throws IOException If storage cannot be accessed.
      */
     CheckpointStorage(Configuration conf, 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java Sat May 12 20:52:34 2012
@@ -201,19 +201,17 @@ public class TransferFsImage {
       String queryString, List<File> localPaths,
       NNStorage dstStorage, boolean getChecksum) throws IOException {
     byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
-    String proto = UserGroupInformation.isSecurityEnabled() ? "https://" : "http://";
-    StringBuilder str = new StringBuilder(proto+nnHostPort+"/getimage?");
-    str.append(queryString);
 
+    String str = "http://" + nnHostPort + "/getimage?" + queryString;
+    LOG.info("Opening connection to " + str);
     //
     // open connection to remote server
     //
-    URL url = new URL(str.toString());
-    
-    // Avoid Krb bug with cross-realm hosts
-    SecurityUtil.fetchServiceTicket(url);
-    HttpURLConnection connection = (HttpURLConnection) url.openConnection();
-    
+    URL url = new URL(str);
+
+    HttpURLConnection connection = (HttpURLConnection)
+      SecurityUtil.openSecureHttpConnection(url);
+
     if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
       throw new HttpGetFailedException(
           "Image transfer servlet at " + url +

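TransferFsImage now opens the connection via SecurityUtil.openSecureHttpConnection() and, as before, refuses to read the body unless the response is HTTP 200. A bare-bones sketch of that fail-fast check using only the JDK (the method name is illustrative):

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class HttpGetCheck {
        static HttpURLConnection openChecked(String spec) throws IOException {
            HttpURLConnection conn = (HttpURLConnection) new URL(spec).openConnection();
            if (conn.getResponseCode() != HttpURLConnection.HTTP_OK) {
                throw new IOException("GET " + spec + " failed: HTTP " + conn.getResponseCode());
            }
            return conn;
        }
    }
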
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java Sat May 12 20:52:34 2012
@@ -97,7 +97,6 @@ public class BootstrapStandby implements
   static final int ERR_CODE_LOGS_UNAVAILABLE = 6; 
 
   public int run(String[] args) throws Exception {
-    SecurityUtil.initKrb5CipherSuites();
     parseArgs(args);
     parseConfAndFindOtherNN();
     NameNode.checkAllowFormat(conf);
@@ -325,7 +324,7 @@ public class BootstrapStandby implements
         "Could not determine valid IPC address for other NameNode (%s)" +
         ", got: %s", otherNNId, otherIpcAddr);
 
-    otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, true);
+    otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, false);
     otherHttpAddr = DFSUtil.substituteForWildcardAddress(otherHttpAddr,
         otherIpcAddr.getHostName());
     

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java Sat May 12 20:52:34 2012
@@ -92,7 +92,7 @@ public class StandbyCheckpointer {
   }
   
   private String getHttpAddress(Configuration conf) {
-    String configuredAddr = DFSUtil.getInfoServer(null, conf, true);
+    String configuredAddr = DFSUtil.getInfoServer(null, conf, false);
     
     // Use the hostname from the RPC address as a default, in case
     // the HTTP address is configured to 0.0.0.0.

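Both hunks above flip the final argument of DFSUtil.getInfoServer from true to false. Reading that flag as "return the HTTPS info address" is an assumption drawn from the surrounding KSSL-to-SPNEGO changes; under that reading, the new resolution path looks like this sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtil;

    class InfoServerExample {
      // Sketch under the assumption above: with SPNEGO over plain HTTP there
      // is no reason to resolve the HTTPS info address, so callers pass false.
      static String httpAddressOf(Configuration conf) {
        return DFSUtil.getInfoServer(null, conf, false);
      }
    }
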
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java Sat May 12 20:52:34 2012
@@ -17,22 +17,16 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.hadoop.io.Writable;
-
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
 
 /**
  * An enumeration of logs available on a remote NameNode.
  */
-public class RemoteEditLogManifest implements Writable {
+public class RemoteEditLogManifest {
 
   private List<RemoteEditLog> logs;
   
@@ -75,25 +69,4 @@ public class RemoteEditLogManifest imple
   public String toString() {
     return "[" + Joiner.on(", ").join(logs) + "]";
   }
-  
-  
-  @Override
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(logs.size());
-    for (RemoteEditLog log : logs) {
-      log.write(out);
-    }
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    int numLogs = in.readInt();
-    logs = Lists.newArrayList();
-    for (int i = 0; i < numLogs; i++) {
-      RemoteEditLog log = new RemoteEditLog();
-      log.readFields(in);
-      logs.add(log);
-    }
-    checkState();
-  }
 }

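With the Writable methods gone, RemoteEditLogManifest is a plain value object; wire serialization presumably lives in the protobuf translation layer instead, and the retained checkState() only has to enforce invariants. A hypothetical example of such a check using the Preconditions import the class keeps (the contiguity rule below is illustrative, not necessarily the class's actual check):

    import java.util.List;

    import com.google.common.base.Preconditions;

    class ManifestCheckExample {
      // Hypothetical invariant: logs must be sorted and non-overlapping.
      static void checkContiguous(List<RemoteEditLog> logs) {
        for (int i = 1; i < logs.size(); i++) {
          Preconditions.checkState(
              logs.get(i).getStartTxId() > logs.get(i - 1).getEndTxId(),
              "Invalid log manifest: %s", logs);
        }
      }
    }
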
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Sat May 12 20:52:34 2012
@@ -26,6 +26,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.TreeSet;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -64,9 +66,11 @@ import org.apache.hadoop.util.ToolRunner
 @InterfaceAudience.Private
 public class DFSAdmin extends FsShell {
 
-  static{
+  static {
     HdfsConfiguration.init();
   }
+  
+  private static final Log LOG = LogFactory.getLog(DFSAdmin.class);
 
   /**
    * An abstract class for the execution of a file system command
@@ -504,7 +508,7 @@ public class DFSAdmin extends FsShell {
    */
   public int fetchImage(String[] argv, int idx) throws IOException {
     String infoServer = DFSUtil.getInfoServer(
-        HAUtil.getAddressOfActive(getDFS()), getConf(), true);
+        HAUtil.getAddressOfActive(getDFS()), getConf(), false);
     TransferFsImage.downloadMostRecentImageToDirectory(infoServer,
         new File(argv[idx]));
     return 0;
@@ -1089,6 +1093,7 @@ public class DFSAdmin extends FsShell {
       return exitCode;
     }
 
+    Exception debugException = null;
     exitCode = 0;
     try {
       if ("-report".equals(cmd)) {
@@ -1143,6 +1148,7 @@ public class DFSAdmin extends FsShell {
         printUsage("");
       }
     } catch (IllegalArgumentException arge) {
+      debugException = arge;
       exitCode = -1;
       System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
       printUsage(cmd);
@@ -1151,6 +1157,7 @@ public class DFSAdmin extends FsShell {
       // This is an error returned by the hadoop server. Print
       // out the first line of the error message, ignore the stack trace.
       exitCode = -1;
+      debugException = e;
       try {
         String[] content;
         content = e.getLocalizedMessage().split("\n");
@@ -1159,12 +1166,17 @@ public class DFSAdmin extends FsShell {
       } catch (Exception ex) {
         System.err.println(cmd.substring(1) + ": "
                            + ex.getLocalizedMessage());
+        debugException = ex;
       }
     } catch (Exception e) {
       exitCode = -1;
+      debugException = e;
       System.err.println(cmd.substring(1) + ": "
                          + e.getLocalizedMessage());
-    } 
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Exception encountered:", debugException);
+    }
     return exitCode;
   }
 

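The DFSAdmin hunks record the last exception from command dispatch and log its full stack trace only at debug level, keeping stderr to one line per failure. The same pattern in isolation, as a hypothetical distillation (with a null guard added as a small hardening the original omits):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class DebugLogExample {
      private static final Log LOG = LogFactory.getLog(DebugLogExample.class);

      static int runCommand(Runnable cmd, String name) {
        Exception debugException = null;
        int exitCode = 0;
        try {
          cmd.run();
        } catch (Exception e) {
          debugException = e;  // keep for debug logging below
          exitCode = -1;
          System.err.println(name + ": " + e.getLocalizedMessage());
        }
        if (LOG.isDebugEnabled() && debugException != null) {
          LOG.debug("Exception encountered:", debugException);
        }
        return exitCode;
      }
    }
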
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java Sat May 12 20:52:34 2012
@@ -153,8 +153,7 @@ public class DFSck extends Configured im
         url.append("&startblockafter=").append(String.valueOf(cookie));
       }
       URL path = new URL(url.toString());
-      SecurityUtil.fetchServiceTicket(path);
-      URLConnection connection = path.openConnection();
+      URLConnection connection = SecurityUtil.openSecureHttpConnection(path);
       InputStream stream = connection.getInputStream();
       BufferedReader input = new BufferedReader(new InputStreamReader(
           stream, "UTF-8"));
@@ -222,16 +221,11 @@ public class DFSck extends Configured im
       return null;
     }
     
-    return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf, true);
+    return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf, false);
   }
 
   private int doWork(final String[] args) throws IOException {
-    String proto = "http://";
-    if (UserGroupInformation.isSecurityEnabled()) {
-      SecurityUtil.initKrb5CipherSuites();
-      proto = "https://";
-    }
-    final StringBuilder url = new StringBuilder(proto);
+    final StringBuilder url = new StringBuilder("http://");
     
     String namenodeAddress = getCurrentNamenodeAddress();
     if (namenodeAddress == null) {
@@ -279,8 +273,7 @@ public class DFSck extends Configured im
       return listCorruptFileBlocks(dir, url.toString());
     }
     URL path = new URL(url.toString());
-    SecurityUtil.fetchServiceTicket(path);
-    URLConnection connection = path.openConnection();
+    URLConnection connection = SecurityUtil.openSecureHttpConnection(path);
     InputStream stream = connection.getInputStream();
     BufferedReader input = new BufferedReader(new InputStreamReader(
                                               stream, "UTF-8"));

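After the change, DFSck builds a plain http:// URL unconditionally and lets SecurityUtil handle authentication. A compact sketch of that client loop; the query string below is simplified for illustration (the real servlet takes more parameters):

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.net.URL;
    import java.net.URLConnection;

    import org.apache.hadoop.security.SecurityUtil;

    class FsckClientExample {
      static void printFsck(String namenodeAddress, String path)
          throws IOException {
        // Simplified query string, for illustration only.
        URL url = new URL("http://" + namenodeAddress + "/fsck?path=" + path);
        URLConnection connection = SecurityUtil.openSecureHttpConnection(url);
        BufferedReader input = new BufferedReader(
            new InputStreamReader(connection.getInputStream(), "UTF-8"));
        try {
          String line;
          while ((line = input.readLine()) != null) {
            System.out.println(line);
          }
        } finally {
          input.close();
        }
      }
    }
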
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Sat May 12 20:52:34 2012
@@ -72,11 +72,6 @@ public class DelegationTokenFetcher {
   private static final String RENEW = "renew";
   private static final String PRINT = "print";
 
-  static {
-    // Enable Kerberos sockets
-    System.setProperty("https.cipherSuites", "TLS_KRB5_WITH_3DES_EDE_CBC_SHA");
-  }
-
   private static void printUsage(PrintStream err) throws IOException {
     err.println("fetchdt retrieves delegation tokens from the NameNode");
     err.println();
@@ -106,7 +101,7 @@ public class DelegationTokenFetcher {
     final Configuration conf = new HdfsConfiguration();
     Options fetcherOptions = new Options();
     fetcherOptions.addOption(WEBSERVICE, true,
-        "HTTPS url to reach the NameNode at");
+        "HTTP url to reach the NameNode at");
     fetcherOptions.addOption(RENEWER, true,
         "Name of the delegation token renewer");
     fetcherOptions.addOption(CANCEL, false, "cancel the token");
@@ -224,8 +219,7 @@ public class DelegationTokenFetcher {
       }
       
       URL remoteURL = new URL(url.toString());
-      SecurityUtil.fetchServiceTicket(remoteURL);
-      URLConnection connection = URLUtils.openConnection(remoteURL);
+      URLConnection connection = SecurityUtil.openSecureHttpConnection(remoteURL);
       InputStream in = connection.getInputStream();
       Credentials ts = new Credentials();
       dis = new DataInputStream(in);
@@ -264,7 +258,7 @@ public class DelegationTokenFetcher {
     
     try {
       URL url = new URL(buf.toString());
-      SecurityUtil.fetchServiceTicket(url);
-      connection = (HttpURLConnection)URLUtils.openConnection(url);
+      connection = (HttpURLConnection) SecurityUtil.openSecureHttpConnection(url);
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error renewing token: " + 
@@ -358,8 +352,7 @@ public class DelegationTokenFetcher {
     HttpURLConnection connection=null;
     try {
       URL url = new URL(buf.toString());
-      SecurityUtil.fetchServiceTicket(url);
-      connection = (HttpURLConnection)URLUtils.openConnection(url);
+      connection = (HttpURLConnection) SecurityUtil.openSecureHttpConnection(url);
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error cancelling token: " + 
             connection.getResponseMessage());

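With the static cipher-suite block removed, nothing in DelegationTokenFetcher mutates process-wide state any more; each request opens its own authenticated connection. A hedged sketch of the fetch path (the servlet path below is an assumption for illustration):

    import java.io.DataInputStream;
    import java.io.IOException;
    import java.net.URL;
    import java.net.URLConnection;

    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.SecurityUtil;

    class TokenFetchExample {
      static Credentials fetchTokens(String nnAddr) throws IOException {
        // Servlet path is illustrative; see the class above for the real URL.
        URL remoteURL = new URL("http://" + nnAddr + "/getDelegationToken");
        URLConnection connection =
            SecurityUtil.openSecureHttpConnection(remoteURL);
        DataInputStream dis = new DataInputStream(connection.getInputStream());
        try {
          Credentials ts = new Credentials();
          ts.readFields(dis);  // Credentials is Writable
          return ts;
        } finally {
          dis.close();
        }
      }
    }
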
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Sat May 12 20:52:34 2012
@@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1333291-1337618

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Sat May 12 20:52:34 2012
@@ -868,4 +868,15 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.namenode.kerberos.internal.spnego.principal</name>
+  <value>${dfs.web.authentication.kerberos.principal}</value>
+</property>
+
+<property>
+  <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
+  <value>${dfs.web.authentication.kerberos.principal}</value>
+</property>
+
 </configuration>

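The two new defaults lean on Configuration's ${...} variable expansion: unless overridden, both internal SPNEGO principals resolve to whatever dfs.web.authentication.kerberos.principal is set to. A small self-contained demonstration (the principal value is hypothetical):

    import org.apache.hadoop.conf.Configuration;

    public class SpnegoDefaultDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("dfs.web.authentication.kerberos.principal",
            "HTTP/_HOST@EXAMPLE.COM");  // hypothetical value
        conf.set("dfs.namenode.kerberos.internal.spnego.principal",
            "${dfs.web.authentication.kerberos.principal}");
        // Expansion happens at read time:
        System.out.println(
            conf.get("dfs.namenode.kerberos.internal.spnego.principal"));
        // prints HTTP/_HOST@EXAMPLE.COM
      }
    }
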
Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1333291-1337618

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1333291-1337618

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1333291-1337618

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1333291-1337618

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java Sat May 12 20:52:34 2012
@@ -105,17 +105,17 @@ public class TestViewFileSystemHdfs exte
   // additional mount.
   @Override
   int getExpectedDirPaths() {
-    return 7;
+    return 8;
   }
   
   @Override
   int getExpectedMountPoints() {
-    return 8;
+    return 9;
   }
 
   @Override
   int getExpectedDelegationTokenCount() {
-    return 8;
+    return 9;
   }
 
   @Override

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java Sat May 12 20:52:34 2012
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs.viewfs;
 
 import java.io.IOException;
 import java.net.URISyntaxException;
-import java.util.List;
 
 import javax.security.auth.login.LoginException;
 
@@ -30,20 +29,13 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-
-import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Test;
-
 
 public class TestViewFsHdfs extends ViewFsBaseTest {
 
   private static MiniDFSCluster cluster;
-  private static Path defaultWorkingDirectory;
   private static HdfsConfiguration CONF = new HdfsConfiguration();
   private static FileContext fc;
   
@@ -57,7 +49,7 @@ public class TestViewFsHdfs extends View
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
     cluster.waitClusterUp();
     fc = FileContext.getFileContext(cluster.getURI(0), CONF);
-    defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
+    Path defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
         UserGroupInformation.getCurrentUser().getShortUserName()));
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }
@@ -73,25 +65,15 @@ public class TestViewFsHdfs extends View
     // create the test root on local_fs
     fcTarget = fc;
     super.setUp();
-    
   }
-
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
-  }
-  
   
-  /*
-   * This overides the default implementation since hdfs does have delegation
+  /**
+   * This overrides the default implementation since hdfs does have delegation
    * tokens.
    */
   @Override
-  @Test
-  public void testGetDelegationTokens() throws IOException {
-    List<Token<?>> delTokens = 
-        fcView.getDelegationTokens(new Path("/"), "sanjay");
-    Assert.assertEquals(7, delTokens.size()); 
+  int getExpectedDelegationTokenCount() {
+    return 8;
   }
  
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Sat May 12 20:52:34 2012
@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -706,13 +707,59 @@ public class DFSTestUtil {
         .join(nameservices));
   }
   
+  private static DatanodeID getDatanodeID(String ipAddr) {
+    return new DatanodeID(ipAddr, "localhost",
+        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+  }
+
+  public static DatanodeID getLocalDatanodeID() {
+    return new DatanodeID("127.0.0.1", "localhost",
+        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+  }
+
+  public static DatanodeID getLocalDatanodeID(int port) {
+    return new DatanodeID("127.0.0.1", "localhost", "",
+        port, port, port);
+  }
+
   public static DatanodeDescriptor getLocalDatanodeDescriptor() {
-    return new DatanodeDescriptor(
-        new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+    return new DatanodeDescriptor(getLocalDatanodeID());
   }
 
   public static DatanodeInfo getLocalDatanodeInfo() {
-    return new DatanodeInfo(
-        new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+    return new DatanodeInfo(getLocalDatanodeID());
+  }
+
+  public static DatanodeInfo getDatanodeInfo(String ipAddr) {
+    return new DatanodeInfo(getDatanodeID(ipAddr));
+  }
+  
+  public static DatanodeInfo getLocalDatanodeInfo(int port) {
+    return new DatanodeInfo(getLocalDatanodeID(port));
+  }
+
+  public static DatanodeInfo getDatanodeInfo(String ipAddr, 
+      String host, int port) {
+    return new DatanodeInfo(new DatanodeID(ipAddr, host, port));
+  }
+
+  public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
+      String hostname, AdminStates adminState) {
+    return new DatanodeInfo(ipAddr, hostname, "storage",
+        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
+        1, 2, 3, 4, 5, 6, "local", adminState);
+  }
+
+  public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
+      String rackLocation) {
+    return getDatanodeDescriptor(ipAddr, DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
+        rackLocation);
+  }
+
+  public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
+      int port, String rackLocation) {
+    return new DatanodeDescriptor(new DatanodeID(ipAddr, port), rackLocation);
   }
 }

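The new factory methods above centralize DatanodeID construction so tests stop spelling out multi-argument constructors at every call site. Typical usage, as a sketch (ports and addresses are arbitrary test values):

    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

    class DatanodeFactoryExample {
      static void example() {
        DatanodeID localId = DFSTestUtil.getLocalDatanodeID(50020);
        DatanodeInfo bogus =
            DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234);
        DatanodeDescriptor onRack =
            DFSTestUtil.getDatanodeDescriptor("10.0.0.1", "/rack1");
      }
    }
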
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java Sat May 12 20:52:34 2012
@@ -24,6 +24,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -34,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.io.IOUtils;
 
 import org.apache.hadoop.security.token.Token;
 import org.junit.Test;
@@ -230,6 +232,33 @@ public class TestConnCache {
 
     in.close();
   }
+  
+  /**
+   * Test that the socket cache can be disabled by setting the capacity to
+   * 0. Regression test for HDFS-3365.
+   */
+  @Test
+  public void testDisableCache() throws IOException {
+    LOG.info("Starting testDisableCache()");
+
+    // Reading with the normally configured filesystem should
+    // cache a socket.
+    DFSTestUtil.readFile(fs, testFile);
+    assertEquals(1, ((DistributedFileSystem)fs).dfs.socketCache.size());
+    
+    // Configure a new instance with no caching, ensure that it doesn't
+    // cache anything
+    Configuration confWithoutCache = new Configuration(fs.getConf());
+    confWithoutCache.setInt(
+        DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
+    FileSystem fsWithoutCache = FileSystem.newInstance(confWithoutCache);
+    try {
+      DFSTestUtil.readFile(fsWithoutCache, testFile);
+      assertEquals(0, ((DistributedFileSystem)fsWithoutCache).dfs.socketCache.size());
+    } finally {
+      fsWithoutCache.close();
+    }
+  }
 
   @AfterClass
   public static void teardownCluster() throws Exception {

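The regression test above pins down the client-side opt-out: a socket-cache capacity of 0 disables caching entirely. How a client would request that in its own configuration, as a sketch:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    class NoSocketCacheExample {
      static FileSystem openUncached(Configuration base) throws IOException {
        Configuration conf = new Configuration(base);
        conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
        // newInstance bypasses the FileSystem cache so the setting takes effect
        return FileSystem.newInstance(conf);
      }
    }
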
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Sat May 12 20:52:34 2012
@@ -333,7 +333,7 @@ public class TestDFSClientRetries extend
       LocatedBlock badLocatedBlock = new LocatedBlock(
         goodLocatedBlock.getBlock(),
         new DatanodeInfo[] {
-          new DatanodeInfo(new DatanodeID("255.255.255.255", 234))
+          DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
         },
         goodLocatedBlock.getStartOffset(),
         false);
@@ -627,8 +627,7 @@ public class TestDFSClientRetries extend
     server.start();
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-    DatanodeID fakeDnId = new DatanodeID(
-        "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
+    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
     
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
     LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Sat May 12 20:52:34 2012
@@ -320,6 +320,25 @@ public class TestDFSUtil {
   }
 
   /**
+   * Ensure that fs.defaultFS is set in the configuration even if neither HA nor
+   * Federation is enabled.
+   * 
+   * Regression test for HDFS-3351.
+   */
+  @Test
+  public void testConfModificationNoFederationOrHa() {
+    final HdfsConfiguration conf = new HdfsConfiguration();
+    String nsId = null;
+    String nnId = null;
+    
+    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1234");
+
+    assertFalse("hdfs://localhost:1234".equals(conf.get(FS_DEFAULT_NAME_KEY)));
+    NameNode.initializeGenericKeys(conf, nsId, nnId);
+    assertEquals("hdfs://localhost:1234", conf.get(FS_DEFAULT_NAME_KEY));
+  }
+
+  /**
    * Regression test for HDFS-2934.
    */
   @Test

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Sat May 12 20:52:34 2012
@@ -85,6 +85,7 @@ public class TestDistributedFileSystem {
   /**
    * Tests DFSClient.close throws no ConcurrentModificationException if 
    * multiple files are open.
+   * Also tests that any cached sockets are closed. (HDFS-3359)
    */
   @Test
   public void testDFSClose() throws Exception {
@@ -94,11 +95,23 @@ public class TestDistributedFileSystem {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       FileSystem fileSys = cluster.getFileSystem();
       
-      // create two files
+      // create two files, leaving them open
       fileSys.create(new Path("/test/dfsclose/file-0"));
       fileSys.create(new Path("/test/dfsclose/file-1"));
+      
+      // create another file, close it, and read it, so
+      // the client gets a socket in its SocketCache
+      Path p = new Path("/non-empty-file");
+      DFSTestUtil.createFile(fileSys, p, 1L, (short)1, 0L);
+      DFSTestUtil.readFile(fileSys, p);
+      
+      DFSClient client = ((DistributedFileSystem)fileSys).dfs;
+      SocketCache cache = client.socketCache;
+      assertEquals(1, cache.size());
 
       fileSys.close();
+      
+      assertEquals(0, cache.size());
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java Sat May 12 20:52:34 2012
@@ -31,12 +31,15 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.ipc.RemoteException;
+import org.junit.Assert;
 import org.junit.Test;
 
 /**
@@ -295,4 +298,43 @@ public class TestFileAppend{
       cluster.shutdown();
     }
   }
+
+  /** Test two consecutive appends on a file with a full block. */
+  @Test
+  public void testAppendTwice() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    final FileSystem fs1 = cluster.getFileSystem();
+    final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
+    try {
+  
+      final Path p = new Path("/testAppendTwice/foo");
+      final int len = 1 << 16;
+      final byte[] fileContents = AppendTestUtil.initBuffer(len);
+
+      {
+        // create a new file with a full block.
+        FSDataOutputStream out = fs2.create(p, true, 4096, (short)1, len);
+        out.write(fileContents, 0, len);
+        out.close();
+      }
+  
+      //1st append does not add any data so that the last block remains full
+      //and the last block in INodeFileUnderConstruction is a BlockInfo
+      //but not BlockInfoUnderConstruction. 
+      fs2.append(p);
+      
+      //2nd append should get AlreadyBeingCreatedException
+      fs1.append(p);
+      Assert.fail();
+    } catch(RemoteException re) {
+      AppendTestUtil.LOG.info("Got an exception:", re);
+      Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
+          re.getClassName());
+    } finally {
+      fs2.close();
+      fs1.close();
+      cluster.shutdown();
+    }
+  }
 }


