hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1333291 [2/3] - in /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project: dev-support/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-httpfs...
Date: Thu, 03 May 2012 02:14:26 GMT
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Thu May  3 02:14:01 2012
@@ -55,7 +55,6 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
-import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -2380,7 +2379,7 @@ assert storedBlock.findDatanode(dn) < 0 
       // necessary. In that case, put block on a possibly-will-
       // be-replicated list.
       //
-      INode fileINode = blocksMap.getINode(block);
+      INodeFile fileINode = blocksMap.getINode(block);
       if (fileINode != null) {
         namesystem.decrementSafeBlockCount(block);
         updateNeededReplications(block, -1, 0);
@@ -2612,7 +2611,7 @@ assert storedBlock.findDatanode(dn) < 0 
       NumberReplicas num) {
     int curReplicas = num.liveReplicas();
     int curExpectedReplicas = getReplication(block);
-    INode fileINode = blocksMap.getINode(block);
+    INodeFile fileINode = blocksMap.getINode(block);
     Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block);
     StringBuilder nodeList = new StringBuilder();
     while (nodeIter.hasNext()) {
@@ -2663,7 +2662,7 @@ assert storedBlock.findDatanode(dn) < 0 
     final Iterator<? extends Block> it = srcNode.getBlockIterator();
     while(it.hasNext()) {
       final Block block = it.next();
-      INode fileINode = blocksMap.getINode(block);
+      INodeFile fileINode = blocksMap.getINode(block);
 
       if (fileINode != null) {
         NumberReplicas num = countNodes(block);
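
All three hunks above ride on the same API change: BlocksMap.getINode(Block) now returns the concrete INodeFile rather than the abstract INode (which this commit also narrows to package-private; see the INode.java hunk below), so callers keep the stronger type without a cast. A sketch of what the narrowed accessor plausibly looks like; the blocks map and the BlockInfo back-pointer are assumptions, not shown in this diff:

    // Sketch only: assumes BlockInfo keeps a reference to its owning file inode.
    INodeFile getINode(Block b) {
      BlockInfo info = blocks.get(b);
      return (info != null) ? info.getINode() : null;
    }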

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Thu May  3 02:14:01 2012
@@ -651,7 +651,6 @@ public class DatanodeManager {
    * checks if any of the hosts have changed states:
    */
   public void refreshNodes(final Configuration conf) throws IOException {
-    namesystem.checkSuperuserPrivilege();
     refreshHostsReader(conf);
     namesystem.writeLock();
     try {
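
Dropping checkSuperuserPrivilege() here does not remove the check; it moves up into FSNamesystem.refreshNodes(), added later in this commit, so authorization happens once at the namesystem entry point rather than inside DatanodeManager:

    // The new caller, quoted from the FSNamesystem hunk below.
    void refreshNodes() throws IOException {
      checkOperation(OperationCategory.UNCHECKED);
      checkSuperuserPrivilege();  // moved up from DatanodeManager
      getBlockManager().getDatanodeManager().refreshNodes(new HdfsConfiguration());
    }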

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Thu May  3 02:14:01 2012
@@ -1513,11 +1513,6 @@ public class DataNode extends Configured
       printUsage();
       return null;
     }
-    if (conf.get("dfs.network.script") != null) {
-      LOG.error("This configuration for rack identification is not supported" +
-          " anymore. RackID resolution is handled by the NameNode.");
-      System.exit(-1);
-    }
     Collection<URI> dataDirs = getStorageDirs(conf);
     UserGroupInformation.setConfiguration(conf);
     SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java Thu May  3 02:14:01 2012
@@ -50,7 +50,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -398,22 +398,21 @@ public class DatanodeWebHdfsMethods {
     {
       final int b = bufferSize.getValue(conf);
       final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
-      DFSDataInputStream in = null;
+      HdfsDataInputStream in = null;
       try {
-        in = new DFSClient.DFSDataInputStream(
-            dfsclient.open(fullpath, b, true));
+        in = new HdfsDataInputStream(dfsclient.open(fullpath, b, true));
         in.seek(offset.getValue());
       } catch(IOException ioe) {
         IOUtils.cleanup(LOG, in);
         IOUtils.cleanup(LOG, dfsclient);
         throw ioe;
       }
-      final DFSDataInputStream dis = in;
+      final HdfsDataInputStream dis = in;
       final StreamingOutput streaming = new StreamingOutput() {
         @Override
         public void write(final OutputStream out) throws IOException {
           final Long n = length.getValue();
-          DFSDataInputStream dfsin = dis;
+          HdfsDataInputStream dfsin = dis;
           DFSClient client = dfsclient;
           try {
             if (n == null) {
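
This file moves off the deprecated inner class DFSClient.DFSDataInputStream onto the public org.apache.hadoop.hdfs.client.HdfsDataInputStream; the test files later in this commit (DFSTestUtil, TestDecommission) make the same swap. The replacement keeps the HDFS-specific accessors the old class exposed. A brief sketch, reusing dfsclient, fullpath and b from the hunk above:

    HdfsDataInputStream in = new HdfsDataInputStream(dfsclient.open(fullpath, b, true));
    ExtendedBlock current = in.getCurrentBlock();   // see the DFSTestUtil hunk below
    List<LocatedBlock> blocks = in.getAllBlocks();  // see the TestDecommission hunk below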

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Thu May  3 02:14:01 2012
@@ -1160,7 +1160,9 @@ public abstract class FSEditLogOp {
     @Override
     protected void toXml(ContentHandler contentHandler) throws SAXException {
       XMLUtils.addSaxString(contentHandler, "SRC", src);
-      XMLUtils.addSaxString(contentHandler, "USERNAME", username);
+      if (username != null) {
+        XMLUtils.addSaxString(contentHandler, "USERNAME", username);
+      }
       if (groupname != null) {
         XMLUtils.addSaxString(contentHandler, "GROUPNAME", groupname);
       }
@@ -1168,12 +1170,10 @@ public abstract class FSEditLogOp {
     
     @Override void fromXml(Stanza st) throws InvalidXmlException {
       this.src = st.getValue("SRC");
-      this.username = st.getValue("USERNAME");
-      if (st.hasChildren("GROUPNAME")) {
-        this.groupname = st.getValue("GROUPNAME");
-      } else {
-        this.groupname = null;
-      }
+      this.username = (st.hasChildren("USERNAME")) ? 
+          st.getValue("USERNAME") : null;
+      this.groupname = (st.hasChildren("GROUPNAME")) ? 
+          st.getValue("GROUPNAME") : null;
     }
   }
 
@@ -2346,7 +2346,7 @@ public abstract class FSEditLogOp {
         Long.valueOf(block.getBlockId()).toString());
     XMLUtils.addSaxString(contentHandler, "NUM_BYTES",
         Long.valueOf(block.getNumBytes()).toString());
-    XMLUtils.addSaxString(contentHandler, "GENERATION_STAMP",
+    XMLUtils.addSaxString(contentHandler, "GENSTAMP",
         Long.valueOf(block.getGenerationStamp()).toString());
     contentHandler.endElement("", "", "BLOCK");
   }
@@ -2355,7 +2355,7 @@ public abstract class FSEditLogOp {
       throws InvalidXmlException {
     long blockId = Long.valueOf(st.getValue("BLOCK_ID"));
     long numBytes = Long.valueOf(st.getValue("NUM_BYTES"));
-    long generationStamp = Long.valueOf(st.getValue("GENERATION_STAMP"));
+    long generationStamp = Long.valueOf(st.getValue("GENSTAMP"));
     return new Block(blockId, numBytes, generationStamp);
   }
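
The USERNAME fix makes the writer and parser symmetric: toXml() used to emit USERNAME unconditionally (which would fail for a null username) while fromXml() read it unconditionally; both now treat it as optional, matching the existing GROUPNAME handling. The second pair of hunks renames the GENERATION_STAMP element to GENSTAMP on both sides, keeping writer and parser in step. The optional-element pattern, generalized (FIELD is a stand-in name):

    // Writer side: emit the element only when the field is set.
    if (field != null) {
      XMLUtils.addSaxString(contentHandler, "FIELD", field);
    }

    // Reader side: probe before reading, so a log written without the
    // element parses to null instead of failing with InvalidXmlException.
    this.field = st.hasChildren("FIELD") ? st.getValue("FIELD") : null;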
 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu May  3 02:14:01 2012
@@ -457,7 +457,7 @@ public class FSNamesystem implements Nam
 
       this.accessTimePrecision = conf.getLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
       this.supportAppends = conf.getBoolean(DFS_SUPPORT_APPEND_KEY, DFS_SUPPORT_APPEND_DEFAULT);
-      LOG.info("Append Enabled: " + haEnabled);
+      LOG.info("Append Enabled: " + supportAppends);
 
       this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
       
@@ -3332,8 +3332,26 @@ public class FSNamesystem implements Nam
   }
     
   void finalizeUpgrade() throws IOException {
+    writeLock();
+    try {
+      checkOperation(OperationCategory.WRITE);
+      checkSuperuserPrivilege();
+      getFSImage().finalizeUpgrade();
+    } finally {
+      writeUnlock();
+    }
+  }
+
+  void refreshNodes() throws IOException {
+    checkOperation(OperationCategory.UNCHECKED);
+    checkSuperuserPrivilege();
+    getBlockManager().getDatanodeManager().refreshNodes(new HdfsConfiguration());
+  }
+
+  void setBalancerBandwidth(long bandwidth) throws IOException {
+    checkOperation(OperationCategory.UNCHECKED);
     checkSuperuserPrivilege();
-    getFSImage().finalizeUpgrade();
+    getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
   }
 
   /**
@@ -3723,7 +3741,7 @@ public class FSNamesystem implements Nam
           msg += String.format(
             "The number of live datanodes %d needs an additional %d live "
             + "datanodes to reach the minimum number %d.",
-            numLive, (datanodeThreshold - numLive) + 1 , datanodeThreshold);
+            numLive, (datanodeThreshold - numLive), datanodeThreshold);
         }
         msg += " " + leaveMsg;
       } else {
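
Three separate fixes land in this file. The "Append Enabled" log line now prints supportAppends instead of the unrelated haEnabled flag it was accidentally wired to. The safe-mode message loses an off-by-one: with numLive live datanodes and a threshold of datanodeThreshold, exactly datanodeThreshold - numLive more are needed, not one extra. And the admin operations (finalizeUpgrade, refreshNodes, setBalancerBandwidth) are consolidated in FSNamesystem so that HA-state checks and authorization happen in one place; the shape they share, sketched with adminWork() as a placeholder (refreshNodes and setBalancerBandwidth use OperationCategory.UNCHECKED and skip the lock):

    void someAdminOp() throws IOException {
      writeLock();                                // only if the op mutates namesystem state
      try {
        checkOperation(OperationCategory.WRITE);  // verify this NN's HA state allows the op
        checkSuperuserPrivilege();                // then authorize the caller
        adminWork();                              // placeholder for the actual work
      } finally {
        writeUnlock();
      }
    }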

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java Thu May  3 02:14:01 2012
@@ -35,6 +35,8 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
@@ -44,6 +46,7 @@ import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
 /**
@@ -116,6 +119,7 @@ public class GetImageServlet extends Htt
             if (imageFile == null) {
               throw new IOException(errorMessage);
             }
+            CheckpointFaultInjector.getInstance().beforeGetImageSetsHeaders();
             setFileNameHeaders(response, imageFile);
             setVerificationHeaders(response, imageFile);
             // send fsImage
@@ -186,8 +190,8 @@ public class GetImageServlet extends Htt
         }       
       });
       
-    } catch (Exception ie) {
-      String errMsg = "GetImage failed. " + StringUtils.stringifyException(ie);
+    } catch (Throwable t) {
+      String errMsg = "GetImage failed. " + StringUtils.stringifyException(t);
       response.sendError(HttpServletResponse.SC_GONE, errMsg);
       throw new IOException(errMsg);
     } finally {
@@ -218,26 +222,44 @@ public class GetImageServlet extends Htt
     return throttler;
   }
   
-  protected boolean isValidRequestor(String remoteUser, Configuration conf)
+  @VisibleForTesting
+  static boolean isValidRequestor(String remoteUser, Configuration conf)
       throws IOException {
     if(remoteUser == null) { // This really shouldn't happen...
       LOG.warn("Received null remoteUser while authorizing access to getImage servlet");
       return false;
     }
+    
+    Set<String> validRequestors = new HashSet<String>();
 
-    String[] validRequestors = {
+    validRequestors.add(
         SecurityUtil.getServerPrincipal(conf
             .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), NameNode
-            .getAddress(conf).getHostName()),
+            .getAddress(conf).getHostName()));
+    validRequestors.add(
         SecurityUtil.getServerPrincipal(conf
             .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), NameNode
-            .getAddress(conf).getHostName()),
+            .getAddress(conf).getHostName()));
+    validRequestors.add(
         SecurityUtil.getServerPrincipal(conf
             .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
-            SecondaryNameNode.getHttpAddress(conf).getHostName()),
+            SecondaryNameNode.getHttpAddress(conf).getHostName()));
+    validRequestors.add(
         SecurityUtil.getServerPrincipal(conf
             .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY),
-            SecondaryNameNode.getHttpAddress(conf).getHostName()) };
+            SecondaryNameNode.getHttpAddress(conf).getHostName()));
+
+    if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
+      Configuration otherNnConf = HAUtil.getConfForOtherNode(conf);
+      validRequestors.add(
+          SecurityUtil.getServerPrincipal(otherNnConf
+              .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
+              NameNode.getAddress(otherNnConf).getHostName()));
+      validRequestors.add(
+          SecurityUtil.getServerPrincipal(otherNnConf
+              .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
+              NameNode.getAddress(otherNnConf).getHostName()));
+    }
 
     for(String v : validRequestors) {
       if(v != null && v.equals(remoteUser)) {
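
Switching the requestor list from a fixed-size array to a HashSet lets the new HA branch append the other NameNode's principals conditionally, and the drop to package-private with @VisibleForTesting opens the method to unit tests; the broadened catch (Throwable) also reports non-Exception errors back to the client instead of letting them escape the servlet. A hypothetical test fragment; every key, host and principal below is invented for illustration:

    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://nn1.example.com:8020");
    conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "hdfs/_HOST@EXAMPLE.COM");
    assertTrue(GetImageServlet.isValidRequestor(
        "hdfs/nn1.example.com@EXAMPLE.COM", conf));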

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Thu May  3 02:14:01 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -34,7 +35,8 @@ import org.apache.hadoop.util.StringUtil
  * This is a base INode class containing common fields for file and 
  * directory inodes.
  */
-public abstract class INode implements Comparable<byte[]>, FSInodeInfo {
+@InterfaceAudience.Private
+abstract class INode implements Comparable<byte[]>, FSInodeInfo {
   /*
    *  The inode name is in java UTF8 encoding; 
    *  The name in HdfsFileStatus should keep the same encoding as this.

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu May  3 02:14:01 2012
@@ -25,6 +25,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Iterator;
@@ -181,8 +182,10 @@ public class NameNode {
     DFS_NAMENODE_BACKUP_ADDRESS_KEY,
     DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
     DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY,
+    DFS_NAMENODE_USER_NAME_KEY,
     DFS_HA_FENCE_METHODS_KEY,
-    DFS_HA_ZKFC_PORT_KEY
+    DFS_HA_ZKFC_PORT_KEY,
+    DFS_HA_FENCE_METHODS_KEY
   };
   
   /**
@@ -693,10 +696,14 @@ public class NameNode {
     initializeGenericKeys(conf, nsId, namenodeId);
     checkAllowFormat(conf);
     
-    Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+    Collection<URI> nameDirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+    List<URI> sharedDirs = FSNamesystem.getSharedEditsDirs(conf);
+    List<URI> dirsToPrompt = new ArrayList<URI>();
+    dirsToPrompt.addAll(nameDirsToFormat);
+    dirsToPrompt.addAll(sharedDirs);
     List<URI> editDirsToFormat = 
                  FSNamesystem.getNamespaceEditsDirs(conf);
-    if (!confirmFormat(dirsToFormat, force, isInteractive)) {
+    if (!confirmFormat(dirsToPrompt, force, isInteractive)) {
       return true; // aborted
     }
 
@@ -708,7 +715,7 @@ public class NameNode {
     }
     System.out.println("Formatting using clusterid: " + clusterId);
     
-    FSImage fsImage = new FSImage(conf, dirsToFormat, editDirsToFormat);
+    FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat);
     FSNamesystem fsn = new FSNamesystem(conf, fsImage);
     fsImage.format(fsn, clusterId);
     return false;
@@ -730,7 +737,18 @@ public class NameNode {
       boolean force, boolean interactive)
       throws IOException {
     for(Iterator<URI> it = dirsToFormat.iterator(); it.hasNext();) {
-      File curDir = new File(it.next().getPath());
+      URI dirUri = it.next();
+      if (!dirUri.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
+        System.err.println("Skipping format for directory \"" + dirUri
+            + "\". Can only format local directories with scheme \""
+            + NNStorage.LOCAL_URI_SCHEME + "\".");
+        continue;
+      }
+      // To validate only file based schemes are formatted
+      assert dirUri.getScheme().equals(NNStorage.LOCAL_URI_SCHEME) :
+        "formatting is not supported for " + dirUri;
+
+      File curDir = new File(dirUri.getPath());
       // Its alright for a dir not to exist, or to exist (properly accessible)
       // and be completely empty.
       if (!curDir.exists() ||

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Thu May  3 02:14:01 2012
@@ -707,9 +707,7 @@ class NameNodeRpcServer implements Namen
 
   @Override // ClientProtocol
   public void refreshNodes() throws IOException {
-    namesystem.checkOperation(OperationCategory.UNCHECKED);
-    namesystem.getBlockManager().getDatanodeManager().refreshNodes(
-        new HdfsConfiguration());
+    namesystem.refreshNodes();
   }
 
   @Override // NamenodeProtocol
@@ -732,7 +730,6 @@ class NameNodeRpcServer implements Namen
     
   @Override // ClientProtocol
   public void finalizeUpgrade() throws IOException {
-    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.finalizeUpgrade();
   }
 
@@ -772,8 +769,7 @@ class NameNodeRpcServer implements Namen
    */
   @Override // ClientProtocol
   public void setBalancerBandwidth(long bandwidth) throws IOException {
-    namesystem.checkOperation(OperationCategory.UNCHECKED);
-    namesystem.getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
+    namesystem.setBalancerBandwidth(bandwidth);
   }
   
   @Override // ClientProtocol

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Thu May  3 02:14:01 2012
@@ -47,7 +47,6 @@ import static org.apache.hadoop.hdfs.DFS
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -476,10 +475,7 @@ public class SecondaryNameNode implement
     }
 
     // error simulation code for junit test
-    if (ErrorSimulator.getErrorSimulation(0)) {
-      throw new IOException("Simulating error0 " +
-                            "after creating edits.new");
-    }
+    CheckpointFaultInjector.getInstance().afterSecondaryCallsRollEditLog();
 
     RemoteEditLogManifest manifest =
       namenode.getEditLogManifest(sig.mostRecentCheckpointTxId + 1);
@@ -497,10 +493,7 @@ public class SecondaryNameNode implement
         dstStorage, txid);
 
     // error simulation code for junit test
-    if (ErrorSimulator.getErrorSimulation(1)) {
-      throw new IOException("Simulating error1 " +
-                            "after uploading new image to NameNode");
-    }
+    CheckpointFaultInjector.getInstance().afterSecondaryUploadsNewImage();
 
     LOG.warn("Checkpoint done. New Image Size: " 
              + dstStorage.getFsImageName(txid).length());

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java Thu May  3 02:14:01 2012
@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
-import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -153,15 +152,12 @@ public class TransferFsImage {
     FileInputStream infile = null;
     try {
       infile = new FileInputStream(localfile);
-      if (ErrorSimulator.getErrorSimulation(2)
-          && localfile.getAbsolutePath().contains("secondary")) {
-        // throw exception only when the secondary sends its image
-        throw new IOException("If this exception is not caught by the " +
-            "name-node fs image will be truncated.");
-      }
+      CheckpointFaultInjector.getInstance()
+          .aboutToSendFile(localfile);
       
-      if (ErrorSimulator.getErrorSimulation(3)
-          && localfile.getAbsolutePath().contains("fsimage")) {
+
+      if (CheckpointFaultInjector.getInstance().
+            shouldSendShortFile(localfile)) {
           // Test sending image shorter than localfile
           long len = localfile.length();
           buf = new byte[(int)Math.min(len/2, HdfsConstants.IO_FILE_BUFFER_SIZE)];
@@ -175,8 +171,8 @@ public class TransferFsImage {
         if (num <= 0) {
           break;
         }
-
-        if (ErrorSimulator.getErrorSimulation(4)) {
+        if (CheckpointFaultInjector.getInstance()
+              .shouldCorruptAByte(localfile)) {
           // Simulate a corrupted byte on the wire
           LOG.warn("SIMULATING A CORRUPT BYTE IN IMAGE TRANSFER!");
           buf[0]++;
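
This file and SecondaryNameNode.java above replace the index-based ErrorSimulator flags with named hooks on a CheckpointFaultInjector singleton: production code calls no-op methods, and a test installs a subclass that overrides just the hook it wants to fire. A minimal sketch of the injector, using only the hook names visible in this commit (the class body itself is not part of this diff, so treat the details as assumed):

    import java.io.File;
    import java.io.IOException;

    class CheckpointFaultInjector {
      static CheckpointFaultInjector instance = new CheckpointFaultInjector();
      static CheckpointFaultInjector getInstance() { return instance; }

      // All no-ops in production; a test subclass overrides one to throw
      // or to return true at the matching call site.
      void beforeGetImageSetsHeaders() throws IOException {}
      void afterSecondaryCallsRollEditLog() throws IOException {}
      void afterSecondaryUploadsNewImage() throws IOException {}
      void aboutToSendFile(File localfile) throws IOException {}
      boolean shouldSendShortFile(File localfile) { return false; }
      boolean shouldCorruptAByte(File localfile) { return false; }
    }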

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalInfo.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalInfo.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalInfo.java Thu May  3 02:14:01 2012
@@ -45,4 +45,29 @@ public class JournalInfo {
   public int getNamespaceId() {
     return namespaceId;
   }
+  
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("lv=").append(layoutVersion).append(";cid=").append(clusterId)
+    .append(";nsid=").append(namespaceId);
+    return sb.toString();
+  }
+  
+  @Override
+  public boolean equals(Object o) {
+    JournalInfo jInfo;
+    if (!(o instanceof JournalInfo)) {
+      return false;
+    }
+    jInfo = (JournalInfo) o;
+    return ((jInfo.clusterId.equals(this.clusterId))
+        && (jInfo.namespaceId == this.namespaceId)
+        && (jInfo.layoutVersion == this.layoutVersion));
+  }
+  
+  @Override
+  public int hashCode() {
+    return (namespaceId ^ layoutVersion ^ clusterId.hashCode());
+  }
 }
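
The added equals() follows the usual recipe: the instanceof test rejects both null and foreign types, and hashCode() mixes the same three fields equals() compares, preserving the equals/hashCode contract (note both assume clusterId is non-null). A quick check, assuming a (layoutVersion, clusterId, namespaceId) constructor:

    JournalInfo a = new JournalInfo(-40, "CID-1234-5678", 99);
    JournalInfo b = new JournalInfo(-40, "CID-1234-5678", 99);
    assert a.equals(b) && a.hashCode() == b.hashCode();       // equal fields, equal hash
    assert !a.equals(null) && !a.equals("not a JournalInfo"); // instanceof guards both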

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Thu May  3 02:14:01 2012
@@ -155,6 +155,17 @@ public class WebHdfsFileSystem extends F
     }
   }
 
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>webhdfs</code>
+   */
+  @Override
+  public String getScheme() {
+    return "webhdfs";
+  }
+
   @Override
   public synchronized void initialize(URI uri, Configuration conf
       ) throws IOException {
@@ -165,7 +176,7 @@ public class WebHdfsFileSystem extends F
     } catch (URISyntaxException e) {
       throw new IllegalArgumentException(e);
     }
-    this.nnAddr = NetUtils.createSocketAddrForHost(uri.getHost(), uri.getPort());
+    this.nnAddr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
     this.workingDir = getHomeDirectory();
 
     if (UserGroupInformation.isSecurityEnabled()) {
@@ -175,7 +186,7 @@ public class WebHdfsFileSystem extends F
 
   protected void initDelegationToken() throws IOException {
     // look for webhdfs token, then try hdfs
-    Token<?> token = selectDelegationToken();
+    Token<?> token = selectDelegationToken(ugi);
 
     //since we don't already have a token, go get one
     boolean createdToken = false;
@@ -196,8 +207,9 @@ public class WebHdfsFileSystem extends F
     }
   }
 
-  protected Token<DelegationTokenIdentifier> selectDelegationToken() {
-    return DT_SELECTOR.selectToken(getUri(), ugi.getTokens(), getConf());
+  protected Token<DelegationTokenIdentifier> selectDelegationToken(
+      UserGroupInformation ugi) {
+    return DT_SELECTOR.selectToken(getCanonicalUri(), ugi.getTokens(), getConf());
   }
 
   @Override
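
Three independent tweaks here: getScheme() lets callers ask a WebHdfsFileSystem instance for its scheme; nnAddr is now resolved from the full URI authority with the filesystem's default port as fallback; and selectDelegationToken() takes the UGI as a parameter so a test can pass a synthetic user (the Hftp tests below use the same pattern). A small sketch of the first, with the host invented for illustration:

    // Assumes a webhdfs filesystem can be obtained for the URI.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://nn.example.com:50070/"), new Configuration());
    assert "webhdfs".equals(fs.getScheme());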

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1327719-1333290

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apsupport.m4
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apsupport.m4?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apsupport.m4 (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apsupport.m4 Thu May  3 02:14:01 2012
@@ -71,7 +71,7 @@ AC_DEFUN([AP_SUPPORTED_HOST],[
   esac
 
   case $host_cpu in
-  powerpc)
+  powerpc*)
     CFLAGS="$CFLAGS -DCPU=\\\"$host_cpu\\\""
     HOST_CPU=$host_cpu;;
   sparc*)

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1327719-1333290

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1327719-1333290

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1327719-1333290

Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1327719-1333290

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Thu May  3 02:14:01 2012
@@ -55,8 +55,8 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -515,15 +515,14 @@ public class DFSTestUtil {
   }
   
   public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
-    DFSDataInputStream in = 
-      (DFSDataInputStream) ((DistributedFileSystem)fs).open(path);
+    HdfsDataInputStream in = (HdfsDataInputStream)((DistributedFileSystem)fs).open(path);
     in.readByte();
     return in.getCurrentBlock();
   }  
 
   public static List<LocatedBlock> getAllBlocks(FSDataInputStream in)
       throws IOException {
-    return ((DFSClient.DFSDataInputStream) in).getAllBlocks();
+    return ((HdfsDataInputStream) in).getAllBlocks();
   }
 
   public static Token<BlockTokenIdentifier> getBlockToken(

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Thu May  3 02:14:01 2012
@@ -89,7 +89,6 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -105,7 +104,6 @@ import org.apache.hadoop.util.ToolRunner
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import com.google.common.io.Files;
 
 /**
  * This class creates a single-process DFS cluster for junit testing.
@@ -1581,7 +1579,7 @@ public class MiniDFSCluster {
   /**
    * Get a client handle to the DFS cluster with a single namenode.
    */
-  public FileSystem getFileSystem() throws IOException {
+  public DistributedFileSystem getFileSystem() throws IOException {
     checkSingleNameNode();
     return getFileSystem(0);
   }
@@ -1589,8 +1587,9 @@ public class MiniDFSCluster {
   /**
    * Get a client handle to the DFS cluster for the namenode at given index.
    */
-  public FileSystem getFileSystem(int nnIndex) throws IOException {
-    return FileSystem.get(getURI(nnIndex), nameNodes[nnIndex].conf);
+  public DistributedFileSystem getFileSystem(int nnIndex) throws IOException {
+    return (DistributedFileSystem)FileSystem.get(getURI(nnIndex),
+        nameNodes[nnIndex].conf);
   }
 
   /**

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java Thu May  3 02:14:01 2012
@@ -48,7 +48,7 @@ public class TestBlocksScheduledCounter 
       out.write(i);
     }
     // flush to make sure a block is allocated.
-    ((DFSOutputStream)(out.getWrappedStream())).hflush();
+    out.hflush();
     
     ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
     final DatanodeManager dm = cluster.getNamesystem().getBlockManager(

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java Thu May  3 02:14:01 2012
@@ -19,8 +19,10 @@ package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -31,6 +33,7 @@ import java.io.InputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
 
+import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 import org.junit.Test;
 
 public class TestByteRangeInputStream {
@@ -84,6 +87,11 @@ public static class MockHttpURLConnectio
   public void setResponseCode(int resCode) {
     responseCode = resCode;
   }
+  
+  @Override
+  public String getHeaderField(String field) {
+    return (field.equalsIgnoreCase(StreamFile.CONTENT_LENGTH)) ? "65535" : null;
+  }
 }
   
   @Test
@@ -163,4 +171,74 @@ public static class MockHttpURLConnectio
                    "HTTP_OK expected, received 206", e.getMessage());
     }
   }
+  
+  @Test
+  public void testPropagatedClose() throws IOException {
+    ByteRangeInputStream brs = spy(
+        new HftpFileSystem.RangeHeaderInputStream(new URL("http://test/")));
+    
+    InputStream mockStream = mock(InputStream.class);
+    doReturn(mockStream).when(brs).openInputStream();
+
+    int brisOpens = 0;
+    int brisCloses = 0;
+    int isCloses = 0;
+    
+    // first open, shouldn't close underlying stream
+    brs.getInputStream();
+    verify(brs, times(++brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+    
+    // stream is open, shouldn't close underlying stream
+    brs.getInputStream();
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+    
+    // seek forces a reopen, should close underlying stream
+    brs.seek(1);
+    brs.getInputStream();
+    verify(brs, times(++brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(++isCloses)).close();
+
+    // verify that the underlying stream isn't closed after a seek
+    // ie. the state was correctly updated
+    brs.getInputStream();
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+
+    // seeking to same location should be a no-op
+    brs.seek(1);
+    brs.getInputStream();
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+
+    // close should of course close
+    brs.close();
+    verify(brs, times(++brisCloses)).close();
+    verify(mockStream, times(++isCloses)).close();
+    
+    // it's already closed, underlying stream should not close
+    brs.close();
+    verify(brs, times(++brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+    
+    // it's closed, don't reopen it
+    boolean errored = false;
+    try {
+      brs.getInputStream();
+    } catch (IOException e) {
+      errored = true;
+      assertEquals("Stream closed", e.getMessage());
+    } finally {
+      assertTrue("Read a closed steam", errored);
+    }
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+  }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Thu May  3 02:14:01 2012
@@ -63,7 +63,6 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.mockito.internal.stubbing.answers.ThrowsException;
@@ -100,7 +99,7 @@ public class TestDFSClientRetries extend
     }
 
     @Override
-    public Writable call(RpcKind rpcKind, String protocol, Writable param, long receiveTime)
+    public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param, long receiveTime)
         throws IOException {
       if (sleep) {
         // sleep a bit

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java Thu May  3 02:14:01 2012
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
@@ -143,10 +144,10 @@ public class TestDecommission {
     String downnode, int numDatanodes) throws IOException {
     boolean isNodeDown = (downnode != null);
     // need a raw stream
-    assertTrue("Not HDFS:"+fileSys.getUri(), 
-    fileSys instanceof DistributedFileSystem);
-    DFSClient.DFSDataInputStream dis = (DFSClient.DFSDataInputStream)
-      ((DistributedFileSystem)fileSys).open(name);
+    assertTrue("Not HDFS:"+fileSys.getUri(),
+        fileSys instanceof DistributedFileSystem);
+    HdfsDataInputStream dis = (HdfsDataInputStream)
+        ((DistributedFileSystem)fileSys).open(name);
     Collection<LocatedBlock> dinfo = dis.getAllBlocks();
     for (LocatedBlock blk : dinfo) { // for each block
       int hasdown = 0;

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Thu May  3 02:14:01 2012
@@ -31,6 +31,7 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.junit.Assume.assumeTrue;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -53,6 +54,7 @@ import org.apache.hadoop.fs.FsServerDefa
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -68,8 +70,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
 
-import static org.junit.Assume.assumeTrue;
-
 /**
  * This class tests various cases during file creation.
  */
@@ -99,6 +99,11 @@ public class TestFileCreation extends ju
     return stm;
   }
 
+  public static HdfsDataOutputStream create(DistributedFileSystem dfs,
+      Path name, int repl) throws IOException {
+    return (HdfsDataOutputStream)createFile(dfs, name, repl);
+  }
+
   //
   // writes to file but does not close it
   //
@@ -494,7 +499,7 @@ public class TestFileCreation extends ju
 
     // create cluster
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-    FileSystem fs = null;
+    DistributedFileSystem fs = null;
     try {
       cluster.waitActive();
       fs = cluster.getFileSystem();
@@ -502,21 +507,17 @@ public class TestFileCreation extends ju
 
       // create a new file.
       Path file1 = new Path("/filestatus.dat");
-      FSDataOutputStream stm = createFile(fs, file1, 1);
+      HdfsDataOutputStream stm = create(fs, file1, 1);
       System.out.println("testFileCreationNamenodeRestart: "
                          + "Created file " + file1);
-      int actualRepl = ((DFSOutputStream)(stm.getWrappedStream())).
-                        getNumCurrentReplicas();
-      assertTrue(file1 + " should be replicated to 1 datanodes.",
-                 actualRepl == 1);
+      assertEquals(file1 + " should be replicated to 1 datanode.", 1,
+          stm.getCurrentBlockReplication());
 
       // write two full blocks.
       writeFile(stm, numBlocks * blockSize);
       stm.hflush();
-      actualRepl = ((DFSOutputStream)(stm.getWrappedStream())).
-                        getNumCurrentReplicas();
-      assertTrue(file1 + " should still be replicated to 1 datanodes.",
-                 actualRepl == 1);
+      assertEquals(file1 + " should still be replicated to 1 datanode.", 1,
+          stm.getCurrentBlockReplication());
 
       // rename file wile keeping it open.
       Path fileRenamed = new Path("/filestatusRenamed.dat");
@@ -849,11 +850,10 @@ public class TestFileCreation extends ju
       // create a new file.
       final String f = DIR + "foo";
       final Path fpath = new Path(f);
-      FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
+      HdfsDataOutputStream out = create(dfs, fpath, DATANODE_NUM);
       out.write("something".getBytes());
       out.hflush();
-      int actualRepl = ((DFSOutputStream)(out.getWrappedStream())).
-                        getNumCurrentReplicas();
+      int actualRepl = out.getCurrentBlockReplication();
       assertTrue(f + " should be replicated to " + DATANODE_NUM + " datanodes.",
                  actualRepl == DATANODE_NUM);
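
Together with MiniDFSCluster.getFileSystem() now returning DistributedFileSystem (above), the new create() helper lets tests query replication directly on HdfsDataOutputStream instead of reaching through the wrapped stream. The before/after, sketched:

    // Before: ((DFSOutputStream) out.getWrappedStream()).getNumCurrentReplicas()
    // After, using the create() helper added at the top of this file:
    HdfsDataOutputStream out = create(dfs, new Path("/f"), 1);
    out.write("something".getBytes());
    out.hflush();
    int actualRepl = out.getCurrentBlockReplication();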
 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java Thu May  3 02:14:01 2012
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
 import static 
   org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
 
+import java.io.IOException;
 import java.lang.reflect.Field;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
@@ -69,38 +70,104 @@ public class TestHftpDelegationToken {
   }
 
   @Test
-  public void testSelectHdfsDelegationToken() throws Exception {
+  public void testSelectHftpDelegationToken() throws Exception {
     SecurityUtilTestHelper.setTokenServiceUseIp(true);
 
     Configuration conf = new Configuration();
-    URI hftpUri = URI.create("hftp://localhost:0");
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    Token<?> token = null;
+    conf.setClass("fs.hftp.impl", MyHftpFileSystem.class, FileSystem.class);
     
+    int httpPort = 80;
+    int httpsPort = 443;
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, httpPort);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, httpsPort);
+    
+    // test with implicit default port 
+    URI fsUri = URI.create("hftp://localhost");
+    MyHftpFileSystem fs = (MyHftpFileSystem) FileSystem.newInstance(fsUri, conf);
+    assertEquals(httpPort, fs.getCanonicalUri().getPort());
+    checkTokenSelection(fs, httpsPort, conf); // should still use secure port
+
+    // test with explicit default port
+    fsUri = URI.create("hftp://localhost:"+httpPort);
+    fs = (MyHftpFileSystem) FileSystem.newInstance(fsUri, conf);
+    assertEquals(httpPort, fs.getCanonicalUri().getPort());
+    checkTokenSelection(fs, httpsPort, conf); // should still use secure port
+    
+    // test with non-default port
+    fsUri = URI.create("hftp://localhost:"+(httpPort+1));
+    fs = (MyHftpFileSystem) FileSystem.newInstance(fsUri, conf);
+    assertEquals(httpPort+1, fs.getCanonicalUri().getPort());
+    checkTokenSelection(fs, httpsPort, conf); // should still use secure port
+    
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 5);
+  }
+
+  @Test
+  public void testSelectHsftpDelegationToken() throws Exception {
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
+    Configuration conf = new Configuration();
+    conf.setClass("fs.hsftp.impl", MyHsftpFileSystem.class, FileSystem.class);
+
+    int httpPort = 80;
+    int httpsPort = 443;
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, httpPort);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, httpsPort);
+
+    // test with implicit default port 
+    URI fsUri = URI.create("hsftp://localhost");
+    MyHsftpFileSystem fs = (MyHsftpFileSystem) FileSystem.newInstance(fsUri, conf);
+    assertEquals(httpsPort, fs.getCanonicalUri().getPort());
+    checkTokenSelection(fs, httpsPort, conf); 
+
+    // test with explicit default port
+    fsUri = URI.create("hsftp://localhost:"+httpsPort);
+    fs = (MyHsftpFileSystem) FileSystem.newInstance(fsUri, conf);
+    assertEquals(httpsPort, fs.getCanonicalUri().getPort());
+    checkTokenSelection(fs, httpsPort, conf);
+    
+    // test with non-default port
+    fsUri = URI.create("hsftp://localhost:"+(httpsPort+1));
+    fs = (MyHsftpFileSystem) FileSystem.newInstance(fsUri, conf);
+    assertEquals(httpsPort+1, fs.getCanonicalUri().getPort());
+    checkTokenSelection(fs, httpsPort+1, conf);
+    
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 5);
+  }
+  
+  private void checkTokenSelection(HftpFileSystem fs,
+                                   int port,
+                                   Configuration conf) throws IOException {
+    UserGroupInformation ugi =
+        UserGroupInformation.createUserForTesting(fs.getUri().getAuthority(), new String[]{});
+
+    // use ip-based tokens
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
     // test fallback to hdfs token
     Token<?> hdfsToken = new Token<TokenIdentifier>(
         new byte[0], new byte[0],
         DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
         new Text("127.0.0.1:8020"));
     ugi.addToken(hdfsToken);
-    
-    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(hftpUri, conf);
-    token = fs.selectDelegationToken();
+
+    // with no hftp token present, selection falls back to the hdfs token
+    Token<?> token = fs.selectDelegationToken(ugi);
     assertNotNull(token);
     assertEquals(hdfsToken, token);
-    
+
     // test hftp is favored over hdfs
     Token<?> hftpToken = new Token<TokenIdentifier>(
         new byte[0], new byte[0],
-        HftpFileSystem.TOKEN_KIND, new Text("127.0.0.1:0"));
+        HftpFileSystem.TOKEN_KIND, new Text("127.0.0.1:"+port));
     ugi.addToken(hftpToken);
-    token = fs.selectDelegationToken();
+    token = fs.selectDelegationToken(ugi);
     assertNotNull(token);
     assertEquals(hftpToken, token);
     
     // switch to using host-based tokens, no token should match
     SecurityUtilTestHelper.setTokenServiceUseIp(false);
-    token = fs.selectDelegationToken();
+    token = fs.selectDelegationToken(ugi);
     assertNull(token);
     
     // test fallback to hdfs token
@@ -109,17 +176,45 @@ public class TestHftpDelegationToken {
         DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
         new Text("localhost:8020"));
     ugi.addToken(hdfsToken);
-    token = fs.selectDelegationToken();
+    token = fs.selectDelegationToken(ugi);
     assertNotNull(token);
     assertEquals(hdfsToken, token);
 
     // test hftp is favored over hdfs
     hftpToken = new Token<TokenIdentifier>(
         new byte[0], new byte[0],
-        HftpFileSystem.TOKEN_KIND, new Text("localhost:0"));
+        HftpFileSystem.TOKEN_KIND, new Text("localhost:"+port));
     ugi.addToken(hftpToken);
-    token = fs.selectDelegationToken();
+    token = fs.selectDelegationToken(ugi);
     assertNotNull(token);
     assertEquals(hftpToken, token);
   }
+  
+  static class MyHftpFileSystem extends HftpFileSystem {
+    @Override
+    public URI getCanonicalUri() {
+      return super.getCanonicalUri();
+    }
+    @Override
+    public int getDefaultPort() {
+      return super.getDefaultPort();
+    }
+    // don't automatically get a token
+    @Override
+    protected void initDelegationToken() throws IOException {}
+  }
+  
+  static class MyHsftpFileSystem extends HsftpFileSystem {
+    @Override
+    public URI getCanonicalUri() {
+      return super.getCanonicalUri();
+    }
+    @Override
+    public int getDefaultPort() {
+      return super.getDefaultPort();
+    }
+    // don't automatically get a token
+    @Override
+    protected void initDelegationToken() throws IOException {}
+  }
 }
\ No newline at end of file
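
A note on what checkTokenSelection above is exercising: a delegation
token's service field is the string a selector compares against the
service derived from the filesystem URI. With ip-based services that
string has the form "127.0.0.1:<port>"; with host-based services it is
"localhost:<port>", which is why flipping setTokenServiceUseIp makes a
previously matching token invisible. A minimal sketch of the two service
encodings, reusing the Token constructor from the test (the kind label
and user name below are illustrative, not from the patch):

  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.hadoop.security.token.Token;
  import org.apache.hadoop.security.token.TokenIdentifier;

  public class TokenServiceSketch {
    public static void main(String[] args) throws Exception {
      // Same endpoint, two service encodings; a selector matches exactly
      // one of them depending on the ip/host setting.
      Token<TokenIdentifier> ipToken = new Token<TokenIdentifier>(
          new byte[0], new byte[0], new Text("SKETCH_KIND"),
          new Text("127.0.0.1:443"));
      Token<TokenIdentifier> hostToken = new Token<TokenIdentifier>(
          new byte[0], new byte[0], new Text("SKETCH_KIND"),
          new Text("localhost:443"));

      UserGroupInformation ugi =
          UserGroupInformation.createUserForTesting("sketch", new String[]{});
      ugi.addToken(ipToken);
      ugi.addToken(hostToken);

      // Both tokens live in the same credentials; printing the services
      // shows the two forms a selector has to choose between.
      for (Token<? extends TokenIdentifier> t : ugi.getTokens()) {
        System.out.println(t.getKind() + " -> " + t.getService());
      }
    }
  }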

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java Thu May  3 02:14:01 2012
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.io.InputStream;
 import java.net.URISyntaxException;
 import java.net.URI;
 import java.net.URL;
@@ -234,6 +235,45 @@ public class TestHftpFileSystem {
     assertEquals('7', in.read());
   }
 
+  @Test
+  public void testReadClosedStream() throws IOException {
+    final Path testFile = new Path("/testfile+2");
+    FSDataOutputStream os = hdfs.create(testFile, true);
+    os.writeBytes("0123456789");
+    os.close();
+
+    // ByteRangeInputStream delays opening until the first read.  Make sure
+    // it doesn't open a closed stream that has never been opened.
+    FSDataInputStream in = hftpFs.open(testFile);
+    in.close();
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+    
+    // force the stream to connect and then close it
+    in = hftpFs.open(testFile);
+    int ch = in.read(); 
+    assertEquals('0', ch);
+    in.close();
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+    
+    // make sure seeking doesn't automagically reopen the stream
+    in.seek(4);
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+  }
+  
+  private void checkClosedStream(InputStream is) {
+    IOException ioe = null;
+    try {
+      is.read();
+    } catch (IOException e) {
+      ioe = e;
+    }
+    assertNotNull("No exception on closed read", ioe);
+    assertEquals("Stream closed", ioe.getMessage());
+  }
+  
   public void resetFileSystem() throws IOException {
     // filesystem caching has a quirk/bug that it caches based on the user's
     // given uri.  the result is if a filesystem is instantiated with no port,
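
The new testReadClosedStream pins down one invariant: because the hftp
input stream connects lazily, close() must be remembered so that later
reads and seeks fail with "Stream closed" rather than silently
reconnecting. A sketch of that guard pattern follows; it illustrates the
invariant only and is not the real ByteRangeInputStream:

  import java.io.IOException;
  import java.io.InputStream;

  class LazyOpenStream extends InputStream {
    private InputStream in;    // opened on first read, not at construction
    private boolean closed;

    private InputStream ensureOpen() throws IOException {
      if (closed) {
        throw new IOException("Stream closed");  // never reopen after close
      }
      if (in == null) {
        in = openUnderlying();  // hypothetical connect-on-demand hook
      }
      return in;
    }

    protected InputStream openUnderlying() throws IOException {
      return System.in;         // stand-in for an HTTP range request
    }

    @Override
    public int read() throws IOException {
      return ensureOpen().read();
    }

    @Override
    public void close() throws IOException {
      closed = true;            // remembered even if never opened
      if (in != null) {
        in.close();
      }
    }
  }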

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Thu May  3 02:14:01 2012
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -49,6 +50,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -90,7 +92,7 @@ public class TestLeaseRecovery2 {
 
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
     cluster.waitActive();
-    dfs = (DistributedFileSystem)cluster.getFileSystem();
+    dfs = cluster.getFileSystem();
   }
   
   /**
@@ -406,17 +408,26 @@ public class TestLeaseRecovery2 {
    */
   @Test
   public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception {
-    hardLeaseRecoveryRestartHelper(false);
+    hardLeaseRecoveryRestartHelper(false, -1);
   }
-  
+
+  @Test
+  public void testHardLeaseRecoveryAfterNameNodeRestart2() throws Exception {
+    hardLeaseRecoveryRestartHelper(false, 1535);
+  }
+
   @Test
   public void testHardLeaseRecoveryWithRenameAfterNameNodeRestart()
       throws Exception {
-    hardLeaseRecoveryRestartHelper(true);
+    hardLeaseRecoveryRestartHelper(true, -1);
   }
   
-  public void hardLeaseRecoveryRestartHelper(boolean doRename)
+  public void hardLeaseRecoveryRestartHelper(boolean doRename, int size)
       throws Exception {
+    if (size < 0) {
+      size = AppendTestUtil.nextInt(FILE_SIZE + 1);
+    }
+
     //create a file
     String fileStr = "/hardLeaseRecovery";
     AppendTestUtil.LOG.info("filestr=" + fileStr);
@@ -426,7 +437,6 @@ public class TestLeaseRecovery2 {
     assertTrue(dfs.dfs.exists(fileStr));
 
     // write bytes into the file.
-    int size = AppendTestUtil.nextInt(FILE_SIZE);
     AppendTestUtil.LOG.info("size=" + size);
     stm.write(buffer, 0, size);
     
@@ -440,6 +450,11 @@ public class TestLeaseRecovery2 {
     AppendTestUtil.LOG.info("hflush");
     stm.hflush();
     
+    // check visible length
+    final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath);
+    Assert.assertEquals(size, in.getVisibleLength());
+    in.close();
+    
     if (doRename) {
       fileStr += ".renamed";
       Path renamedPath = new Path(fileStr);
@@ -463,14 +478,11 @@ public class TestLeaseRecovery2 {
     // Make sure lease recovery begins.
     Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2);
     
-    assertEquals("lease holder should now be the NN", HdfsServerConstants.NAMENODE_LEASE_HOLDER,
-        NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
+    checkLease(fileStr, size);
     
     cluster.restartNameNode(false);
     
-    assertEquals("lease holder should still be the NN after restart",
-        HdfsServerConstants.NAMENODE_LEASE_HOLDER,
-        NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
+    checkLease(fileStr, size);
     
     // Let the DNs send heartbeats again.
     for (DataNode dn : cluster.getDataNodes()) {
@@ -492,12 +504,12 @@ public class TestLeaseRecovery2 {
     assertEquals(size, locatedBlocks.getFileLength());
 
     // make sure that the client can't write data anymore.
-    stm.write('b');
     try {
+      stm.write('b');
       stm.hflush();
       fail("Should not be able to flush after we've lost the lease");
     } catch (IOException e) {
-      LOG.info("Expceted exception on hflush", e);
+      LOG.info("Expceted exception on write/hflush", e);
     }
     
     try {
@@ -512,4 +524,16 @@ public class TestLeaseRecovery2 {
         "File size is good. Now validating sizes from datanodes...");
     AppendTestUtil.checkFullFile(dfs, filePath, size, buffer, fileStr);
   }
+  
+  static void checkLease(String f, int size) {
+    final String holder = NameNodeAdapter.getLeaseHolderForPath(
+        cluster.getNameNode(), f); 
+    if (size == 0) {
+      assertEquals("lease holder should null, file is closed", null, holder);
+    } else {
+      assertEquals("lease holder should now be the NN",
+          HdfsServerConstants.NAMENODE_LEASE_HOLDER, holder);
+    }
+    
+  }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java Thu May  3 02:14:01 2012
@@ -28,7 +28,7 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.ipc.RemoteException;
@@ -147,7 +147,7 @@ public class TestReadWhileWriting {
     
     final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
     
-    final DFSDataInputStream in = (DFSDataInputStream)fs.open(p);
+    final HdfsDataInputStream in = (HdfsDataInputStream)fs.open(p);
 
     //Check visible length
     Assert.assertTrue(in.getVisibleLength() >= expectedsize);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java Thu May  3 02:14:01 2012
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
@@ -187,7 +188,7 @@ public class TestReplaceDatanodeOnFailur
 
   static class SlowWriter extends Thread {
     final Path filepath;
-    private FSDataOutputStream out = null;
+    final HdfsDataOutputStream out;
     final long sleepms;
     private volatile boolean running = true;
     
@@ -195,7 +196,7 @@ public class TestReplaceDatanodeOnFailur
         ) throws IOException {
       super(SlowWriter.class.getSimpleName() + ":" + filepath);
       this.filepath = filepath;
-      this.out = fs.create(filepath, REPLICATION);
+      this.out = (HdfsDataOutputStream)fs.create(filepath, REPLICATION);
       this.sleepms = sleepms;
     }
 
@@ -231,8 +232,7 @@ public class TestReplaceDatanodeOnFailur
     }
 
     void checkReplication() throws IOException {
-      final DFSOutputStream dfsout = (DFSOutputStream)out.getWrappedStream();
-      Assert.assertEquals(REPLICATION, dfsout.getNumCurrentReplicas());
+      Assert.assertEquals(REPLICATION, out.getCurrentBlockReplication());
     }        
   }
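
The SlowWriter change above swaps a cast to the package-private
DFSOutputStream for the public HdfsDataOutputStream API. A hedged sketch
of the resulting pattern for probing the current pipeline width, assuming
fs is a DistributedFileSystem so the cast succeeds (the class and method
names below, other than the HDFS ones, are illustrative):

  import java.io.IOException;

  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

  final class ReplicationProbe {
    // Returns the number of datanodes currently in the write pipeline of
    // a freshly created file.
    static int pipelineWidth(FileSystem fs, Path p, short repl)
        throws IOException {
      HdfsDataOutputStream out = (HdfsDataOutputStream) fs.create(p, repl);
      try {
        out.write(0);
        out.hflush();  // force pipeline setup before asking for its width
        return out.getCurrentBlockReplication();
      } finally {
        out.close();
      }
    }
  }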
 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java Thu May  3 02:14:01 2012
@@ -342,7 +342,7 @@ public class TestSafeMode {
     String tipMsg = cluster.getNamesystem().getSafemode();
     assertTrue("Safemode tip message looks right: " + tipMsg,
                tipMsg.contains("The number of live datanodes 0 needs an additional " +
-                               "2 live datanodes to reach the minimum number 1. " +
+                               "1 live datanodes to reach the minimum number 1. " +
                                "Safe mode will be turned off automatically."));
 
     // Start a datanode

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java Thu May  3 02:14:01 2012
@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -119,7 +119,7 @@ public class TestShortCircuitLocalRead {
    */
   static void checkFileContentDirect(FileSystem fs, Path name, byte[] expected,
       int readOffset) throws IOException {
-    DFSDataInputStream stm = (DFSDataInputStream)fs.open(name);
+    HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);
 
     ByteBuffer actual = ByteBuffer.allocate(expected.length - readOffset);
 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java Thu May  3 02:14:01 2012
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -155,7 +156,7 @@ public class TestWriteRead {
     try {
       in = openInputStream(path);
 
-      long visibleLenFromReadStream = getVisibleFileLength(in);
+      long visibleLenFromReadStream = ((HdfsDataInputStream)in).getVisibleLength();
 
       if (visibleLenFromReadStream < byteExpected)
       {
@@ -418,11 +419,6 @@ public class TestWriteRead {
     return fileStatus.getLen();
   }
 
-  private long getVisibleFileLength(FSDataInputStream in) throws IOException {
-    DFSClient.DFSDataInputStream din = (DFSClient.DFSDataInputStream) in;
-    return din.getVisibleLength();
-  }
-
   private boolean ifExists(Path path) throws IOException {
     return useFCOption ? mfc.util().exists(path) : mfs.exists(path);
   }
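
Several hunks in this commit make the same migration as the one above:
DFSClient.DFSDataInputStream gives way to the public
org.apache.hadoop.hdfs.client.HdfsDataInputStream, whose
getVisibleLength() reports how many bytes readers are guaranteed to see
even while the file is still open for write. A small sketch of the
inlined pattern (the helper name is illustrative):

  import java.io.IOException;

  import org.apache.hadoop.fs.FSDataInputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

  final class VisibleLengthProbe {
    // The cast is required because getVisibleLength is an HDFS-specific
    // extension, not part of the generic FSDataInputStream contract.
    static long visibleLength(FileSystem fs, Path p) throws IOException {
      FSDataInputStream in = fs.open(p);
      try {
        return ((HdfsDataInputStream) in).getVisibleLength();
      } finally {
        in.close();
      }
    }
  }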

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Thu May  3 02:14:01 2012
@@ -26,8 +26,6 @@ import java.util.List;
 import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -46,11 +44,14 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 /**
  * This class tests if a balancer schedules tasks correctly.
  */
-public class TestBalancer extends TestCase {
+public class TestBalancer {
   private static final Log LOG = LogFactory.getLog(
   "org.apache.hadoop.hdfs.TestBalancer");
   
@@ -365,8 +366,33 @@ public class TestBalancer extends TestCa
     oneNodeTest(conf);
   }
   
+  /**
+   * Test parse method in Balancer#Cli class with threshold value out of
+   * range.
+   */
+  @Test
+  public void testBalancerCliParseWithThresholdOutOfBoundaries() {
+    String parameters[] = new String[] { "-threshold", "0" };
+    String reason = "IllegalArgumentException is expected when the threshold"
+        + " value is out of range.";
+    try {
+      Balancer.Cli.parse(parameters);
+      fail(reason);
+    } catch (IllegalArgumentException e) {
+      assertEquals("Number out of range: threshold = 0.0", e.getMessage());
+    }
+    parameters = new String[] { "-threshold", "101" };
+    try {
+      Balancer.Cli.parse(parameters);
+      fail(reason);
+    } catch (IllegalArgumentException e) {
+      assertEquals("Number out of range: threshold = 101.0", e.getMessage());
+    }
+  }
+  
   /** Test a cluster with even distribution, 
    * then a new empty node is added to the cluster*/
+  @Test
   public void testBalancer0() throws Exception {
     Configuration conf = new HdfsConfiguration();
     initConf(conf);
@@ -375,6 +401,7 @@ public class TestBalancer extends TestCa
   }
 
   /** Test unevenly distributed cluster */
+  @Test
   public void testBalancer1() throws Exception {
     Configuration conf = new HdfsConfiguration();
     initConf(conf);
@@ -384,6 +411,7 @@ public class TestBalancer extends TestCa
         new String[] {RACK0, RACK1});
   }
   
+  @Test
   public void testBalancer2() throws Exception {
     Configuration conf = new HdfsConfiguration();
     initConf(conf);
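
For reference, the boundary behavior the new Cli test pins down:
thresholds outside [1, 100] are rejected with the exact message asserted
above. A minimal sketch of such a check, which is an assumption about the
shape of the validation rather than Balancer.Cli's actual code:

  final class ThresholdCheckSketch {
    static double parseThreshold(String value) {
      double threshold = Double.parseDouble(value);
      if (threshold < 1 || threshold > 100) {
        // Double.parseDouble("0") prints as 0.0, matching the asserted
        // message "Number out of range: threshold = 0.0"
        throw new IllegalArgumentException(
            "Number out of range: threshold = " + threshold);
      }
      return threshold;
    }

    public static void main(String[] args) {
      System.out.println(parseThreshold("50"));  // accepted
      parseThreshold("0");                       // throws, as the test expects
    }
  }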

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java Thu May  3 02:14:01 2012
@@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
 import org.junit.Assert;
@@ -86,7 +85,7 @@ public class TestInterDatanodeProtocol {
     }
 
     @Override
-    public Writable call(RpcKind rpcKind, String protocol, Writable param, long receiveTime)
+    public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param, long receiveTime)
         throws IOException {
       if (sleep) {
         // sleep a bit

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Thu May  3 02:14:01 2012
@@ -126,7 +126,8 @@ public class NameNodeAdapter {
   }
 
   public static String getLeaseHolderForPath(NameNode namenode, String path) {
-    return namenode.getNamesystem().leaseManager.getLeaseByPath(path).getHolder();
+    Lease l = namenode.getNamesystem().leaseManager.getLeaseByPath(path);
+    return l == null ? null : l.getHolder();
   }
 
   /**

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java Thu May  3 02:14:01 2012
@@ -27,13 +27,19 @@ import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.TestGenericJournalConf.DummyJournalManager;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -144,4 +150,34 @@ public class TestAllowFormat {
     NameNode.format(config);
     LOG.info("Done verifying format will succeed with allowformat true");
   }
+
+  /**
+   * Test that format is skipped for directories configured with a non-file scheme.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    String logicalName = "mycluster";
+
+    // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify that the NameNode
+    // is configured for HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
+    // considered.
+    String localhost = "127.0.0.1";
+    InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
+    InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
+    HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
+
+    conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
+    conf.set(DFSUtil.addKeySuffixes(
+        DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
+        DummyJournalManager.class.getName());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
+        + localhost + ":2181/ledgers");
+    conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
+
+    // An internal assert verifies the behavior exercised by this test.
+    NameNode.format(conf);
+  }
 }


