hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1099687 [9/15] - in /hadoop/hdfs/branches/HDFS-1073: ./ bin/ src/c++/libhdfs/ src/contrib/hdfsproxy/ src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/pro...
Date: Thu, 05 May 2011 05:40:13 GMT
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu May  5 05:40:07 2011
@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.UnresolvedLi
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -53,6 +54,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -172,6 +174,8 @@ public class NameNode implements Namenod
 
   public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
   public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
+  
+  public static final String NAMENODE_ADDRESS_ATTRIBUTE_KEY = "name.node.address";
 
   protected FSNamesystem namesystem; 
   protected NamenodeRole role;
@@ -227,7 +231,7 @@ public class NameNode implements Namenod
   public static InetSocketAddress getAddress(String address) {
     return NetUtils.createSocketAddr(address, DEFAULT_PORT);
   }
-
+  
   /**
    * Set the configuration property for the service rpc address
    * to address
@@ -256,6 +260,16 @@ public class NameNode implements Namenod
 
   public static InetSocketAddress getAddress(Configuration conf) {
     URI filesystemURI = FileSystem.getDefaultUri(conf);
+    return getAddress(filesystemURI);
+  }
+
+
+  /**
+   * TODO:FEDERATION
+   * @param filesystemURI
+   * @return address of file system
+   */
+  public static InetSocketAddress getAddress(URI filesystemURI) {
     String authority = filesystemURI.getAuthority();
     if (authority == null) {
       throw new IllegalArgumentException(String.format(
@@ -436,14 +450,6 @@ public class NameNode implements Namenod
     this.emptier.setDaemon(true);
     this.emptier.start();
   }
-
-  public static String getInfoServer(Configuration conf) {
-    return UserGroupInformation.isSecurityEnabled() ? conf.get(
-        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT) : conf.get(
-        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT);
-  }
   
   private void startHttpServer(final Configuration conf) throws IOException {
     final InetSocketAddress infoSocAddr = getHttpServerAddress(conf);
@@ -498,7 +504,8 @@ public class NameNode implements Namenod
                 .getPort());
           }
           httpServer.setAttribute("name.node", NameNode.this);
-          httpServer.setAttribute("name.node.address", getNameNodeAddress());
+          httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY,
+              getNameNodeAddress());
           httpServer.setAttribute("name.system.image", getFSImage());
           httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
           httpServer.addInternalServlet("getDelegationToken",
@@ -584,6 +591,7 @@ public class NameNode implements Namenod
       throws IOException { 
     this.role = role;
     try {
+      initializeGenericKeys(conf);
       initialize(conf);
     } catch (IOException e) {
       this.stop();
@@ -817,7 +825,7 @@ public class NameNode implements Namenod
   @Override
   public LocatedBlock addBlock(String src,
                                String clientName,
-                               Block previous,
+                               ExtendedBlock previous,
                                DatanodeInfo[] excludedNodes)
       throws IOException {
     if(stateChangeLog.isDebugEnabled()) {
@@ -839,7 +847,7 @@ public class NameNode implements Namenod
   }
 
   @Override
-  public LocatedBlock getAdditionalDatanode(final String src, final Block blk,
+  public LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk,
       final DatanodeInfo[] existings, final DatanodeInfo[] excludes,
       final int numAdditionalNodes, final String clientName
       ) throws IOException {
@@ -868,7 +876,7 @@ public class NameNode implements Namenod
   /**
    * The client needs to give up on the block.
    */
-  public void abandonBlock(Block b, String src, String holder)
+  public void abandonBlock(ExtendedBlock b, String src, String holder)
       throws IOException {
     if(stateChangeLog.isDebugEnabled()) {
       stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: "
@@ -880,7 +888,7 @@ public class NameNode implements Namenod
   }
 
   /** {@inheritDoc} */
-  public boolean complete(String src, String clientName, Block last)
+  public boolean complete(String src, String clientName, ExtendedBlock last)
       throws IOException {
     if(stateChangeLog.isDebugEnabled()) {
       stateChangeLog.debug("*DIR* NameNode.complete: "
@@ -898,7 +906,7 @@ public class NameNode implements Namenod
   public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
     stateChangeLog.info("*DIR* NameNode.reportBadBlocks");
     for (int i = 0; i < blocks.length; i++) {
-      Block blk = blocks[i].getBlock();
+      ExtendedBlock blk = blocks[i].getBlock();
       DatanodeInfo[] nodes = blocks[i].getLocations();
       for (int j = 0; j < nodes.length; j++) {
         DatanodeInfo dn = nodes[j];
@@ -909,21 +917,21 @@ public class NameNode implements Namenod
 
   /** {@inheritDoc} */
   @Override
-  public LocatedBlock updateBlockForPipeline(Block block, String clientName)
+  public LocatedBlock updateBlockForPipeline(ExtendedBlock block, String clientName)
       throws IOException {
     return namesystem.updateBlockForPipeline(block, clientName);
   }
 
 
   @Override
-  public void updatePipeline(String clientName, Block oldBlock,
-      Block newBlock, DatanodeID[] newNodes)
+  public void updatePipeline(String clientName, ExtendedBlock oldBlock,
+      ExtendedBlock newBlock, DatanodeID[] newNodes)
       throws IOException {
     namesystem.updatePipeline(clientName, oldBlock, newBlock, newNodes);
   }
   
   /** {@inheritDoc} */
-  public void commitBlockSynchronization(Block block,
+  public void commitBlockSynchronization(ExtendedBlock block,
       long newgenerationstamp, long newlength,
       boolean closeFile, boolean deleteblock, DatanodeID[] newtargets)
       throws IOException {
@@ -1275,14 +1283,22 @@ public class NameNode implements Namenod
                                        long capacity,
                                        long dfsUsed,
                                        long remaining,
+                                       long blockPoolUsed,
                                        int xmitsInProgress,
-                                       int xceiverCount) throws IOException {
+                                       int xceiverCount,
+                                       int failedVolumes) throws IOException {
     verifyRequest(nodeReg);
     return namesystem.handleHeartbeat(nodeReg, capacity, dfsUsed, remaining,
-        xceiverCount, xmitsInProgress);
+        blockPoolUsed, xceiverCount, xmitsInProgress, failedVolumes);
   }
 
+  /**
+   * Handles a block report from a datanode for the given block pool (poolId).
+   * @return DatanodeCommand from the namenode
+   * @throws IOException
+   */
   public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
+                                     String poolId,
                                      long[] blocks) throws IOException {
     verifyRequest(nodeReg);
     BlockListAsLongs blist = new BlockListAsLongs(blocks);
@@ -1292,13 +1308,14 @@ public class NameNode implements Namenod
            + " blocks");
     }
 
-    namesystem.processReport(nodeReg, blist);
+    namesystem.processReport(nodeReg, poolId, blist);
     if (getFSImage().isUpgradeFinalized())
-      return DatanodeCommand.FINALIZE;
+      return new DatanodeCommand.Finalize(poolId);
     return null;
   }
 
   public void blockReceived(DatanodeRegistration nodeReg, 
+                            String poolId,
                             Block blocks[],
                             String delHints[]) throws IOException {
     verifyRequest(nodeReg);
@@ -1307,27 +1324,30 @@ public class NameNode implements Namenod
           +"from "+nodeReg.getName()+" "+blocks.length+" blocks.");
     }
     for (int i = 0; i < blocks.length; i++) {
-      namesystem.blockReceived(nodeReg, blocks[i], delHints[i]);
+      namesystem.blockReceived(nodeReg, poolId, blocks[i], delHints[i]);
     }
   }
 
   /**
+   * Handle an error report from a datanode.
    */
   public void errorReport(DatanodeRegistration nodeReg,
-                          int errorCode, 
-                          String msg) throws IOException {
-    // Log error message from datanode
+                          int errorCode, String msg) throws IOException { 
     String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName());
-    LOG.info("Error report from " + dnName + ": " + msg);
+
     if (errorCode == DatanodeProtocol.NOTIFY) {
+      LOG.info("Error report from " + dnName + ": " + msg);
       return;
     }
     verifyRequest(nodeReg);
-    namesystem.incVolumeFailure(nodeReg);
+
     if (errorCode == DatanodeProtocol.DISK_ERROR) {
-      LOG.warn("Volume failed on " + dnName); 
+      LOG.warn("Disk error on " + dnName + ": " + msg);
     } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
+      LOG.warn("Fatal disk error on " + dnName + ": " + msg);
       namesystem.removeDatanode(nodeReg);            
+    } else {
+      LOG.info("Error report from " + dnName + ": " + msg);
     }
   }
     
@@ -1350,8 +1370,12 @@ public class NameNode implements Namenod
    */
   public void verifyRequest(NodeRegistration nodeReg) throws IOException {
     verifyVersion(nodeReg.getVersion());
-    if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID()))
+    if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) {
+      LOG.warn("Invalid registrationID - expected: "
+          + namesystem.getRegistrationID() + " received: "
+          + nodeReg.getRegistrationID());
       throw new UnregisteredNodeException(nodeReg);
+    }
   }
     
   /**
@@ -1386,11 +1410,20 @@ public class NameNode implements Namenod
 
   /**
    * Returns the address on which the NameNodes is listening to.
-   * @return the address on which the NameNodes is listening to.
+   * @return namenode rpc address
    */
   public InetSocketAddress getNameNodeAddress() {
     return rpcAddress;
   }
+  
+  /**
+   * Returns namenode service rpc address, if set. Otherwise returns
+   * namenode rpc address.
+   * @return namenode service rpc address used by datanodes
+   */
+  public InetSocketAddress getServiceRpcAddress() {
+    return serviceRPCAddress != null ? serviceRPCAddress : rpcAddress;
+  }
 
   /**
    * Returns the address of the NameNodes http server, 
@@ -1437,7 +1470,7 @@ public class NameNode implements Namenod
         continue;
       if (isConfirmationNeeded) {
         System.err.print("Re-format filesystem in " + curDir +" ? (Y or N) ");
-        if (!(System.in.read() == 'Y')) {
+        if (System.in.read() != 'Y') {
           System.err.println("Format aborted in "+ curDir);
           return true;
         }
@@ -1445,9 +1478,26 @@ public class NameNode implements Namenod
       }
     }
 
-    FSNamesystem nsys = new FSNamesystem(new FSImage(dirsToFormat,
-                                         editDirsToFormat), conf);
-    nsys.dir.fsImage.getStorage().format();
+    FSImage fsImage = new FSImage(dirsToFormat, editDirsToFormat);
+    FSNamesystem nsys = new FSNamesystem(fsImage, conf);
+    
+    // if clusterID is not provided - see if you can find the current one
+    String clusterId = StartupOption.FORMAT.getClusterId();
+    if(clusterId == null || clusterId.equals("")) {
+      // try to get one from the existing storage
+      clusterId = fsImage.getStorage().determineClusterId();
+      if (clusterId == null || clusterId.equals("")) {
+        throw new IllegalArgumentException("Format must be provided with clusterid");
+      }
+      if(isConfirmationNeeded) {
+        System.err.print("Use existing cluster id=" + clusterId + "? (Y or N)");
+        if(System.in.read() != 'Y') {
+          throw new IllegalArgumentException("Format must be provided with clusterid");
+        }
+        while(System.in.read() != '\n'); // discard the enter-key
+      }
+    }
+    nsys.dir.fsImage.getStorage().format(clusterId);
     return false;
   }
 
@@ -1506,7 +1556,8 @@ public class NameNode implements Namenod
       "Usage: java NameNode [" +
       StartupOption.BACKUP.getName() + "] | [" +
       StartupOption.CHECKPOINT.getName() + "] | [" +
-      StartupOption.FORMAT.getName() + "] | [" +
+      StartupOption.FORMAT.getName() + "[" + StartupOption.CLUSTERID.getName() +  
+      " cid ]] | [" +
       StartupOption.UPGRADE.getName() + "] | [" +
       StartupOption.ROLLBACK.getName() + "] | [" +
       StartupOption.FINALIZE.getName() + "] | [" +
@@ -1520,6 +1571,14 @@ public class NameNode implements Namenod
       String cmd = args[i];
       if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.FORMAT;
+        // might be followed by two args
+        if (i + 2 < argsLen
+            && args[i + 1].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
+          i += 2;
+          startOpt.setClusterId(args[i]);
+        }
+      } else if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) {
+        startOpt = StartupOption.GENCLUSTERID;
       } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.REGULAR;
       } else if (StartupOption.BACKUP.getName().equalsIgnoreCase(cmd)) {
@@ -1528,6 +1587,12 @@ public class NameNode implements Namenod
         startOpt = StartupOption.CHECKPOINT;
       } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.UPGRADE;
+        // might be followed by two args
+        if (i + 2 < argsLen
+            && args[i + 1].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
+          i += 2;
+          startOpt.setClusterId(args[i]);
+        }
       } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.ROLLBACK;
       } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
@@ -1565,6 +1630,11 @@ public class NameNode implements Namenod
         boolean aborted = format(conf, true);
         System.exit(aborted ? 1 : 0);
         return null; // avoid javac warning
+      case GENCLUSTERID:
+        System.err.println("Generating new cluster id:");
+        System.out.println(NNStorage.newClusterID());
+        System.exit(0);
+        return null;
       case FINALIZE:
         aborted = finalize(conf, true);
         System.exit(aborted ? 1 : 0);
@@ -1576,6 +1646,47 @@ public class NameNode implements Namenod
         return new NameNode(conf);
     }
   }
+
+  /**
+   * In a federated setup, configuration is provided for a set of
+   * namenodes and secondary namenode/backup/checkpointer nodes, which are
+   * grouped under a logical nameservice ID. The configuration keys specific
+   * to them are suffixed with the configured nameserviceId.
+   * 
+   * This method copies the value from each specific key of the form
+   * key.nameserviceId to the plain key, setting up the generic configuration.
+   * Once this is done, only the generic version of the configuration is read
+   * in the rest of the code, for backward compatibility and simpler changes.
+   * 
+   * @param conf
+   *          Configuration object to lookup specific key and to set the value
+   *          to the key passed. Note the conf object is modified
+   * @see DFSUtil#setGenericConf()
+   */
+  static void initializeGenericKeys(Configuration conf) {
+    final String nameserviceId = DFSUtil.getNameServiceId(conf);
+    if ((nameserviceId == null) || nameserviceId.isEmpty()) {
+      return;
+    }
+    
+    DFSUtil.setGenericConf(conf, nameserviceId,
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
+        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
+        DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY);
+    
+    if (conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
+      URI defaultUri = URI.create(FSConstants.HDFS_URI_SCHEME + "://"
+          + conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
+      conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, defaultUri.toString());
+    }
+  }
     
   /**
    */
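
The federation key setup above relies on DFSUtil.setGenericConf to copy per-nameservice values onto the plain keys. A minimal sketch of that copy, assuming the suffixed-key convention described in the javadoc (key + "." + nameserviceId); only the method name appears in this diff, so the body here is an assumption:

    import org.apache.hadoop.conf.Configuration;

    class GenericConfSketch {
      // For each listed key, copy "key.<nameserviceId>" to "key" if present,
      // so the rest of the code can keep reading the generic key names.
      static void setGenericConf(Configuration conf, String nameserviceId,
          String... keys) {
        for (String key : keys) {
          String value = conf.get(key + "." + nameserviceId);
          if (value != null) {
            conf.set(key, value);
          }
        }
      }
    }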

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java Thu May  5 05:40:07 2011
@@ -21,7 +21,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 
 /**
- * 
  * This is the JMX management interface for namenode information
  */
 @InterfaceAudience.Public
@@ -56,6 +55,7 @@ public interface NameNodeMXBean {
    */
   public long getTotal();
   
+  
   /**
    * Gets the safemode status
    * 
@@ -95,6 +95,16 @@ public interface NameNodeMXBean {
   public float getPercentRemaining();
   
   /**
+   * Get the total space used by the block pools of this namenode
+   */
+  public long getBlockPoolUsedSpace();
+  
+  /**
+   * Get the total space used by the block pool as percentage of total capacity
+   */
+  public float getPercentBlockPoolUsed();
+    
+  /**
    * Gets the total numbers of blocks on the cluster.
    * 
    * @return the total number of blocks of the cluster
@@ -135,4 +145,18 @@ public interface NameNodeMXBean {
    * @return the decommissioning node information
    */
   public String getDecomNodes();
+  
+  /**
+   * Gets the cluster id.
+   * 
+   * @return the cluster id
+   */
+  public String getClusterId();
+  
+  /**
+   * Gets the block pool id.
+   * 
+   * @return the block pool id
+   */
+  public String getBlockPoolId();
 }
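
The new ClusterId and BlockPoolId attributes surface through JMX like the existing ones. A hedged sketch of reading them from a running namenode; the JMX endpoint and the ObjectName (Hadoop's usual service/name pattern) are assumptions here, not part of this commit:

    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    class MXBeanReadSketch {
      public static void main(String[] args) throws Exception {
        // Assumed JMX endpoint and MBean name; adjust for the deployment.
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://nn-host:8004/jmxrmi");
        JMXConnector jmxc = JMXConnectorFactory.connect(url);
        try {
          MBeanServerConnection mbs = jmxc.getMBeanServerConnection();
          ObjectName nn =
              new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
          System.out.println("ClusterId = " + mbs.getAttribute(nn, "ClusterId"));
          System.out.println("BlockPoolId = "
              + mbs.getAttribute(nn, "BlockPoolId"));
          System.out.println("BlockPoolUsedSpace = "
              + mbs.getAttribute(nn, "BlockPoolUsedSpace"));
        } finally {
          jmxc.close();
        }
      }
    }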

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Thu May  5 05:40:07 2011
@@ -39,7 +39,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -297,7 +297,7 @@ public class NamenodeFsck {
     StringBuilder report = new StringBuilder();
     int i = 0;
     for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
-      Block block = lBlk.getBlock();
+      ExtendedBlock block = lBlk.getBlock();
       boolean isCorrupt = lBlk.isCorrupt();
       String blkName = block.toString();
       DatanodeInfo[] locs = lBlk.getLocations();
@@ -311,7 +311,8 @@ public class NamenodeFsck {
       if (isCorrupt) {
         corrupt++;
         res.corruptBlocks++;
-        out.print("\n" + path + ": CORRUPT block " + block.getBlockName()+"\n");
+        out.print("\n" + path + ": CORRUPT blockpool " + block.getBlockPoolId() + 
+            " block " + block.getBlockName()+"\n");
       }
       if (locs.length >= minReplication)
         res.numMinReplicatedBlocks++;
@@ -476,7 +477,7 @@ public class NamenodeFsck {
     TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
     Socket s = null;
     BlockReader blockReader = null; 
-    Block block = lblock.getBlock(); 
+    ExtendedBlock block = lblock.getBlock(); 
 
     while (s == null) {
       DatanodeInfo chosenNode;
@@ -502,7 +503,8 @@ public class NamenodeFsck {
         s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
         s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
         
-        String file = BlockReader.getFileName(targetAddr, block.getBlockId());
+        String file = BlockReader.getFileName(targetAddr, block.getBlockPoolId(),
+            block.getBlockId());
         blockReader = BlockReader.newBlockReader(s, file, block, lblock
             .getBlockToken(), 0, -1, conf.getInt("io.file.buffer.size", 4096));
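
fsck can now name the pool because ExtendedBlock pairs a block pool ID with the underlying block identity. A minimal value-object sketch of that pairing; the accessor names follow the calls in the diff, while the exact class layout is an assumption:

    // Sketch: a block identified within a specific block pool.
    class ExtendedBlockSketch {
      private final String poolId;   // which namespace's pool owns the block
      private final long blockId;
      private final long generationStamp;

      ExtendedBlockSketch(String poolId, long blockId, long generationStamp) {
        this.poolId = poolId;
        this.blockId = blockId;
        this.generationStamp = generationStamp;
      }

      String getBlockPoolId() { return poolId; }
      long getBlockId() { return blockId; }
      String getBlockName() { return "blk_" + blockId; }

      // Unique only together with the pool ID, e.g. "BP-...:blk_...".
      public String toString() { return poolId + ":" + getBlockName(); }
    }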
         

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Thu May  5 05:40:07 2011
@@ -22,6 +22,7 @@ import java.lang.management.ManagementFa
 import java.lang.management.MemoryMXBean;
 import java.lang.management.MemoryUsage;
 import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.net.URLEncoder;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -29,11 +30,13 @@ import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
 
+import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.servlet.jsp.JspWriter;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -131,7 +134,12 @@ class NamenodeJspHelper {
         + "\n  <tr><td id='col1'>Compiled:</td><td>" + VersionInfo.getDate()
         + " by " + VersionInfo.getUser() + " from " + VersionInfo.getBranch()
         + "\n  <tr><td id='col1'>Upgrades:</td><td>"
-        + getUpgradeStatusText(fsn) + "\n</table></div>";
+        + getUpgradeStatusText(fsn) 
+        + "\n  <tr><td id='col1'>Cluster ID:</td><td>" + fsn.getClusterId()
+        + "</td></tr>\n" 
+        + "\n  <tr><td id='col1'>Block Pool ID:</td><td>" + fsn.getBlockPoolId()
+        + "</td></tr>\n" 
+        + "\n</table></div>";
   }
 
   static String getWarningText(FSNamesystem fsn) {
@@ -209,7 +217,12 @@ class NamenodeJspHelper {
       ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
       ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
       fsn.DFSNodesStatus(live, dead);
-      
+      // If a datanode was first added to the include list, then
+      // decommissioned, and then removed from both the include and exclude
+      // lists, the web console should "forget" it by not displaying it.
+      fsn.removeDecomNodeFromList(live);  
+      fsn.removeDecomNodeFromList(dead); 
+
       int liveDecommissioned = 0;
       for (DatanodeDescriptor d : live) {
         liveDecommissioned += d.isDecommissioned() ? 1 : 0;
@@ -254,10 +267,8 @@ class NamenodeJspHelper {
       long used = fsnStats[1];
       long nonDFS = total - remaining - used;
       nonDFS = nonDFS < 0 ? 0 : nonDFS;
-      float percentUsed = total <= 0 ? 0f : ((float) used * 100.0f)
-          / (float) total;
-      float percentRemaining = total <= 0 ? 100f : ((float) remaining * 100.0f)
-          / (float) total;
+      float percentUsed = DFSUtil.getPercentUsed(used, total);
+      float percentRemaining = DFSUtil.getPercentRemaining(used, total);
       float median = 0;
       float max = 0;
       float min = 0;
@@ -283,6 +294,9 @@ class NamenodeJspHelper {
         dev = (float) Math.sqrt(dev/usages.length);
       }
 
+      long bpUsed = fsnStats[6];
+      float percentBpUsed = DFSUtil.getPercentUsed(bpUsed, total);
+      
       out.print("<div id=\"dfstable\"> <table>\n" + rowTxt() + colTxt()
           + "Configured Capacity" + colTxt() + ":" + colTxt()
           + StringUtils.byteDesc(total) + rowTxt() + colTxt() + "DFS Used"
@@ -295,6 +309,10 @@ class NamenodeJspHelper {
           + StringUtils.limitDecimalTo2(percentUsed) + " %" + rowTxt()
           + colTxt() + "DFS Remaining%" + colTxt() + ":" + colTxt()
           + StringUtils.limitDecimalTo2(percentRemaining) + " %"
+          + rowTxt() + colTxt() + "Block Pool Used" + colTxt() + ":" + colTxt()
+          + StringUtils.byteDesc(bpUsed) + rowTxt()
+          + colTxt() + "Block Pool Used%"+ colTxt() + ":" + colTxt()
+          + StringUtils.limitDecimalTo2(percentBpUsed) + " %" 
           + rowTxt() + colTxt() + "DataNodes usages" + colTxt() + ":" + colTxt()
           + "Min %" + colTxt() + "Median %" + colTxt() + "Max %" + colTxt()
           + "stdev %" + rowTxt() + colTxt() + colTxt() + colTxt()
@@ -326,28 +344,26 @@ class NamenodeJspHelper {
   }
 
   static String getDelegationToken(final NameNode nn,
-      HttpServletRequest request, Configuration conf) throws IOException,
-      InterruptedException {
-    final UserGroupInformation ugi = JspHelper.getUGI(request, conf);
-
+      HttpServletRequest request, Configuration conf,
+      final UserGroupInformation ugi) throws IOException, InterruptedException {
     Token<DelegationTokenIdentifier> token = ugi
         .doAs(new PrivilegedExceptionAction<Token<DelegationTokenIdentifier>>() {
           public Token<DelegationTokenIdentifier> run() throws IOException {
             return nn.getDelegationToken(new Text(ugi.getUserName()));
           }
         });
-
     return token == null ? null : token.encodeToUrlString();
   }
 
-  static void redirectToRandomDataNode(final NameNode nn, 
-                                       HttpServletRequest request,
-                                       HttpServletResponse resp,
-                                       Configuration conf
-                                       ) throws IOException,
-                                                InterruptedException {
+  static void redirectToRandomDataNode(ServletContext context,
+      HttpServletRequest request, HttpServletResponse resp) throws IOException,
+      InterruptedException {
+    final NameNode nn = (NameNode) context.getAttribute("name.node");
+    final Configuration conf = (Configuration) context
+        .getAttribute(JspHelper.CURRENT_CONF);
     final DatanodeID datanode = nn.getNamesystem().getRandomDatanode();
-    String tokenString = getDelegationToken(nn, request, conf);
+    UserGroupInformation ugi = JspHelper.getUGI(context, request, conf);
+    String tokenString = getDelegationToken(nn, request, conf, ugi);
     // if the user is defined, get a delegation token and stringify it
     final String redirectLocation;
     final String nodeToRedirect;
@@ -359,12 +375,14 @@ class NamenodeJspHelper {
       nodeToRedirect = nn.getHttpAddress().getHostName();
       redirectPort = nn.getHttpAddress().getPort();
     }
+    String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
     String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName();
     redirectLocation = "http://" + fqdn + ":" + redirectPort
         + "/browseDirectory.jsp?namenodeInfoPort="
         + nn.getHttpAddress().getPort() + "&dir=/"
         + (tokenString == null ? "" :
-           JspHelper.getDelegationTokenUrlParam(tokenString));
+           JspHelper.getDelegationTokenUrlParam(tokenString))
+        + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
     resp.sendRedirect(redirectLocation);
   }
 
@@ -405,11 +423,13 @@ class NamenodeJspHelper {
     }
 
     private void generateNodeDataHeader(JspWriter out, DatanodeDescriptor d,
-        String suffix, boolean alive, int nnHttpPort) throws IOException {
+        String suffix, boolean alive, int nnHttpPort, String nnaddr)
+        throws IOException {
       // from nn_browsedfscontent.jsp:
       String url = "http://" + d.getHostName() + ":" + d.getInfoPort()
           + "/browseDirectory.jsp?namenodeInfoPort=" + nnHttpPort + "&dir="
-          + URLEncoder.encode("/", "UTF-8");
+          + URLEncoder.encode("/", "UTF-8")
+          + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnaddr);
 
       String name = d.getHostName() + ":" + d.getPort();
       if (!name.matches("\\d+\\.\\d+.\\d+\\.\\d+.*"))
@@ -424,8 +444,9 @@ class NamenodeJspHelper {
     }
 
     void generateDecommissioningNodeData(JspWriter out, DatanodeDescriptor d,
-        String suffix, boolean alive, int nnHttpPort) throws IOException {
-      generateNodeDataHeader(out, d, suffix, alive, nnHttpPort);
+        String suffix, boolean alive, int nnHttpPort, String nnaddr)
+        throws IOException {
+      generateNodeDataHeader(out, d, suffix, alive, nnHttpPort, nnaddr);
       if (!alive) {
         return;
       }
@@ -448,8 +469,8 @@ class NamenodeJspHelper {
           + "\n");
     }
     
-    void generateNodeData(JspWriter out, DatanodeDescriptor d,
-        String suffix, boolean alive, int nnHttpPort) throws IOException {
+    void generateNodeData(JspWriter out, DatanodeDescriptor d, String suffix,
+        boolean alive, int nnHttpPort, String nnaddr) throws IOException {
       /*
        * Say the datanode is dn1.hadoop.apache.org with ip 192.168.0.5 we use:
        * 1) d.getHostName():d.getPort() to display. Domain and port are stripped
@@ -461,7 +482,7 @@ class NamenodeJspHelper {
        * interact with datanodes.
        */
 
-      generateNodeDataHeader(out, d, suffix, alive, nnHttpPort);
+      generateNodeDataHeader(out, d, suffix, alive, nnHttpPort, nnaddr);
       if (!alive) {
         out.print("<td class=\"decommissioned\"> " + 
             d.isDecommissioned() + "\n");
@@ -480,6 +501,11 @@ class NamenodeJspHelper {
 
       long timestamp = d.getLastUpdate();
       long currentTime = System.currentTimeMillis();
+      
+      long bpUsed = d.getBlockPoolUsed();
+      String percentBpUsed = StringUtils.limitDecimalTo2(d
+          .getBlockPoolUsedPercent());
+
       out.print("<td class=\"lastcontact\"> "
           + ((currentTime - timestamp) / 1000)
           + "<td class=\"adminstate\">"
@@ -496,18 +522,31 @@ class NamenodeJspHelper {
           + percentUsed
           + "<td class=\"pcused\">"
           + ServletUtil.percentageGraph((int) Double.parseDouble(percentUsed),
-              100) + "<td align=\"right\" class=\"pcremaining`\">"
-          + percentRemaining + "<td title=" + "\"blocks scheduled : "
-          + d.getBlocksScheduled() + "\" class=\"blocks\">" + d.numBlocks()
+              100) 
+          + "<td align=\"right\" class=\"pcremaining`\">"
+          + percentRemaining 
+          + "<td title=" + "\"blocks scheduled : "
+          + d.getBlocksScheduled() + "\" class=\"blocks\">" + d.numBlocks()+"\n"
+          + "<td align=\"right\" class=\"bpused\">"
+          + StringUtils.limitDecimalTo2(bpUsed * 1.0 / diskBytes)
+          + "<td align=\"right\" class=\"pcbpused\">"
+          + percentBpUsed
           + "<td align=\"right\" class=\"volfails\">"
           + d.getVolumeFailures() + "\n");
     }
 
-    void generateNodesList(JspWriter out, NameNode nn,
+    void generateNodesList(ServletContext context, JspWriter out,
         HttpServletRequest request) throws IOException {
       ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
       ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+      final NameNode nn = (NameNode)context.getAttribute("name.node");
       nn.getNamesystem().DFSNodesStatus(live, dead);
+      nn.getNamesystem().removeDecomNodeFromList(live);
+      nn.getNamesystem().removeDecomNodeFromList(dead);
+      InetSocketAddress nnSocketAddress = (InetSocketAddress) context
+          .getAttribute(NameNode.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
+      String nnaddr = nnSocketAddress.getAddress().getHostAddress() + ":"
+          + nnSocketAddress.getPort();
 
       whatNodes = request.getParameter("whatNodes"); // show only live or only
                                                      // dead nodes
@@ -576,12 +615,18 @@ class NamenodeJspHelper {
                 + "> Used <br>(%) <th " + nodeHeaderStr("pcused")
                 + "> Used <br>(%) <th " + nodeHeaderStr("pcremaining")
                 + "> Remaining <br>(%) <th " + nodeHeaderStr("blocks")
+                + "> Blocks <th "
+                + nodeHeaderStr("bpused") + "> Block Pool<br>Used (" 
+                + diskByteStr + ") <th "
+                + nodeHeaderStr("pcbpused")
+                + "> Block Pool<br>Used (%)"
                 + "> Blocks <th " + nodeHeaderStr("volfails")
                 +"> Failed Volumes\n");
 
             JspHelper.sortNodeList(live, sorterField, sorterOrder);
             for (int i = 0; i < live.size(); i++) {
-              generateNodeData(out, live.get(i), port_suffix, true, nnHttpPort);
+              generateNodeData(out, live.get(i), port_suffix, true, nnHttpPort,
+                  nnaddr);
             }
           }
           out.print("</table>\n");
@@ -598,7 +643,8 @@ class NamenodeJspHelper {
 
             JspHelper.sortNodeList(dead, sorterField, sorterOrder);
             for (int i = 0; i < dead.size(); i++) {
-              generateNodeData(out, dead.get(i), port_suffix, false, nnHttpPort);
+              generateNodeData(out, dead.get(i), port_suffix, false,
+                  nnHttpPort, nnaddr);
             }
 
             out.print("</table>\n");
@@ -628,7 +674,7 @@ class NamenodeJspHelper {
             JspHelper.sortNodeList(decommissioning, "name", "ASC");
             for (int i = 0; i < decommissioning.size(); i++) {
               generateDecommissioningNodeData(out, decommissioning.get(i),
-                  port_suffix, true, nnHttpPort);
+                  port_suffix, true, nnHttpPort, nnaddr);
             }
             out.print("</table>\n");
           }
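
The "forget" behavior described in the comment above amounts to filtering the node lists before rendering. A sketch of what FSNamesystem.removeDecomNodeFromList presumably does, under the assumption that a decommissioned node absent from both the include and exclude lists is dropped (the types here are simplified stand-ins):

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.Set;

    class DecomNodeFilterSketch {
      static class Node {
        String host;
        boolean decommissioned;
      }

      // Drop nodes that are decommissioned but listed in neither the include
      // nor the exclude file, so the web console stops displaying them.
      static void removeDecomNodeFromList(ArrayList<Node> nodes,
          Set<String> includes, Set<String> excludes) {
        for (Iterator<Node> it = nodes.iterator(); it.hasNext();) {
          Node n = it.next();
          if (n.decommissioned
              && !includes.contains(n.host) && !excludes.contains(n.host)) {
            it.remove();
          }
        }
      }
    }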

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java Thu May  5 05:40:07 2011
@@ -78,9 +78,12 @@ public class RenewDelegationTokenServlet
       os.println(result);
       os.close();
     } catch(Exception e) {
-      LOG.info("Exception while renewing token. Re-throwing. ", e);
-      resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
-                     e.getMessage());
+      // transfer the exception over HTTP
+      String exceptionClass = e.getClass().getName();
+      String exceptionMsg = e.getLocalizedMessage();
+      String strException = exceptionClass + ";" + exceptionMsg;
+      LOG.info("Exception while renewing token. Re-throwing. s=" + strException, e);
+      resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, strException);
     }
   }
 }
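
Since the servlet now encodes the failure as "ExceptionClass;message" in the HTTP error body, a caller can split on the first ';' to recover both parts. A hedged client-side sketch; the parsing convention comes from the code above, but the helper itself is illustrative:

    import java.io.IOException;

    class RemoteExceptionParseSketch {
      // Turn the "ExceptionClass;message" payload sent with
      // SC_INTERNAL_SERVER_ERROR back into an IOException that names
      // the server-side exception class.
      static IOException toIOException(String errorBody) {
        int sep = errorBody.indexOf(';');
        if (sep < 0) {
          return new IOException(errorBody);
        }
        String exceptionClass = errorBody.substring(0, sep);
        String exceptionMsg = errorBody.substring(sep + 1);
        return new IOException(exceptionClass + ": " + exceptionMsg);
      }
    }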

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Thu May  5 05:40:07 2011
@@ -34,7 +34,10 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -44,8 +47,6 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -125,6 +126,7 @@ public class SecondaryNameNode implement
    */
   public SecondaryNameNode(Configuration conf)  throws IOException {
     try {
+      NameNode.initializeGenericKeys(conf);
       initialize(conf);
     } catch(IOException e) {
       shutdown();
@@ -382,6 +384,10 @@ public class SecondaryNameNode implement
         throw new RuntimeException(e);
       }
   }
+  
+  InetSocketAddress getNameNodeAddress() {
+    return nameNodeAddr;
+  }
 
   /**
    * Copy the new fsimage into the NameNode
@@ -404,7 +410,7 @@ public class SecondaryNameNode implement
       throw new IOException("This is not a DFS");
     }
 
-    String configuredAddress = NameNode.getInfoServer(conf);
+    String configuredAddress = DFSUtil.getInfoServer(null, conf);
     InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
     if (sockAddr.getAddress().isAnyLocalAddress()) {
       if(UserGroupInformation.isSecurityEnabled()) {
@@ -710,12 +716,15 @@ public class SecondaryNameNode implement
       this.getStorage().setStorageInfo(sig);
       this.getStorage().setImageDigest(sig.getImageDigest());
       if (loadImage) {
-        loadFSImage(getStorage().getStorageFile(sdName, NameNodeFile.IMAGE));
+        getStorage();
+        loadFSImage(NNStorage.getStorageFile(sdName, NameNodeFile.IMAGE));
       }
       List<File> editsFiles =
         FSImageOldStorageInspector.getEditsInStorageDir(sdEdits);
       loadEdits(editsFiles);
       
+      storage.setClusterID(sig.getClusterID());
+      storage.setBlockPoolID(sig.getBlockpoolID());
       sig.validateStorageInfo(this);
       saveNamespace(false);
     }
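
getInfoServer moved from NameNode to DFSUtil (taking a nullable first argument here). Its selection logic presumably mirrors the method removed from NameNode earlier in this commit: the https address when security is enabled, the http address otherwise. A sketch under that assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.security.UserGroupInformation;

    class InfoServerSketch {
      // Pick the secure (https) info address when Kerberos security is
      // enabled, otherwise the plain http address, using the defaults
      // as fallbacks.
      static String getInfoServer(Configuration conf) {
        return UserGroupInformation.isSecurityEnabled()
            ? conf.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
                       DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT)
            : conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
                       DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT);
      }
    }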

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java Thu May  5 05:40:07 2011
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintWriter;
 import java.net.InetSocketAddress;
-import java.security.PrivilegedExceptionAction;
 import java.util.Enumeration;
 import java.util.List;
 
@@ -36,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.mortbay.jetty.InclusiveByteRange;
 
@@ -46,29 +46,15 @@ public class StreamFile extends DfsServl
 
   public static final String CONTENT_LENGTH = "Content-Length";
 
-  static InetSocketAddress nameNodeAddr;
-  static DataNode datanode = null;
-  static {
-    if ((datanode = DataNode.getDataNode()) != null) {
-      nameNodeAddr = datanode.getNameNodeAddrForClient();
-    }
-  }
+  static DataNode datanode = DataNode.getDataNode();
   
   /** getting a client for connecting to dfs */
   protected DFSClient getDFSClient(HttpServletRequest request)
       throws IOException, InterruptedException {
     final Configuration conf =
       (Configuration) getServletContext().getAttribute(JspHelper.CURRENT_CONF);
-    
     UserGroupInformation ugi = getUGI(request, conf);
-    DFSClient client = ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
-      @Override
-      public DFSClient run() throws IOException {
-        return new DFSClient(nameNodeAddr, conf);
-      }
-    });
-    
-    return client;
+    return DatanodeJspHelper.getDFSClient(request, datanode, conf, ugi);
   }
   
   public void doGet(HttpServletRequest request, HttpServletResponse response)
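
The static namenode address is gone because, with federation, the right namenode depends on the request. A sketch of what DatanodeJspHelper.getDFSClient presumably does with the namenode-address parameter threaded through the JSP pages earlier in this commit; the "nnaddr" parameter name stands in for JspHelper.NAMENODE_ADDRESS and is an assumption:

    import java.net.InetSocketAddress;
    import java.security.PrivilegedExceptionAction;
    import javax.servlet.http.HttpServletRequest;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.security.UserGroupInformation;

    class DfsClientPerRequestSketch {
      // Resolve the namenode from the request instead of a static field,
      // so one datanode web UI can serve files from multiple namespaces.
      static DFSClient getDFSClient(HttpServletRequest request,
          final Configuration conf, UserGroupInformation ugi)
          throws Exception {
        final InetSocketAddress nnAddr =
            NetUtils.createSocketAddr(request.getParameter("nnaddr"));
        return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
          public DFSClient run() throws java.io.IOException {
            return new DFSClient(nnAddr, conf);
          }
        });
      }
    }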

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java Thu May  5 05:40:07 2011
@@ -39,6 +39,7 @@ import org.apache.hadoop.io.*;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class BlockCommand extends DatanodeCommand {
+  String poolId;
   Block blocks[];
   DatanodeInfo targets[][];
 
@@ -48,9 +49,11 @@ public class BlockCommand extends Datano
    * Create BlockCommand for transferring blocks to another datanode
    * @param blocktargetlist    blocks to be transferred 
    */
-  public BlockCommand(int action, List<BlockTargetPair> blocktargetlist) {
+  public BlockCommand(int action, String poolId,
+      List<BlockTargetPair> blocktargetlist) {
     super(action);
 
+    this.poolId = poolId;
     blocks = new Block[blocktargetlist.size()]; 
     targets = new DatanodeInfo[blocks.length][];
     for(int i = 0; i < blocks.length; i++) {
@@ -66,12 +69,17 @@ public class BlockCommand extends Datano
    * Create BlockCommand for the given action
    * @param blocks blocks related to the action
    */
-  public BlockCommand(int action, Block blocks[]) {
+  public BlockCommand(int action, String poolId, Block blocks[]) {
     super(action);
+    this.poolId = poolId;
     this.blocks = blocks;
     this.targets = EMPTY_TARGET;
   }
 
+  public String getBlockPoolId() {
+    return poolId;
+  }
+  
   public Block[] getBlocks() {
     return blocks;
   }
@@ -93,6 +101,7 @@ public class BlockCommand extends Datano
 
   public void write(DataOutput out) throws IOException {
     super.write(out);
+    Text.writeString(out, poolId);
     out.writeInt(blocks.length);
     for (int i = 0; i < blocks.length; i++) {
       blocks[i].write(out);
@@ -108,6 +117,7 @@ public class BlockCommand extends Datano
 
   public void readFields(DataInput in) throws IOException {
     super.readFields(in);
+    this.poolId = Text.readString(in);
     this.blocks = new Block[in.readInt()];
     for (int i = 0; i < blocks.length; i++) {
       blocks[i] = new Block();
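
With the pool ID written first, the wire format of a BlockCommand is now poolId, block count, blocks, then targets. A round-trip sketch of just the pool-ID field using the same Text helpers as the diff; the pool ID value is a placeholder:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.Text;

    class PoolIdWireSketch {
      public static void main(String[] args) throws IOException {
        // Serialize the pool ID the way BlockCommand.write now does.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        Text.writeString(out, "BP-example-pool");   // hypothetical pool ID

        // Deserialize it the way BlockCommand.readFields now does.
        DataInputStream in = new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray()));
        String poolId = Text.readString(in);
        System.out.println("round-tripped poolId = " + poolId);
      }
    }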

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java Thu May  5 05:40:07 2011
@@ -25,8 +25,8 @@ import java.util.ArrayList;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
@@ -70,7 +70,7 @@ public class BlockRecoveryCommand extend
     /**
      * Create RecoveringBlock.
      */
-    public RecoveringBlock(Block b, DatanodeInfo[] locs, long newGS) {
+    public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs, long newGS) {
       super(b, locs, -1, false); // startOffset is unknown
       this.newGenerationStamp = newGS;
     }

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java Thu May  5 05:40:07 2011
@@ -19,12 +19,14 @@ package org.apache.hadoop.hdfs.server.pr
 
 import java.io.DataInput;
 import java.io.DataOutput;
+import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactory;
 import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableUtils;
 import org.apache.avro.reflect.Union;
 
 /**
@@ -47,10 +49,27 @@ public abstract class DatanodeCommand ex
     public void write(DataOutput out) {}
   }
 
-  static class Finalize extends DatanodeCommand {
-    private Finalize() {super(DatanodeProtocol.DNA_FINALIZE);}
-    public void readFields(DataInput in) {}
-    public void write(DataOutput out) {}
+  public static class Finalize extends DatanodeCommand {
+    String blockPoolId;
+    private Finalize() {
+      super(DatanodeProtocol.DNA_FINALIZE);
+    }
+    
+    public Finalize(String bpid) {
+      super(DatanodeProtocol.DNA_FINALIZE);
+      blockPoolId = bpid;
+    }
+    
+    public String getBlockPoolId() {
+      return blockPoolId;
+    }
+    
+    public void readFields(DataInput in) throws IOException {
+      blockPoolId = WritableUtils.readString(in);
+    }
+    public void write(DataOutput out) throws IOException {
+      WritableUtils.writeString(out, blockPoolId);
+    }
   }
 
   static {                                      // register a ctor
@@ -65,7 +84,6 @@ public abstract class DatanodeCommand ex
   }
 
   public static final DatanodeCommand REGISTER = new Register();
-  public static final DatanodeCommand FINALIZE = new Finalize();
   
   public DatanodeCommand() {
     super();
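
Because FINALIZE is no longer a shared singleton, each command names the pool it applies to, so a datanode can finalize block pools independently. A sketch of the receiving side; the storage interface is a stand-in, and the finalize call is an assumption about the datanode's handling:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

    class FinalizeHandlingSketch {
      interface PoolStorage {               // stand-in for datanode storage
        void finalizeUpgrade(String bpid) throws IOException;
      }

      private final PoolStorage storage;

      FinalizeHandlingSketch(PoolStorage storage) { this.storage = storage; }

      // Finalize only the block pool named by the command.
      void processCommand(DatanodeCommand cmd) throws IOException {
        if (cmd.getAction() == DatanodeProtocol.DNA_FINALIZE) {
          String bpid = ((DatanodeCommand.Finalize) cmd).getBlockPoolId();
          storage.finalizeUpgrade(bpid);
        }
      }
    }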

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Thu May  5 05:40:07 2011
@@ -23,6 +23,7 @@ import java.io.*;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.ipc.VersionedProtocol;
@@ -44,9 +45,9 @@ import org.apache.avro.reflect.Nullable;
 @InterfaceAudience.Private
 public interface DatanodeProtocol extends VersionedProtocol {
   /**
-   * 26: remove getBlockLocations optimization
+   * 27: Add block pool ID to Block
    */
-  public static final long versionID = 26;
+  public static final long versionID = 27L;
   
   // error code
   final static int NOTIFY = 0;
@@ -70,7 +71,6 @@ public interface DatanodeProtocol extend
   /** 
    * Register Datanode.
    *
-   * @see org.apache.hadoop.hdfs.server.datanode.DataNode#dnRegistration
    * @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)
    * 
    * @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains 
@@ -86,13 +86,24 @@ public interface DatanodeProtocol extend
    * an array of "DatanodeCommand" objects.
    * A DatanodeCommand tells the DataNode to invalidate local block(s), 
    * or to copy them to other DataNodes, etc.
+   * @param registration datanode registration information
+   * @param capacity total storage capacity available at the datanode
+   * @param dfsUsed storage used by HDFS
+   * @param remaining remaining storage available for HDFS
+   * @param blockPoolUsed storage used by the block pool
+   * @param xmitsInProgress number of transfers from this datanode to others
+   * @param xceiverCount number of active transceiver threads
+   * @param failedVolumes number of failed volumes
+   * @throws IOException on error
    */
   @Nullable
   public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration,
                                        long capacity,
                                        long dfsUsed, long remaining,
+                                       long blockPoolUsed,
                                        int xmitsInProgress,
-                                       int xceiverCount) throws IOException;
+                                       int xceiverCount,
+                                       int failedVolumes) throws IOException;
 
   /**
    * blockReport() tells the NameNode about all the locally-stored blocks.
@@ -101,6 +112,7 @@ public interface DatanodeProtocol extend
    * the locally-stored blocks.  It's invoked upon startup and then
    * infrequently afterwards.
    * @param registration
+   * @param poolId - the block pool ID for the blocks
    * @param blocks - the block list as an array of longs.
    *     Each block is represented as 2 longs.
    *     This is done instead of Block[] to reduce memory used by block reports.
@@ -109,6 +121,7 @@ public interface DatanodeProtocol extend
    * @throws IOException
    */
   public DatanodeCommand blockReport(DatanodeRegistration registration,
+                                     String poolId,
                                      long[] blocks) throws IOException;
     
   /**
@@ -120,6 +133,7 @@ public interface DatanodeProtocol extend
    * this DataNode, it will call blockReceived().
    */
   public void blockReceived(DatanodeRegistration registration,
+                            String poolId,
                             Block blocks[],
                             String[] delHints) throws IOException;
 
@@ -154,7 +168,7 @@ public interface DatanodeProtocol extend
   /**
    * Commit block synchronization in lease recovery
    */
-  public void commitBlockSynchronization(Block block,
+  public void commitBlockSynchronization(ExtendedBlock block,
       long newgenerationstamp, long newlength,
       boolean closeFile, boolean deleteblock, DatanodeID[] newtargets
       ) throws IOException;

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Thu May  5 05:40:07 2011
@@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
@@ -69,23 +68,10 @@ implements Writable, NodeRegistration {
     this.exportedKeys = new ExportedBlockKeys();
   }
   
-  public void setInfoPort(int infoPort) {
-    this.infoPort = infoPort;
-  }
-  
-  public void setIpcPort(int ipcPort) {
-    this.ipcPort = ipcPort;
-  }
-
-  public void setStorageInfo(DataStorage storage) {
+  public void setStorageInfo(StorageInfo storage) {
     this.storageInfo = new StorageInfo(storage);
-    this.storageID = storage.getStorageID();
   }
   
-  public void setName(String name) {
-    this.name = name;
-  }
-
   @Override // NodeRegistration
   public int getVersion() {
     return storageInfo.getLayoutVersion();
@@ -108,6 +94,7 @@ implements Writable, NodeRegistration {
       + ", storageID=" + storageID
       + ", infoPort=" + infoPort
       + ", ipcPort=" + ipcPort
+      + ", storageInfo=" + storageInfo
       + ")";
   }
 

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java Thu May  5 05:40:07 2011
@@ -24,7 +24,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;
@@ -39,9 +39,9 @@ public interface InterDatanodeProtocol e
   public static final Log LOG = LogFactory.getLog(InterDatanodeProtocol.class);
 
   /**
-   * 5: getBlockMetaDataInfo(), updateBlock() removed.
+   * 6: Add block pool ID to Block
    */
-  public static final long versionID = 5L;
+  public static final long versionID = 6L;
 
   /**
    * Initialize a replica recovery.
@@ -55,7 +55,7 @@ public interface InterDatanodeProtocol e
   /**
    * Update replica with the new generation stamp and length.  
    */
-  Block updateReplicaUnderRecovery(Block oldBlock,
+  ExtendedBlock updateReplicaUnderRecovery(ExtendedBlock oldBlock,
                                    long recoveryId,
                                    long newLength) throws IOException;
 }

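For reference, a minimal sketch of what this change asks of recovery callers:
blocks are now addressed by (block pool ID, local block). The (poolId, Block)
ExtendedBlock constructor used below is assumed from its use elsewhere in this
commit.

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    // Wrap an existing local Block with its pool ID before the RPC.
    Block local = new Block(blockId, numBytes, genStamp);
    ExtendedBlock oldBlock = new ExtendedBlock(poolId, local);
    ExtendedBlock updated =
        datanode.updateReplicaUnderRecovery(oldBlock, recoveryId, newLength);
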
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java Thu May  5 05:40:07 2011
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.Deprecated
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.io.WritableUtils;
 
 /**
  * NamespaceInfo is returned by the name-node in reply 
@@ -42,14 +43,17 @@ import org.apache.hadoop.io.WritableFact
 public class NamespaceInfo extends StorageInfo {
   String  buildVersion;
   int distributedUpgradeVersion;
+  String blockPoolID = "";    // id of the block pool
 
   public NamespaceInfo() {
     super();
     buildVersion = null;
   }
   
-  public NamespaceInfo(int nsID, long cT, int duVersion) {
-    super(FSConstants.LAYOUT_VERSION, nsID, cT);
+  public NamespaceInfo(int nsID, String clusterID, String bpID, 
+      long cT, int duVersion) {
+    super(FSConstants.LAYOUT_VERSION, nsID, clusterID, cT);
+    blockPoolID = bpID;
     buildVersion = Storage.getBuildVersion();
     this.distributedUpgradeVersion = duVersion;
   }
@@ -62,6 +66,10 @@ public class NamespaceInfo extends Stora
     return distributedUpgradeVersion;
   }
   
+  public String getBlockPoolID() {
+    return blockPoolID;
+  }
+
   /////////////////////////////////////////////////
   // Writable
   /////////////////////////////////////////////////
@@ -77,11 +85,17 @@ public class NamespaceInfo extends Stora
     DeprecatedUTF8.writeString(out, getBuildVersion());
     super.write(out);
     out.writeInt(getDistributedUpgradeVersion());
+    WritableUtils.writeString(out, blockPoolID);
   }
 
   public void readFields(DataInput in) throws IOException {
     buildVersion = DeprecatedUTF8.readString(in);
     super.readFields(in);
     distributedUpgradeVersion = in.readInt();
+    blockPoolID = WritableUtils.readString(in);
+  }
+  
+  public String toString() {
+    return super.toString() + ";bpid=" + blockPoolID;
   }
 }

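A quick round-trip sketch of the new wire format (values are made up), using the
DataOutputBuffer/DataInputBuffer helpers from org.apache.hadoop.io; note that
blockPoolID is written last, after the pre-existing fields:

    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;

    NamespaceInfo written =
        new NamespaceInfo(37, "cluster-1", "BP-1", 0L, 0);
    DataOutputBuffer out = new DataOutputBuffer();
    written.write(out);                       // ends with blockPoolID

    NamespaceInfo read = new NamespaceInfo();
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    read.readFields(in);                      // restores blockPoolID
    assert "BP-1".equals(read.getBlockPoolID());
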
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Thu May  5 05:40:07 2011
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -35,7 +36,9 @@ import org.apache.hadoop.fs.shell.Comman
 import org.apache.hadoop.fs.shell.CommandFormat;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
@@ -467,6 +470,8 @@ public class DFSAdmin extends FsShell {
       "\t[-refreshUserToGroupsMappings]\n" +
       "\t[refreshSuperUserGroupsConfiguration]\n" +
       "\t[-printTopology]\n" +
+      "\t[-refreshNamenodes datanodehost:port]\n"+
+      "\t[-deleteBlockPool datanodehost:port blockpoolId [force]]\n"+
       "\t[-help [cmd]]\n";
 
     String report ="-report: \tReports basic filesystem information and statistics.\n";
@@ -531,6 +536,20 @@ public class DFSAdmin extends FsShell {
     String printTopology = "-printTopology: Print a tree of the racks and their\n" +
                            "\t\tnodes as reported by the Namenode\n";
     
+    String refreshNamenodes = "-refreshNamenodes: Takes a datanode-host:port as its argument.\n"+
+                              "\t\tFor the given datanode, reloads the configuration files,\n" +
+                              "\t\tstops serving the removed block-pools\n"+
+                              "\t\tand starts serving new block-pools.\n";
+    
+    String deleteBlockPool = "-deleteBlockPool: Arguments are datanode-host:port, blockpool id\n"+
+                             "\t\t and an optional argument \"force\". If force is passed,\n"+
+                             "\t\t the block pool directory for the given blockpool id on the\n"+
+                             "\t\t given datanode is deleted along with its contents; otherwise\n"+
+                             "\t\t the directory is deleted only if it is empty. The command\n" +
+                             "\t\t will fail if the datanode is still serving the block pool.\n" +
+                             "\t\t   Refer to refreshNamenodes to shut down a block pool\n" +
+                             "\t\t service on a datanode.\n";
+    
     String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" +
       "\t\tis specified.\n";
 
@@ -566,6 +585,10 @@ public class DFSAdmin extends FsShell {
       System.out.println(refreshSuperUserGroupsConfiguration);
     } else if ("printTopology".equals(cmd)) {
       System.out.println(printTopology);
+    } else if ("refreshNamenodes".equals(cmd)) {
+      System.out.println(refreshNamenodes);
+    } else if ("deleteBlockPool".equals(cmd)) {
+      System.out.println(deleteBlockPool);
     } else if ("help".equals(cmd)) {
       System.out.println(help);
     } else {
@@ -586,6 +609,8 @@ public class DFSAdmin extends FsShell {
       System.out.println(refreshUserToGroupsMappings);
       System.out.println(refreshSuperUserGroupsConfiguration);
       System.out.println(printTopology);
+      System.out.println(refreshNamenodes);
+      System.out.println(deleteBlockPool);
       System.out.println(help);
       System.out.println();
       ToolRunner.printGenericCommandUsage(System.out);
@@ -852,6 +877,12 @@ public class DFSAdmin extends FsShell {
     } else if ("-printTopology".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-printTopology]");
+    } else if ("-refreshNamenodes".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " [-refreshNamenodes datanode-host:port]");
+    } else if ("-deleteBlockPool".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+          + " [-deleteBlockPool datanode-host:port blockpoolId [force]]");
     } else {
       System.err.println("Usage: java DFSAdmin");
       System.err.println("           [-report]");
@@ -866,6 +897,8 @@ public class DFSAdmin extends FsShell {
       System.err.println("           [-refreshUserToGroupsMappings]");
       System.err.println("           [-refreshSuperUserGroupsConfiguration]");
       System.err.println("           [-printTopology]");
+      System.err.println("           [-refreshNamenodes datanodehost:port]");
+      System.err.println("           [-deleteBlockPool datanode-host:port blockpoolId [force]]");
       System.err.println("           ["+SetQuotaCommand.USAGE+"]");
       System.err.println("           ["+ClearQuotaCommand.USAGE+"]");
       System.err.println("           ["+SetSpaceQuotaCommand.USAGE+"]");
@@ -951,6 +984,16 @@ public class DFSAdmin extends FsShell {
         printUsage(cmd);
         return exitCode;
       }
+    } else if ("-refreshNamenodes".equals(cmd)) {
+      if (argv.length != 2) {
+        printUsage(cmd);
+        return exitCode;
+      }
+    } else if ("-deleteBlockPool".equals(cmd)) {
+      if ((argv.length != 3) && (argv.length != 4)) {
+        printUsage(cmd);
+        return exitCode;
+      }
     }
     
     // initialize DFSAdmin
@@ -999,6 +1042,10 @@ public class DFSAdmin extends FsShell {
         exitCode = refreshSuperUserGroupsConfiguration();
       } else if ("-printTopology".equals(cmd)) {
         exitCode = printTopology();
+      } else if ("-refreshNamenodes".equals(cmd)) {
+        exitCode = refreshNamenodes(argv, i);
+      } else if ("-deleteBlockPool".equals(cmd)) {
+        exitCode = deleteBlockPool(argv, i);
       } else if ("-help".equals(cmd)) {
         if (i < argv.length) {
           printHelp(argv[i]);
@@ -1036,6 +1083,47 @@ public class DFSAdmin extends FsShell {
     return exitCode;
   }
 
+  private ClientDatanodeProtocol getDataNodeProxy(String datanode)
+      throws IOException {
+    InetSocketAddress datanodeAddr = DFSUtil.getSocketAddress(datanode);
+    // Get the current configuration
+    Configuration conf = getConf();
+
+    // For datanode proxy the server principal should be DN's one.
+    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
+        conf.get(DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY, ""));
+
+    // Create the client
+    ClientDatanodeProtocol dnProtocol = RPC.getProxy(
+        ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
+        datanodeAddr, getUGI(), conf, NetUtils.getSocketFactory(conf,
+            ClientDatanodeProtocol.class));
+    return dnProtocol;
+  }
+  
+  private int deleteBlockPool(String[] argv, int i) throws IOException {
+    ClientDatanodeProtocol dnProxy = getDataNodeProxy(argv[i]);
+    boolean force = false;
+    if (argv.length - 1 == i + 2) {
+      if ("force".equals(argv[i+2])) {
+        force = true;
+      } else {
+        printUsage("-deleteBlockPool");
+        return -1;
+      }
+    }
+    dnProxy.deleteBlockPool(argv[i+1], force);
+    return 0;
+  }
+  
+  private int refreshNamenodes(String[] argv, int i) throws IOException {
+    String datanode = argv[i];
+    ClientDatanodeProtocol refreshProtocol = getDataNodeProxy(datanode);
+    refreshProtocol.refreshNamenodes();
+    return 0;
+  }
+
   /**
    * main() has some simple utility methods.
    * @param argv Command line parameters.

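The two new subcommands can be exercised the same way main() drives the tool; a
minimal sketch (host:port and blockpool id are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;
    import org.apache.hadoop.util.ToolRunner;

    Configuration conf = new Configuration();
    // Ask one datanode to re-read its config and adjust block-pool service.
    // (ToolRunner.run() throws Exception; handling elided in this sketch.)
    int rc = ToolRunner.run(conf, new DFSAdmin(conf),
        new String[] { "-refreshNamenodes", "dn1.example.com:50020" });
    // Then remove a no-longer-served pool's directory, forcing if non-empty.
    rc = ToolRunner.run(conf, new DFSAdmin(conf),
        new String[] { "-deleteBlockPool", "dn1.example.com:50020",
                       "BP-1234567-10.0.0.1-1304500000000", "force" });
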
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DFSck.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DFSck.java Thu May  5 05:40:07 2011
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.PrintStream;
+import java.net.InetSocketAddress;
 import java.net.URL;
 import java.net.URLConnection;
 import java.net.URLEncoder;
@@ -30,12 +31,17 @@ import java.security.PrivilegedException
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -192,7 +198,41 @@ public class DFSck extends Configured im
       errCode = 0;
     return errCode;
   }
-            
+  
+  /**
+   * Derive the namenode http address from the current file system,
+   * either default or as set by "-fs" in the generic options.
+   * @return the namenode HTTP info server address, or null on failure.
+   */
+  private String getCurrentNamenodeAddress() {
+    Configuration conf = getConf();
+
+    // Get the filesystem object to verify this is an HDFS file system.
+    FileSystem fs;
+    try {
+      fs = FileSystem.get(conf);
+    } catch (IOException ioe) {
+      System.err.println("FileSystem is inaccessible due to:\n"
+          + StringUtils.stringifyException(ioe));
+      return null;
+    }
+    if (!(fs instanceof DistributedFileSystem)) {
+      System.err.println("FileSystem is " + fs.getUri());
+      return null;
+    }
+    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+
+    // Derive the nameservice ID from the filesystem URI.
+    // The URI may have been provided by a human, and the server name may be
+    // aliased, so compare InetSocketAddresses instead of URI strings, and
+    // test against both possible variants of RPC address.
+    InetSocketAddress namenode = 
+      NameNode.getAddress(dfs.getUri().getAuthority());
+    
+    return DFSUtil.getInfoServer(namenode, conf);
+  }
+
   private int doWork(final String[] args) throws IOException {
     String proto = "http://";
     if (UserGroupInformation.isSecurityEnabled()) {
@@ -201,9 +241,17 @@ public class DFSck extends Configured im
       proto = "https://";
     }
     final StringBuilder url = new StringBuilder(proto);
-    url.append(NameNode.getInfoServer(getConf()));
+    
+    String namenodeAddress = getCurrentNamenodeAddress();
+    if (namenodeAddress == null) {
+      // Error message already output in {@link #getCurrentNamenodeAddress()}
+      System.err.println("DFSck exiting.");
+      return 0;
+    }
+    url.append(namenodeAddress);
+    System.err.println("Connecting to namenode via " + url.toString());
+    
     url.append("/fsck?ugi=").append(ugi.getShortUserName()).append("&path=");
-
     String dir = "/";
     // find top-level dir first
     for (int idx = 0; idx < args.length; idx++) {

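With these changes the fsck request URL is derived from the current file system
rather than read directly from the local configuration. For a hypothetical
namenode whose info server is namenode.example.com:50070 and a user "hdfsadmin",
the tool first logs the selected namenode and then issues a request of roughly
this shape:

    Connecting to namenode via http://namenode.example.com:50070
    http://namenode.example.com:50070/fsck?ugi=hdfsadmin&path=%2F
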
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Thu May  5 05:40:07 2011
@@ -254,10 +254,12 @@ public class DelegationTokenFetcher {
     buf.append("=");
     buf.append(tok.encodeToUrlString());
     BufferedReader in = null;
+    HttpURLConnection connection = null;
+    
     try {
       URL url = new URL(buf.toString());
       SecurityUtil.fetchServiceTicket(url);
-      HttpURLConnection connection = (HttpURLConnection) url.openConnection();
+      connection = (HttpURLConnection) url.openConnection();
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error renewing token: " + 
             connection.getResponseMessage());
@@ -268,11 +270,67 @@ public class DelegationTokenFetcher {
       in.close();
       return result;
     } catch (IOException ie) {
+      LOG.info("error in renew over HTTP", ie);
+      IOException e = getExceptionFromResponse(connection);
+
       IOUtils.cleanup(LOG, in);
+      if (e != null) {
+        LOG.info("Rethrowing exception from HTTP request: " + e.getLocalizedMessage());
+        throw e;
+      }
       throw ie;
     }
   }
 
+  // Parse the HTTP response message, expected to be of the form
+  // "exceptionClass;message", and reconstruct the named exception.
+  private static IOException getExceptionFromResponse(HttpURLConnection con) {
+    if (con == null) {
+      return null;
+    }
+
+    String resp;
+    try {
+      resp = con.getResponseMessage();
+    } catch (IOException ie) {
+      return null;
+    }
+    if (resp == null || resp.isEmpty()) {
+      return null;
+    }
+
+    String[] rs = resp.split(";");
+    if (rs.length < 2) {
+      return null;
+    }
+    String exceptionClass = rs[0];
+    String exceptionMsg = rs[1];
+    LOG.info("Error response from HTTP request=" + resp +
+        ";ec=" + exceptionClass + ";em=" + exceptionMsg);
+
+    if (exceptionClass.isEmpty()) {
+      return null;
+    }
+
+    // Recreate the exception via its (String message) constructor.
+    IOException e = null;
+    try {
+      Class<? extends Exception> ec =
+          Class.forName(exceptionClass).asSubclass(Exception.class);
+      java.lang.reflect.Constructor<? extends Exception> constructor =
+          ec.getConstructor(String.class);
+      e = (IOException) constructor.newInstance(exceptionMsg);
+    } catch (Exception ee) {
+      LOG.warn("Failed to recreate exception " + exceptionClass, ee);
+    }
+    if (e == null) {
+      return null;
+    }
+
+    e.setStackTrace(new StackTraceElement[0]); // local stack is not relevant
+    LOG.info("Exception from HTTP response=" + e.getLocalizedMessage());
+    return e;
+  }
+
   /**
    * Cancel a Delegation Token.
    * @param nnAddr the NameNode's address
@@ -290,16 +348,24 @@ public class DelegationTokenFetcher {
     buf.append("=");
     buf.append(tok.encodeToUrlString());
     BufferedReader in = null;
+    HttpURLConnection connection = null;
     try {
       URL url = new URL(buf.toString());
       SecurityUtil.fetchServiceTicket(url);
-      HttpURLConnection connection = (HttpURLConnection) url.openConnection();
+      connection = (HttpURLConnection) url.openConnection();
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error cancelling token: " + 
             connection.getResponseMessage());
       }
     } catch (IOException ie) {
+      LOG.info("error in cancel over HTTP", ie);
+      IOException e = getExceptionFromResponse(connection);
+
       IOUtils.cleanup(LOG, in);
+      if (e != null) {
+        LOG.info("Rethrowing exception from HTTP request: " + e.getLocalizedMessage());
+        throw e;
+      }
       throw ie;
     }
   }

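A hedged illustration of the convention the new error path relies on: the HTTP
response message is expected to carry "exceptionClass;message", which
getExceptionFromResponse() splits and rebuilds reflectively (the sample response
text below is made up):

    // e.g. a response message of the form:
    String resp = "org.apache.hadoop.security.token."
        + "SecretManager$InvalidToken;token has expired";
    String[] rs = resp.split(";");
    // rs[0] names the exception class, instantiated via its (String)
    // constructor; rs[1] becomes its message. The result replaces the
    // generic IOException the client would otherwise see.
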
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java Thu May  5 05:40:07 2011
@@ -40,7 +40,7 @@ class EditsLoaderCurrent implements Edit
 
   private static int[] supportedVersions = {
       -18, -19, -20, -21, -22, -23, -24,
-      -25, -26, -27, -28, -30, -31, -32, -33, -34 };
+      -25, -26, -27, -28, -30, -31, -32, -33, -34, -35 };
 
   private EditsVisitor v;
   private int editsVersion = 0;

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Thu May  5 05:40:07 2011
@@ -121,7 +121,7 @@ class ImageLoaderCurrent implements Imag
   protected final DateFormat dateFormat = 
                                       new SimpleDateFormat("yyyy-MM-dd HH:mm");
   private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
-      -24, -25, -26, -27, -28, -30, -31, -32, -33, -34 };
+      -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35 };
   private int imageVersion = 0;
 
   /* (non-Javadoc)

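Both offline viewers gate on these version lists before loading; a sketch of the
kind of check involved (the method name and array field are assumed from the
loader code above):

    // Returns true iff the image/edits layout version is one we can parse.
    public boolean canLoadVersion(int version) {
      for (int v : versions) {
        if (v == version) {
          return true;
        }
      }
      return false;
    }
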
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/fs/TestFiRename.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/fs/TestFiRename.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/fs/TestFiRename.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/fs/TestFiRename.java Thu May  5 05:40:07 2011
@@ -87,9 +87,9 @@ public class TestFiRename {
       cluster.shutdown();
       cluster = null;
     }
-    cluster = new MiniDFSCluster(CONF, 1, format, null);
+    cluster = new MiniDFSCluster.Builder(CONF).format(format).build();
     cluster.waitClusterUp();
-    fc = FileContext.getFileContext(cluster.getURI(), CONF);
+    fc = FileContext.getFileContext(cluster.getURI(0), CONF);
   }
 
   /**

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/hdfs/TestFiHftp.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/hdfs/TestFiHftp.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/hdfs/TestFiHftp.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/hdfs/TestFiHftp.java Thu May  5 05:40:07 2011
@@ -31,7 +31,7 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -130,7 +130,7 @@ public class TestFiHftp {
     DFSTestUtil.waitReplication(dfs, filepath, DATANODE_NUM);
 
     //test hftp open and read
-    final HftpFileSystem hftpfs = cluster.getHftpFileSystem();
+    final HftpFileSystem hftpfs = cluster.getHftpFileSystem(0);
     {
       final FSDataInputStream in = hftpfs.open(filepath);
       long bytesRead = 0;
@@ -154,7 +154,7 @@ public class TestFiHftp {
     Assert.assertEquals((filesize - 1)/blocksize + 1,
         locatedblocks.locatedBlockCount());
     final LocatedBlock lb = locatedblocks.get(1);
-    final Block blk = lb.getBlock();
+    final ExtendedBlock blk = lb.getBlock();
     Assert.assertEquals(blocksize, lb.getBlockSize());
     final DatanodeInfo[] datanodeinfos = lb.getLocations();
     Assert.assertEquals(DATANODE_NUM, datanodeinfos.length);

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj Thu May  5 05:40:07 2011
@@ -32,6 +32,7 @@ import org.apache.hadoop.fi.DataTransfer
 import org.apache.hadoop.hdfs.server.datanode.BlockReceiver.PacketResponder;
 import org.apache.hadoop.hdfs.PipelinesTestUtil.PipelinesTest;
 import org.apache.hadoop.hdfs.PipelinesTestUtil.NodeBytes;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -54,11 +55,12 @@ privileged public aspect BlockReceiverAs
 	
   before(BlockReceiver blockreceiver
       ) throws IOException : callReceivePacket(blockreceiver) {
-    final DatanodeRegistration dr = blockreceiver.getDataNode().getDatanodeRegistration();
-    LOG.info("FI: callReceivePacket, datanode=" + dr.getName());
+    final String dnName = blockreceiver.getDataNode().getMachineName();
+    final DatanodeID dnId = blockreceiver.getDataNode().getDatanodeId();
+    LOG.info("FI: callReceivePacket, datanode=" + dnName);
     DataTransferTest dtTest = DataTransferTestUtil.getDataTransferTest();
     if (dtTest != null)
-      dtTest.fiCallReceivePacket.run(dr);
+      dtTest.fiCallReceivePacket.run(dnId);
 
     if (ProbabilityModel.injectCriteria(BlockReceiver.class.getSimpleName())) {
       LOG.info("Before the injection point");
@@ -77,7 +79,7 @@ privileged public aspect BlockReceiverAs
     DataTransferTest dtTest = DataTransferTestUtil.getDataTransferTest();
     if (dtTest != null)
       dtTest.fiCallWritePacketToDisk.run(
-          blockreceiver.getDataNode().getDatanodeRegistration());
+          blockreceiver.getDataNode().getDatanodeId());
   }
 
   pointcut afterDownstreamStatusRead(BlockReceiver.PacketResponder responder):
@@ -88,7 +90,7 @@ privileged public aspect BlockReceiverAs
     final DataNode d = responder.getReceiver().getDataNode();
     DataTransferTest dtTest = DataTransferTestUtil.getDataTransferTest();
     if (dtTest != null)
-      dtTest.fiAfterDownstreamStatusRead.run(d.getDatanodeRegistration());
+      dtTest.fiAfterDownstreamStatusRead.run(d.getDatanodeId());
   }
 
     // Pointcuts and advises for TestFiPipelines  
@@ -99,7 +101,7 @@ privileged public aspect BlockReceiverAs
     && this(br);
   
   after(BlockReceiver br, long offset) : callSetNumBytes(br, offset) {
-    LOG.debug("FI: Received bytes To: " + br.datanode.dnRegistration.getStorageID() + ": " + offset);
+    LOG.debug("FI: Received bytes To: " + br.datanode.getStorageId() + ": " + offset);
     PipelineTest pTest = DataTransferTestUtil.getDataTransferTest();
     if (pTest == null) {
       LOG.debug("FI: no pipeline has been found in receiving");
@@ -108,7 +110,7 @@ privileged public aspect BlockReceiverAs
     if (!(pTest instanceof PipelinesTest)) {
       return;
     }
-    NodeBytes nb = new NodeBytes(br.datanode.dnRegistration, offset);
+    NodeBytes nb = new NodeBytes(br.datanode.getDatanodeId(), offset);
     try {
       ((PipelinesTest)pTest).fiCallSetNumBytes.run(nb);
     } catch (IOException e) {
@@ -129,9 +131,8 @@ privileged public aspect BlockReceiverAs
       LOG.debug("FI: no pipeline has been found in acking");
       return;
     }
-    LOG.debug("FI: Acked total bytes from: "
-        + pr.getReceiver().datanode.dnRegistration.getStorageID()
-        + ": " + acked);
+    LOG.debug("FI: Acked total bytes from: " + 
+        pr.getReceiver().datanode.getStorageId() + ": " + acked);
     if (pTest instanceof PipelinesTest) {
       bytesAckedService((PipelinesTest)pTest, pr, acked);
     }
@@ -139,7 +140,7 @@ privileged public aspect BlockReceiverAs
 
   private void bytesAckedService 
       (final PipelinesTest pTest, final PacketResponder pr, final long acked) {
-    NodeBytes nb = new NodeBytes(pr.getReceiver().datanode.dnRegistration, acked);
+    NodeBytes nb = new NodeBytes(pr.getReceiver().datanode.getDatanodeId(), acked);
     try {
       pTest.fiCallSetBytesAcked.run(nb);
     } catch (IOException e) {
@@ -186,8 +187,8 @@ privileged public aspect BlockReceiverAs
       ) throws IOException : pipelineClose(blockreceiver, offsetInBlock, seqno,
           lastPacketInBlock, len, endOfHeader) {
     if (len == 0) {
-      final DatanodeRegistration dr = blockreceiver.getDataNode().getDatanodeRegistration();
-      LOG.info("FI: pipelineClose, datanode=" + dr.getName()
+      final DatanodeID dnId = blockreceiver.getDataNode().getDatanodeId();
+      LOG.info("FI: pipelineClose, datanode=" + dnId.getName()
           + ", offsetInBlock=" + offsetInBlock
           + ", seqno=" + seqno
           + ", lastPacketInBlock=" + lastPacketInBlock
@@ -196,7 +197,7 @@ privileged public aspect BlockReceiverAs
   
       final DataTransferTest test = DataTransferTestUtil.getDataTransferTest();
       if (test != null) {
-        test.fiPipelineClose.run(dr);
+        test.fiPipelineClose.run(dnId);
       }
     }
   }
@@ -207,12 +208,12 @@ privileged public aspect BlockReceiverAs
 
   after(BlockReceiver.PacketResponder packetresponder) throws IOException
       : pipelineAck(packetresponder) {
-    final DatanodeRegistration dr = packetresponder.getReceiver().getDataNode().getDatanodeRegistration();
-    LOG.info("FI: fiPipelineAck, datanode=" + dr);
+    final DatanodeID dnId = packetresponder.getReceiver().getDataNode().getDatanodeId();
+    LOG.info("FI: fiPipelineAck, datanode=" + dnId);
 
     final DataTransferTest test = DataTransferTestUtil.getDataTransferTest();
     if (test != null) {
-      test.fiPipelineAck.run(dr);
+      test.fiPipelineAck.run(dnId);
     }
   }
 
@@ -222,12 +223,12 @@ privileged public aspect BlockReceiverAs
       && this(blockreceiver);
 
   after(BlockReceiver blockreceiver) throws IOException : blockFileClose(blockreceiver) {
-    final DatanodeRegistration dr = blockreceiver.getDataNode().getDatanodeRegistration();
-    LOG.info("FI: blockFileClose, datanode=" + dr);
+    final DatanodeID dnId = blockreceiver.getDataNode().getDatanodeId();
+    LOG.info("FI: blockFileClose, datanode=" + dnId);
 
     final DataTransferTest test = DataTransferTestUtil.getDataTransferTest();
     if (test != null) {
-      test.fiBlockFileClose.run(dr);
+      test.fiBlockFileClose.run(dnId);
     }
   }
 }


