hadoop-hdfs-commits mailing list archives

From sur...@apache.org
Subject svn commit: r1101324 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/namenode/ src/webapps/hdfs/
Date Tue, 10 May 2011 06:10:03 GMT
Author: suresh
Date: Tue May 10 06:10:03 2011
New Revision: 1101324

URL: http://svn.apache.org/viewvc?rev=1101324&view=rev
Log:
HDFS-1873. Federation: Add cluster management web console. Contributed by Tanping Wang.

Added:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java
    hadoop/hdfs/trunk/src/webapps/hdfs/decommission.jsp
    hadoop/hdfs/trunk/src/webapps/hdfs/decommission.xsl
    hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.jsp
    hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.xsl
    hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth_utils.xsl
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1101324&r1=1101323&r2=1101324&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Tue May 10 06:10:03 2011
@@ -270,6 +270,9 @@ Trunk (unreleased changes)
     
     HDFS-1751. Intrinsic limits for HDFS files, directories (daryn via boryas).
 
+    HDFS-1873. Federation: Add cluster management web console.
+    (Tanping Wang via suresh)
+
   IMPROVEMENTS
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java?rev=1101324&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java Tue May 10 06:10:03 2011
@@ -0,0 +1,896 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import javax.management.JMX;
+import javax.management.MBeanServerConnection;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import javax.management.remote.JMXConnector;
+import javax.management.remote.JMXConnectorFactory;
+import javax.management.remote.JMXServiceURL;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.util.StringUtils;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.type.TypeReference;
+import org.znerd.xmlenc.XMLOutputter;
+
+/**
+ * This class generates the data that needs to be displayed on the cluster web
+ * console, by connecting to each namenode through JMX.
+ */
+@InterfaceAudience.Private
+class ClusterJspHelper {
+  private static final Log LOG = LogFactory.getLog(ClusterJspHelper.class);
+  public static final String OVERALL_STATUS = "overall-status";
+  public static final String DEAD = "Dead";
+  
+  /**
+   * JSP helper function that generates the cluster health report. When an
+   * exception is encountered while getting namenode status, the exception is
+   * listed on the page with the corresponding stack trace.
+   */
+  ClusterStatus generateClusterHealthReport() {
+    ClusterStatus cs = new ClusterStatus();
+    Configuration conf = new Configuration();
+    List<InetSocketAddress> isas = null;
+    try {
+      isas = DFSUtil.getNNServiceRpcAddresses(conf);
+    } catch (Exception e) {
+      // Could not build cluster status
+      cs.setError(e);
+      return cs;
+    }
+    
+    // Process each namenode and add it to ClusterStatus
+    for (InetSocketAddress isa : isas) {
+      NamenodeMXBeanHelper nnHelper = null;
+      try {
+        nnHelper = new NamenodeMXBeanHelper(isa, conf);
+        NamenodeStatus nn = nnHelper.getNamenodeStatus();
+        if (cs.clusterid.isEmpty()) { // Set clusterid only once
+          cs.clusterid = nnHelper.getClusterId();
+        }
+        cs.addNamenodeStatus(nn);
+      } catch (Exception e) {
+        // track exceptions encountered when connecting to namenodes
+        cs.addException(isa.getHostName(), e);
+        continue;
+      } finally {
+        if (nnHelper != null) {
+          nnHelper.cleanup();
+        }
+      }
+    }
+    return cs;
+  }
+
+  /**
+   * Helper function that generates the decommissioning report.
+   */
+  DecommissionStatus generateDecommissioningReport() {
+    String clusterid = "";
+    Configuration conf = new Configuration();
+    List<InetSocketAddress> isas = null;
+    try {
+      isas = DFSUtil.getNNServiceRpcAddresses(conf);
+    } catch (Exception e) {
+      // could not build the list of namenode addresses from the configuration
+      DecommissionStatus dInfo = new DecommissionStatus(clusterid, e);
+      return dInfo;
+    }
+    
+    // Outer map key is datanode. Inner map key is namenode and the value is 
+    // decom status of the datanode for the corresponding namenode
+    Map<String, Map<String, String>> statusMap = 
+      new HashMap<String, Map<String, String>>();
+    
+    // Map of exceptions encountered when connecting to namenode
+    // key is namenode and value is exception
+    Map<String, Exception> decommissionExceptions = 
+      new HashMap<String, Exception>();
+    
+    List<String> unreportedNamenode = new ArrayList<String>();
+    for (InetSocketAddress isa : isas) {
+      NamenodeMXBeanHelper nnHelper = null;
+      try {
+        nnHelper = new NamenodeMXBeanHelper(isa, conf);
+        if (clusterid.equals("")) {
+          clusterid = nnHelper.getClusterId();
+        }
+        nnHelper.getDecomNodeInfoForReport(statusMap);
+      } catch (Exception e) {
+        // catch exceptions encountered while connecting to namenodes
+        String nnHost = isa.getHostName();
+        decommissionExceptions.put(nnHost, e);
+        unreportedNamenode.add(nnHost);
+        continue;
+      } finally {
+        if (nnHelper != null) {
+          nnHelper.cleanup();
+        }
+      }
+    }
+    updateUnknownStatus(statusMap, unreportedNamenode);
+    getDecommissionNodeClusterState(statusMap);
+    return new DecommissionStatus(statusMap, clusterid,
+        getDatanodeHttpPort(conf), decommissionExceptions);
+  }
+  
+  /**
+   * Based on the state of the datanode at each namenode, marks the overall
+   * state of the datanode across all the namenodes, to one of the following:
+   * <ol>
+   * <li>{@link DecommissionStates#DECOMMISSIONED}</li>
+   * <li>{@link DecommissionStates#DECOMMISSION_INPROGRESS}</li>
+   * <li>{@link DecommissionStates#PARTIALLY_DECOMMISSIONED}</li>
+   * <li>{@link DecommissionStates#UNKNOWN}</li>
+   * </ol>
+   * 
+   * @param statusMap
+   *          map whose key is datanode, value is an inner map with key being
+   *          namenode, value being decommission state.
+   */
+  private void getDecommissionNodeClusterState(
+      Map<String, Map<String, String>> statusMap) {
+    if (statusMap == null || statusMap.isEmpty()) {
+      return;
+    }
+    
+    // For each datanode
+    Iterator<Entry<String, Map<String, String>>> it = 
+      statusMap.entrySet().iterator();
+    while (it.hasNext()) {
+      // Map entry for a datanode: key is the datanode; value is an inner map
+      // from namenode to the datanode's status at that namenode
+      Entry<String, Map<String, String>> entry = it.next();
+      Map<String, String> nnStatus = entry.getValue();
+      if (nnStatus == null || nnStatus.isEmpty()) {
+        continue;
+      }
+      
+      boolean isUnknown = false;
+      int unknown = 0;
+      int decommissioned = 0;
+      int decomInProg = 0;
+      int inservice = 0;
+      int dead = 0;
+      DecommissionStates overallState = DecommissionStates.UNKNOWN;
+      // Process a datanode state from each namenode
+      for (Map.Entry<String, String> m : nnStatus.entrySet()) {
+        String status = m.getValue();
+        if (status.equals(DecommissionStates.UNKNOWN.toString())) {
+          isUnknown = true;
+          unknown++;
+        } else if (status.equals(
+            AdminStates.DECOMMISSION_INPROGRESS.toString())) {
+          decomInProg++;
+        } else if (status.equals(AdminStates.DECOMMISSIONED.toString())) {
+          decommissioned++;
+        } else if (status.equals(AdminStates.NORMAL.toString())) {
+          inservice++;
+        } else if (status.equals(DEAD)) {
+          dead++;
+        }
+      }
+      
+      // Consolidate the states from all namenodes into an overall state
+      int nns = nnStatus.keySet().size();
+      if ((inservice + dead + unknown) == nns) {
+        // Do not display this data node. Remove this entry from status map.  
+        it.remove();
+      } else if (isUnknown) {
+        overallState = DecommissionStates.UNKNOWN;
+      } else if (decommissioned == nns) {
+        overallState = DecommissionStates.DECOMMISSIONED;
+      } else if ((decommissioned + decomInProg) == nns) {
+        overallState = DecommissionStates.DECOMMISSION_INPROGRESS;
+      } else if ((decommissioned + decomInProg < nns) 
+        && (decommissioned + decomInProg > 0)){
+        overallState = DecommissionStates.PARTIALLY_DECOMMISSIONED;
+      } else {
+        LOG.warn("Cluster console encountered an unhandled situation.");
+      }
+        
+      // insert overall state
+      nnStatus.put(OVERALL_STATUS, overallState.toString());
+    }
+  }
+
+  /**
+   * Update the datanode status map with UNKNOWN status for every unreported
+   * namenode.
+   */
+  private void updateUnknownStatus(Map<String, Map<String, String>> statusMap,
+      List<String> unreportedNn) {
+    if (unreportedNn == null || unreportedNn.isEmpty()) {
+      // no unreported namenodes
+      return;
+    }
+    
+    for (Map.Entry<String, Map<String,String>> entry : statusMap.entrySet()) {
+      String dn = entry.getKey();
+      Map<String, String> nnStatus = entry.getValue();
+      for (String nn : unreportedNn) {
+        nnStatus.put(nn, DecommissionStates.UNKNOWN.toString());
+      }
+      statusMap.put(dn, nnStatus);
+    }
+  }
+
+  /**
+   * Get the datanode HTTP port from the configuration.
+   */
+  private int getDatanodeHttpPort(Configuration conf) {
+    String address = conf.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "");
+    if (address.equals("")) {
+      return -1;
+    }
+    return Integer.parseInt(address.split(":")[1]);
+  }
+
+  /**
+   * Class for connecting to a namenode over JMX and getting the attributes
+   * exposed by its MXBean.
+   */
+  static class NamenodeMXBeanHelper {
+    private static final ObjectMapper mapper = new ObjectMapper();
+    private final InetSocketAddress rpcAddress;
+    private final String host;
+    private final Configuration conf;
+    private final JMXConnector connector;
+    private final NameNodeMXBean mxbeanProxy;
+    
+    NamenodeMXBeanHelper(InetSocketAddress addr, Configuration conf)
+        throws IOException, MalformedObjectNameException {
+      this.rpcAddress = addr;
+      this.host = addr.getHostName();
+      this.conf = conf;
+      int port = conf.getInt("dfs.namenode.jmxport", -1);
+      
+      JMXServiceURL jmxURL = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://"
+          + host + ":" + port + "/jmxrmi");
+      connector = JMXConnectorFactory.connect(jmxURL);
+      mxbeanProxy = getNamenodeMxBean();
+    }
+    
+    private NameNodeMXBean getNamenodeMxBean()
+        throws IOException, MalformedObjectNameException {
+      // Get an MBeanServerConnection on the remote VM.
+      MBeanServerConnection remote = connector.getMBeanServerConnection();
+      ObjectName mxbeanName = new ObjectName(
+          "HadoopInfo:type=NameNodeInfo");
+      return JMX.newMXBeanProxy(remote, mxbeanName, NameNodeMXBean.class);
+    }
+    
+    /** Get the map corresponding to the JSON string */
+    private static Map<String, Map<String, Object>> getNodeMap(String json)
+        throws IOException {
+      TypeReference<Map<String, Map<String, Object>>> type = 
+        new TypeReference<Map<String, Map<String, Object>>>() { };
+      return mapper.readValue(json, type);
+    }
+    
+    /**
+     * Process JSON string returned from JMX connection to get the number of
+     * live datanodes.
+     * 
+     * @param json JSON output from JMX call that contains live node status.
+     * @param nn namenode status to return information in
+     */
+    private static void getLiveNodeCount(String json, NamenodeStatus nn)
+        throws IOException {
+      // Map of datanode host to (map of attribute name to value)
+      Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
+      if (nodeMap == null || nodeMap.isEmpty()) {
+        return;
+      }
+      
+      nn.liveDatanodeCount = nodeMap.size();
+      for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
+        // Inner map of attribute name to value
+        Map<String, Object> innerMap = entry.getValue();
+        if (innerMap != null) {
+          if (((String) innerMap.get("adminState"))
+              .equals(AdminStates.DECOMMISSIONED.toString())) {
+            nn.liveDecomCount++;
+          }
+        }
+      }
+    }
+  
+    /**
+     * Count the number of dead datanodes based on the JSON string returned
+     * from the JMX call.
+     * 
+     * @param nn namenode
+     * @param json JSON string returned from JMX call
+     */
+    private static void getDeadNodeCount(String json, NamenodeStatus nn)
+        throws IOException {
+      Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
+      if (nodeMap == null || nodeMap.isEmpty()) {
+        return;
+      }
+      
+      nn.deadDatanodeCount = nodeMap.size();
+      for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
+        Map<String, Object> innerMap = entry.getValue();
+        if (innerMap != null && !innerMap.isEmpty()) {
+          if (((Boolean) innerMap.get("decommissioned")).booleanValue()) {
+            nn.deadDecomCount++;
+          }
+        }
+      }
+    }
+  
+    public String getClusterId() {
+      return mxbeanProxy.getClusterId();
+    }
+    
+    public NamenodeStatus getNamenodeStatus()
+        throws IOException, MalformedObjectNameException {
+      NamenodeStatus nn = new NamenodeStatus();
+      nn.host = host;
+      nn.filesAndDirectories = mxbeanProxy.getTotalFiles();
+      nn.capacity = mxbeanProxy.getTotal();
+      nn.free = mxbeanProxy.getFree();
+      nn.dfsUsed = mxbeanProxy.getUsed();
+      nn.nonDfsUsed = mxbeanProxy.getNonDfsUsedSpace();
+      nn.blocksCount = mxbeanProxy.getTotalBlocks();
+      nn.missingBlocksCount = mxbeanProxy.getNumberOfMissingBlocks();
+      nn.httpAddress = DFSUtil.getInfoServer(rpcAddress, conf);
+      getLiveNodeCount(mxbeanProxy.getLiveNodes(), nn);
+      getDeadNodeCount(mxbeanProxy.getDeadNodes(), nn);
+      return nn;
+    }
+    
+    /**
+     * Connect to the namenode to get decommission node information.
+     * @param statusMap datanode status map
+     */
+    private void getDecomNodeInfoForReport(
+        Map<String, Map<String, String>> statusMap) throws IOException,
+        MalformedObjectNameException {
+      getLiveNodeStatus(statusMap, host, mxbeanProxy.getLiveNodes());
+      getDeadNodeStatus(statusMap, host, mxbeanProxy.getDeadNodes());
+      getDecommissionNodeStatus(statusMap, host, mxbeanProxy.getDecomNodes());
+    }
+  
+    /**
+     * Process the JSON string returned from the JMX call to get live datanode
+     * status, and store the information into the datanode status map.
+     * 
+     * @param statusMap Map of datanode status. Key is datanode; value is an
+     *          inner map whose key is namenode and value is the datanode
+     *          status reported by that namenode.
+     * @param namenodeHost host name of the namenode
+     * @param json JSON string containing datanode status
+     * @throws IOException
+     */
+    private static void getLiveNodeStatus(
+        Map<String, Map<String, String>> statusMap, String namenodeHost,
+        String json) throws IOException {
+      Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
+      if (nodeMap != null && !nodeMap.isEmpty()) {
+        List<String> liveDecommed = new ArrayList<String>();
+        for (Map.Entry<String, Map<String, Object>> entry: nodeMap.entrySet()) {
+          Map<String, Object> innerMap = entry.getValue();
+          String dn = entry.getKey();
+          if (innerMap != null) {
+            if (innerMap.get("adminState").equals(
+                AdminStates.DECOMMISSIONED.toString())) {
+              liveDecommed.add(dn);
+            }
+            // the inner map key is namenode, value is datanode status.
+            Map<String, String> nnStatus = statusMap.get(dn);
+            if (nnStatus == null) {
+              nnStatus = new HashMap<String, String>();
+            }
+            nnStatus.put(namenodeHost, (String) innerMap.get("adminState"));
+            // map whose key is datanode, value is the inner map.
+            statusMap.put(dn, nnStatus);
+          }
+        }
+      }
+    }
+  
+    /**
+     * Process the JSON string returned from the JMX connection to get the
+     * dead datanode information, and store it into the datanode status map.
+     * 
+     * @param statusMap map with key being datanode, value being an
+     *          inner map (key: namenode, value: decommissioning state).
+     * @param host namenode hostname
+     * @param json JSON string returned from the JMX connection
+     * @throws IOException
+     */
+    private static void getDeadNodeStatus(
+        Map<String, Map<String, String>> statusMap, String host,
+        String json) throws IOException {
+      Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
+      if (nodeMap == null || nodeMap.isEmpty()) {
+        return;
+      }
+      List<String> deadDn = new ArrayList<String>();
+      List<String> deadDecommed = new ArrayList<String>();
+      for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
+        deadDn.add(entry.getKey());
+        Map<String, Object> deadNodeDetailMap = entry.getValue();
+        String dn = entry.getKey();
+        if (deadNodeDetailMap != null && !deadNodeDetailMap.isEmpty()) {
+          // NN - status
+          Map<String, String> nnStatus = statusMap.get(dn);
+          if (nnStatus == null) {
+            nnStatus = new HashMap<String, String>();
+          }
+          if (((Boolean) deadNodeDetailMap.get("decommissioned"))
+              .booleanValue() == true) {
+            deadDecommed.add(dn);
+            nnStatus.put(host, AdminStates.DECOMMISSIONED.toString());
+          } else {
+            nnStatus.put(host, DEAD);
+          }
+          // dn-nn-status
+          statusMap.put(dn, nnStatus);
+        }
+      }
+    }
+  
+    /**
+     * Process the JSON string returned from the JMX connection to get the
+     * decommissioning datanode information.
+     * 
+     * @param dataNodeStatusMap map with key being datanode, value being an
+     *          inner map (key: namenode, value: decommissioning state).
+     * @param host namenode hostname
+     * @param json JSON string returned from the JMX connection
+     */
+    private static void getDecommissionNodeStatus(
+        Map<String, Map<String, String>> dataNodeStatusMap, String host,
+        String json) throws IOException {
+      Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
+      if (nodeMap == null || nodeMap.isEmpty()) {
+        return;
+      }
+      List<String> decomming = new ArrayList<String>();
+      for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
+        String dn = entry.getKey();
+        decomming.add(dn);
+        // nn-status
+        Map<String, String> nnStatus = new HashMap<String, String>();
+        if (dataNodeStatusMap.containsKey(dn)) {
+          nnStatus = dataNodeStatusMap.get(dn);
+        }
+        nnStatus.put(host, AdminStates.DECOMMISSION_INPROGRESS.toString());
+        // dn-nn-status
+        dataNodeStatusMap.put(dn, nnStatus);
+      }
+    }
+  
+    
+    public void cleanup() {
+      if (connector != null) {
+        try {
+          connector.close();
+        } catch (Exception e) {
+          // log failure to close the JMX connection
+          LOG.warn("Unable to close JMX connection. "
+              + StringUtils.stringifyException(e));
+        }
+      }
+    }
+  }
+
+  /**
+   * This class contains cluster statistics.
+   */
+  static class ClusterStatus {
+    /** Exception, when set, indicates failure to get cluster status */
+    Exception error = null;
+    
+    /** Cluster status information */
+    String clusterid = "";
+    long total_sum = 0;
+    long free_sum = 0;
+    long used = 0;
+    long nonDfsUsed_sum = 0;
+    long totalFilesAndBlocks = 0;
+    
+    /** List of namenodes in the cluster */
+    final List<NamenodeStatus> nnList = new ArrayList<NamenodeStatus>();
+    
+    /** Map of namenode host and exception encountered when getting status */
+    final Map<String, Exception> nnExceptions = new HashMap<String, Exception>();
+    
+    public void setError(Exception e) {
+      error = e;
+    }
+    
+    public void addNamenodeStatus(NamenodeStatus nn) {
+      nnList.add(nn);
+      
+      // Add namenode status to cluster status
+      totalFilesAndBlocks += nn.filesAndDirectories;
+      total_sum += nn.capacity;
+      free_sum += nn.free;
+      used += nn.dfsUsed;
+      nonDfsUsed_sum += nn.nonDfsUsed;
+    }
+
+    public void addException(String host, Exception e) {
+      nnExceptions.put(host, e);
+    }
+
+    public void toXML(XMLOutputter doc) throws IOException {
+      if (error != null) {
+        // general exception; only print the exception message on the web page.
+        createGeneralException(doc, clusterid,
+            StringUtils.stringifyException(error));
+        doc.getWriter().flush();
+        return;
+      }
+      
+      int size = nnList.size();
+      long total = 0L, free = 0L, nonDfsUsed = 0L;
+      float dfsUsedPercent = 0.0f, dfsRemainingPercent = 0.0f;
+      if (size > 0) {
+        total = total_sum / size;
+        free = free_sum / size;
+        nonDfsUsed = nonDfsUsed_sum / size;
+        dfsUsedPercent = DFSUtil.getPercentUsed(used, total_sum);
+        dfsRemainingPercent = DFSUtil.getPercentRemaining(free, total);
+      }
+    
+      doc.startTag("cluster");
+      doc.attribute("clusterId", clusterid);
+    
+      doc.startTag("storage");
+    
+      toXmlItemBlock(doc, "Total Files And Blocks",
+          Long.toString(totalFilesAndBlocks));
+    
+      toXmlItemBlock(doc, "Configured Capacity", StringUtils.byteDesc(total));
+    
+      toXmlItemBlock(doc, "Used", StringUtils.byteDesc(used));
+    
+      toXmlItemBlock(doc, "Non DFS Used", StringUtils.byteDesc(nonDfsUsed));
+    
+      toXmlItemBlock(doc, "Remaining", StringUtils.byteDesc(free));
+    
+      // dfsUsedPercent
+      toXmlItemBlock(doc, "Used%", StringUtils.limitDecimalTo2(dfsUsedPercent)
+          + "%");
+    
+      // dfsRemainingPercent
+      toXmlItemBlock(doc, "Remaining%",
+          StringUtils.limitDecimalTo2(dfsRemainingPercent) + "%");
+    
+      doc.endTag(); // storage
+    
+      doc.startTag("namenodes");
+      // number of namenodes
+      toXmlItemBlock(doc, "NamenodesCount", Integer.toString(size));
+    
+      for (NamenodeStatus nn : nnList) {
+        doc.startTag("node");
+        toXmlItemBlockWithLink(doc, nn.host, nn.httpAddress, "NameNode");
+        toXmlItemBlock(doc, "Used",
+            StringUtils.byteDesc(nn.dfsUsed));
+        toXmlItemBlock(doc, "Files And Directories",
+            Long.toString(nn.filesAndDirectories));
+        toXmlItemBlock(doc, "Blocks", Long.toString(nn.blocksCount));
+        toXmlItemBlock(doc, "Missing Blocks",
+            Long.toString(nn.missingBlocksCount));
+        toXmlItemBlock(doc, "Live Datanode (Decommissioned)",
+            Integer.toString(nn.liveDatanodeCount) + " ("
+                + Integer.toString(nn.liveDecomCount) + ")");
+        toXmlItemBlock(doc, "Dead Datanode (Decommissioned)",
+            Integer.toString(nn.deadDatanodeCount) + " ("
+                + Integer.toString(nn.deadDecomCount) + ")");
+        doc.endTag(); // node
+      }
+      doc.endTag(); // namenodes
+    
+      createNamenodeExceptionMsg(doc, nnExceptions);
+      doc.endTag(); // cluster
+      doc.getWriter().flush();
+    }
+  }
+  
+  /**
+   * This class stores namenode statistics used to generate the cluster
+   * web console report.
+   */
+  static class NamenodeStatus {
+    String host = "";
+    long capacity = 0L;
+    long free = 0L;
+    long dfsUsed = 0L;
+    long nonDfsUsed = 0L;
+    long filesAndDirectories = 0L;
+    long blocksCount = 0L;
+    long missingBlocksCount = 0L;
+    int liveDatanodeCount = 0;
+    int liveDecomCount = 0;
+    int deadDatanodeCount = 0;
+    int deadDecomCount = 0;
+    String httpAddress = null;
+  }
+
+  /**
+   * Cluster-wide decommission state of a datanode.
+   */
+  public enum DecommissionStates {
+    /*
+     * If datanode state is decommissioning at one or more namenodes and 
+     * decommissioned at the rest of the namenodes.
+     */
+    DECOMMISSION_INPROGRESS("Decommission In Progress"),
+    
+    /* If datanode state at all the namenodes is decommissioned */
+    DECOMMISSIONED("Decommissioned"),
+    
+    /*
+     * If datanode state is neither decommissioned nor decommissioning at one
+     * or more namenodes, and decommissioned/decommissioning at the rest.
+     */
+    PARTIALLY_DECOMMISSIONED("Partially Decommissioning"),
+    
+    /*
+     * If datanode state is not known at a namenode, due to problems in getting
+     * the datanode state from the namenode.
+     */
+    UNKNOWN("Unknown");
+
+    final String value;
+    
+    DecommissionStates(final String v) {
+      this.value = v;
+    }
+
+    public String toString() {
+      return value;
+    }
+  }
+
+  /**
+   * This class consolidates the decommissioning datanodes information in the
+   * cluster and generates decommissioning reports in XML.
+   */
+  static class DecommissionStatus {
+    /** Error, when set, indicates failure to get decommission status */
+    final Exception error;
+    
+    /** Map of dn host <-> (Map of NN host <-> decommissioning state) */
+    final Map<String, Map<String, String>> statusMap;
+    final String clusterid;
+    final int httpPort;
+    int decommissioned = 0;   // total number of decommissioned nodes
+    int decommissioning = 0;  // total number of decommissioning datanodes
+    int partial = 0;          // total number of partially decommissioned nodes
+    
+    /** Map of namenode and exception encountered when getting decom status */
+    Map<String, Exception> exceptions = new HashMap<String, Exception>();
+
+    private DecommissionStatus(Map<String, Map<String, String>> statusMap,
+        String cid, int httpPort, Map<String, Exception> exceptions) {
+      this(statusMap, cid, httpPort, exceptions, null);
+    }
+
+    public DecommissionStatus(String cid, Exception e) {
+      this(null, cid, -1, null, e);
+    }
+    
+    private DecommissionStatus(Map<String, Map<String, String>> statusMap,
+        String cid, int httpPort, Map<String, Exception> exceptions,
+        Exception error) {
+      this.statusMap = statusMap;
+      this.clusterid = cid;
+      this.httpPort = httpPort;
+      this.exceptions = exceptions;
+      this.error = error;
+    }
+
+    /**
+     * Generate the decommissioning datanode report in XML format.
+     * 
+     * @param doc the XMLOutputter to write the report to
+     * @throws IOException
+     */
+    public void toXML(XMLOutputter doc) throws IOException {
+      if (error != null) {
+        createGeneralException(doc, clusterid,
+            StringUtils.stringifyException(error));
+        doc.getWriter().flush();
+        return;
+      } 
+      if (statusMap == null || statusMap.isEmpty()) {
+        // none of the namenodes has reported, print exceptions from each nn.
+        doc.startTag("cluster");
+        createNamenodeExceptionMsg(doc, exceptions);
+        doc.endTag();
+        doc.getWriter().flush();
+        return;
+      }
+      doc.startTag("cluster");
+      doc.attribute("clusterId", clusterid);
+
+      doc.startTag("decommissioningReport");
+      countDecommissionDatanodes();
+      toXmlItemBlock(doc, DecommissionStates.DECOMMISSIONED.toString(),
+          Integer.toString(decommissioned));
+
+      toXmlItemBlock(doc,
+          DecommissionStates.DECOMMISSION_INPROGRESS.toString(),
+          Integer.toString(decommissioning));
+
+      toXmlItemBlock(doc,
+          DecommissionStates.PARTIALLY_DECOMMISSIONED.toString(),
+          Integer.toString(partial));
+
+      doc.endTag(); // decommissioningReport
+
+      doc.startTag("datanodes");
+      Set<String> dnSet = statusMap.keySet();
+      for (String dnhost : dnSet) {
+        Map<String, String> nnStatus = statusMap.get(dnhost);
+        if (nnStatus == null || nnStatus.isEmpty()) {
+          continue;
+        }
+        String overallStatus = nnStatus.get(OVERALL_STATUS);
+        // check if datanode is in decommission states
+        if (overallStatus != null
+            && (overallStatus.equals(AdminStates.DECOMMISSION_INPROGRESS
+                .toString())
+                || overallStatus.equals(AdminStates.DECOMMISSIONED.toString())
+                || overallStatus
+                    .equals(DecommissionStates.PARTIALLY_DECOMMISSIONED
+                        .toString()) || overallStatus
+                .equals(DecommissionStates.UNKNOWN.toString()))) {
+          doc.startTag("node");
+          // dn
+          toXmlItemBlockWithLink(doc, dnhost, (dnhost+":"+httpPort),"DataNode");
+
+          // overall status first
+          toXmlItemBlock(doc, OVERALL_STATUS, overallStatus);
+
+          for (Map.Entry<String, String> m : nnStatus.entrySet()) {
+            String nn = m.getKey();
+            if (nn.equals(OVERALL_STATUS)) {
+              continue;
+            }
+            // xml
+            toXmlItemBlock(doc, nn, nnStatus.get(nn));
+          }
+          doc.endTag(); // node
+        }
+      }
+      doc.endTag(); // datanodes
+
+      createNamenodeExceptionMsg(doc, exceptions);
+
+      doc.endTag();// cluster
+    } // toXML
+
+    /**
+     * Count the total number of decommissioned/decommission_inprogress/
+     * partially decommissioned datanodes.
+     */
+    private void countDecommissionDatanodes() {
+      for (String dn : statusMap.keySet()) {
+        Map<String, String> nnStatus = statusMap.get(dn);
+        String status = nnStatus.get(OVERALL_STATUS);
+        if (status.equals(DecommissionStates.DECOMMISSIONED.toString())) {
+          decommissioned++;
+        } else if (status.equals(DecommissionStates.DECOMMISSION_INPROGRESS
+            .toString())) {
+          decommissioning++;
+        } else if (status.equals(DecommissionStates.PARTIALLY_DECOMMISSIONED
+            .toString())) {
+          partial++;
+        }
+      }
+    }
+  }
+
+  /**
+   * Generate an XML block of the form <item label="key" value="value"/>.
+   */
+  private static void toXmlItemBlock(XMLOutputter doc, String key, String value)
+      throws IOException {
+    doc.startTag("item");
+    doc.attribute("label", key);
+    doc.attribute("value", value);
+    doc.endTag();
+  }
+
+  /**
+   * Generate an XML block of the form <item label="Node" value="hostname"
+   * link="http://hostname:50070" />
+   */
+  private static void toXmlItemBlockWithLink(XMLOutputter doc, String host,
+      String url, String nodetag) throws IOException {
+    doc.startTag("item");
+    doc.attribute("label", nodetag);
+    doc.attribute("value", host);
+    doc.attribute("link", "http://" + url);
+    doc.endTag(); // item
+  }
+
+  /**
+   * Create the XML for exceptions encountered when connecting to
+   * namenodes.
+   */
+  private static void createNamenodeExceptionMsg(XMLOutputter doc,
+      Map<String, Exception> exceptionMsg) throws IOException {
+    if (exceptionMsg.size() > 0) {
+      doc.startTag("unreportedNamenodes");
+      for (Map.Entry<String, Exception> m : exceptionMsg.entrySet()) {
+        doc.startTag("node");
+        doc.attribute("name", m.getKey());
+        doc.attribute("exception",
+            StringUtils.stringifyException(m.getValue()));
+        doc.endTag();// node
+      }
+      doc.endTag(); // unreportedNamenodes
+    }
+  }
+
+  /**
+   * Create an XML block for a general exception.
+   */
+  private static void createGeneralException(XMLOutputter doc,
+      String clusterid, String eMsg) throws IOException {
+    doc.startTag("cluster");
+    doc.attribute("clusterId", clusterid);
+    doc.startTag("message");
+    doc.startTag("item");
+    doc.attribute("msg", eMsg);
+    doc.endTag(); // item
+    doc.endTag(); // message
+    doc.endTag(); // cluster
+  }
+} 
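
For illustration, a standalone client could query the namenode MXBean the
same way NamenodeMXBeanHelper does above. This is a minimal sketch, not part
of the patch: the host and JMX port are placeholders (the helper reads the
port from the "dfs.namenode.jmxport" key), and it assumes NameNodeMXBean is
on the classpath.

    import javax.management.JMX;
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    import org.apache.hadoop.hdfs.server.namenode.NameNodeMXBean;

    public class NameNodeJmxProbe {
      public static void main(String[] args) throws Exception {
        // Placeholder namenode host and JMX port
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://nn1.example.com:8004/jmxrmi");
        JMXConnector connector = JMXConnectorFactory.connect(url);
        try {
          MBeanServerConnection remote = connector.getMBeanServerConnection();
          ObjectName name = new ObjectName("HadoopInfo:type=NameNodeInfo");
          // Same proxy mechanism as NamenodeMXBeanHelper.getNamenodeMxBean()
          NameNodeMXBean proxy =
              JMX.newMXBeanProxy(remote, name, NameNodeMXBean.class);
          System.out.println("clusterId = " + proxy.getClusterId());
          System.out.println("liveNodes = " + proxy.getLiveNodes());
        } finally {
          connector.close();
        }
      }
    }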

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1101324&r1=1101323&r2=1101324&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue May 10 06:10:03 2011
@@ -5504,6 +5504,11 @@ public class FSNamesystem implements FSC
   }
 
   @Override // NameNodeMXBean
+  public long getNumberOfMissingBlocks() {
+    return getMissingBlocksCount();
+  }
+  
+  @Override // NameNodeMXBean
   public int getThreads() {
     return ManagementFactory.getThreadMXBean().getThreadCount();
   }
@@ -5514,11 +5519,15 @@ public class FSNamesystem implements FSC
    */
   @Override // NameNodeMXBean
   public String getLiveNodes() {
-    final Map<String, Object> info = new HashMap<String, Object>();
-    final ArrayList<DatanodeDescriptor> aliveNodeList =
-      this.getDatanodeListForReport(DatanodeReportType.LIVE); 
-    removeDecomNodeFromList(aliveNodeList);
-    for (DatanodeDescriptor node : aliveNodeList) {
+    final Map<String, Map<String,Object>> info = 
+      new HashMap<String, Map<String,Object>>();
+    final ArrayList<DatanodeDescriptor> liveNodeList = 
+      new ArrayList<DatanodeDescriptor>();
+    final ArrayList<DatanodeDescriptor> deadNodeList =
+      new ArrayList<DatanodeDescriptor>();
+    DFSNodesStatus(liveNodeList, deadNodeList);
+    removeDecomNodeFromList(liveNodeList);
+    for (DatanodeDescriptor node : liveNodeList) {
       final Map<String, Object> innerinfo = new HashMap<String, Object>();
       innerinfo.put("lastContact", getLastContact(node));
       innerinfo.put("usedSpace", getDfsUsed(node));
@@ -5534,9 +5543,14 @@ public class FSNamesystem implements FSC
    */
   @Override // NameNodeMXBean
   public String getDeadNodes() {
-    final Map<String, Object> info = new HashMap<String, Object>();
+    final Map<String, Map<String, Object>> info = 
+      new HashMap<String, Map<String, Object>>();
+    final ArrayList<DatanodeDescriptor> liveNodeList =
+      new ArrayList<DatanodeDescriptor>();
     final ArrayList<DatanodeDescriptor> deadNodeList =
-      this.getDatanodeListForReport(DatanodeReportType.DEAD); 
+      new ArrayList<DatanodeDescriptor>();
+    // we need to call DFSNodesStatus to filter out the dead datanodes
+    DFSNodesStatus(liveNodeList, deadNodeList);
     removeDecomNodeFromList(deadNodeList);
     for (DatanodeDescriptor node : deadNodeList) {
       final Map<String, Object> innerinfo = new HashMap<String, Object>();
@@ -5553,7 +5567,8 @@ public class FSNamesystem implements FSC
    */
   @Override // NameNodeMXBean
   public String getDecomNodes() {
-    final Map<String, Object> info = new HashMap<String, Object>();
+    final Map<String, Map<String, Object>> info = 
+      new HashMap<String, Map<String, Object>>();
     final ArrayList<DatanodeDescriptor> decomNodeList = 
       this.getDecommissioningNodes();
     for (DatanodeDescriptor node : decomNodeList) {

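With this change, getLiveNodes(), getDeadNodes() and getDecomNodes()
serialize a nested map (datanode host -> map of attribute name to value) to
JSON, which is the shape ClusterJspHelper.getNodeMap() expects. As a sketch,
such a string can be parsed back with the same Jackson API used above; the
sample JSON and its attribute values here are illustrative only.

    import java.util.Map;

    import org.codehaus.jackson.map.ObjectMapper;
    import org.codehaus.jackson.type.TypeReference;

    public class NodeMapParser {
      public static void main(String[] args) throws Exception {
        // Illustrative JSON in the nested shape produced by getLiveNodes():
        // datanode host -> (attribute name -> value)
        String json = "{\"dn1.example.com\":{\"lastContact\":2,"
            + "\"usedSpace\":1024,\"adminState\":\"In Service\"}}";

        ObjectMapper mapper = new ObjectMapper();
        TypeReference<Map<String, Map<String, Object>>> type =
            new TypeReference<Map<String, Map<String, Object>>>() { };
        Map<String, Map<String, Object>> nodeMap = mapper.readValue(json, type);

        for (Map.Entry<String, Map<String, Object>> e : nodeMap.entrySet()) {
          System.out.println(e.getKey() + " adminState="
              + e.getValue().get("adminState"));
        }
      }
    }
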
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java?rev=1101324&r1=1101323&r2=1101324&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java Tue May 10 06:10:03 2011
@@ -119,6 +119,13 @@ public interface NameNodeMXBean {
   public long getTotalFiles();
   
   /**
+   * Gets the total number of missing blocks on the cluster.
+   * 
+   * @return the total number of missing blocks on the cluster
+   */
+  public long getNumberOfMissingBlocks();
+  
+  /**
    * Gets the number of threads.
    * 
    * @return the number of threads

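Per standard MXBean naming, the new getter is exposed over JMX as the
attribute "NumberOfMissingBlocks" (the method name minus the "get" prefix),
so it can also be read without the typed proxy. A sketch with a placeholder
service address:

    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class MissingBlocksCheck {
      public static void main(String[] args) throws Exception {
        // Placeholder address; see NamenodeMXBeanHelper for how the port
        // is derived from the "dfs.namenode.jmxport" configuration key.
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://nn1.example.com:8004/jmxrmi");
        JMXConnector connector = JMXConnectorFactory.connect(url);
        try {
          MBeanServerConnection mbs = connector.getMBeanServerConnection();
          ObjectName nn = new ObjectName("HadoopInfo:type=NameNodeInfo");
          // Attribute name follows the MXBean getter naming convention
          Long missing = (Long) mbs.getAttribute(nn, "NumberOfMissingBlocks");
          System.out.println("Missing blocks: " + missing);
        } finally {
          connector.close();
        }
      }
    }
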
Added: hadoop/hdfs/trunk/src/webapps/hdfs/decommission.jsp
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/webapps/hdfs/decommission.jsp?rev=1101324&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/webapps/hdfs/decommission.jsp (added)
+++ hadoop/hdfs/trunk/src/webapps/hdfs/decommission.jsp Tue May 10 06:10:03 2011
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="decommission.xsl"?>
+<%!
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file 
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+%>
+<%@ page 
+  contentType="application/xml"
+
+  import="org.apache.hadoop.util.ServletUtil"
+  import="org.apache.hadoop.hdfs.server.namenode.ClusterJspHelper.DecommissionStatus"
+  import="java.util.List"
+  import="org.znerd.xmlenc.*"
+%>
+<%!
+  //for java.io.Serializable
+  private static final long serialVersionUID = 1L;
+%>
+<%
+  /**
+   * This JSP page provides decommission node information across the cluster.
+   * It lists the datanodes with their decommission state/progress as
+   * reported by each namenode, and omits datanodes that are not in a
+   * decommission state.
+   */
+  final ClusterJspHelper clusterhealthjsp = new ClusterJspHelper();
+  DecommissionStatus dInfo = clusterhealthjsp.generateDecommissioningReport();
+  XMLOutputter doc = new XMLOutputter(out, "UTF-8");
+  dInfo.toXML(doc);
+%>

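The xml-stylesheet processing instruction above lets browsers apply
decommission.xsl to the emitted XML on the client side. The same transform
can be reproduced offline with standard JAXP; the file names below are
hypothetical local copies of the page output and the stylesheet
(dfsclusterhealth_utils.xsl must sit next to the stylesheet for its
xsl:include to resolve).

    import java.io.File;

    import javax.xml.transform.Transformer;
    import javax.xml.transform.TransformerFactory;
    import javax.xml.transform.stream.StreamResult;
    import javax.xml.transform.stream.StreamSource;

    public class RenderDecommissionReport {
      public static void main(String[] args) throws Exception {
        // Hypothetical local copies of the XML emitted by decommission.jsp
        // and of the stylesheet it references.
        Transformer t = TransformerFactory.newInstance()
            .newTransformer(new StreamSource(new File("decommission.xsl")));
        t.transform(new StreamSource(new File("decommission.xml")),
            new StreamResult(new File("decommission.html")));
      }
    }
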
Added: hadoop/hdfs/trunk/src/webapps/hdfs/decommission.xsl
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/webapps/hdfs/decommission.xsl?rev=1101324&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/webapps/hdfs/decommission.xsl (added)
+++ hadoop/hdfs/trunk/src/webapps/hdfs/decommission.xsl Tue May 10 06:10:03 2011
@@ -0,0 +1,139 @@
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<xsl:stylesheet version="1.0"
+  xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+
+  <xsl:include href="dfsclusterhealth_utils.xsl" />
+
+  <xsl:output method="html" encoding="UTF-8" />
+
+  <xsl:template match="/">
+    <html>
+      <head>
+        <link rel="stylesheet" type="text/css" href="static/hadoop.css" />
+        <title>
+          Hadoop cluster
+          <xsl:value-of select="cluster/@clusterId" />
+        </title>
+      </head>
+      <body>
+
+        <h1>
+          Cluster '
+          <xsl:value-of select="cluster/@clusterId" />
+          '
+        </h1>
+
+        <h2>Decommissioning Status</h2>
+
+        <xsl:if test="count(cluster/decommissioningReport/item)">
+          <div id="dfstable">
+            <table>
+              <tbody>
+                <xsl:for-each select="cluster/decommissioningReport/item">
+                  <tr class="rowNormal">
+                    <td id="col1">
+                      <xsl:value-of select="@label" />
+                    </td>
+                    <td id="col2">:</td>
+                    <td id="col3">
+                      <xsl:value-of select="@value" />
+                    </td>
+                  </tr>
+                </xsl:for-each>
+              </tbody>
+            </table>
+          </div>
+
+          <br />
+        </xsl:if>
+
+        <xsl:if test="count(cluster/datanodes/node)">
+
+          <div id="dfstable">
+            <table border="1" cellpadding="10" cellspacing="0">
+              <thead>
+                <xsl:for-each select="cluster/datanodes/node[1]/item">
+                  <th>
+                    <xsl:value-of select="@label" />
+                  </th>
+                </xsl:for-each>
+              </thead>
+              <tbody>
+                <xsl:for-each select="cluster/datanodes/node">
+                  <tr>
+                    <xsl:for-each select="item">
+                      <td>
+
+                        <xsl:call-template name="displayValue">
+                          <xsl:with-param name="value">
+                            <xsl:value-of select="@value" />
+                          </xsl:with-param>
+                          <xsl:with-param name="unit">
+                            <xsl:value-of select="@unit" />
+                          </xsl:with-param>
+                          <xsl:with-param name="link">
+                            <xsl:value-of select="@link" />
+
+                          </xsl:with-param>
+                        </xsl:call-template>
+                      </td>
+                    </xsl:for-each>
+                  </tr>
+                </xsl:for-each>
+              </tbody>
+            </table>
+          </div>
+
+        </xsl:if>
+
+        <xsl:if test="count(cluster/unreportedNamenodes/node)">
+          <h2>Unreported Namenodes</h2>
+          <div id="dfstable">
+            <table border="1" cellpadding="10" cellspacing="0">
+              <tbody>
+                <xsl:for-each select="cluster/unreportedNamenodes/node">
+                  <tr class="rowNormal">
+                    <td id="col1">
+                      <xsl:value-of select="@name" />
+                    </td>
+                    <td id="col2">
+                      <xsl:value-of select="@exception" />
+                    </td>
+                  </tr>
+                </xsl:for-each>
+              </tbody>
+            </table>
+          </div>
+        </xsl:if>
+
+        <xsl:if test="count(cluster/message/item)">
+          <h4>Exception</h4>
+          <xsl:for-each select="cluster/message/item">
+            <xsl:value-of select="@msg" />
+          </xsl:for-each>
+        </xsl:if>
+
+      </body>
+    </html>
+  </xsl:template>
+</xsl:stylesheet> 
+

Added: hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.jsp
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.jsp?rev=1101324&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.jsp (added)
+++ hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.jsp Tue May 10 06:10:03 2011
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="dfsclusterhealth.xsl"?>
+<%!
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file 
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+%>
+<%@ page 
+  contentType="application/xml"
+
+  import="org.apache.hadoop.util.ServletUtil"
+  import="org.apache.hadoop.hdfs.server.namenode.ClusterJspHelper.ClusterStatus"
+  import="java.util.List"
+  import="org.znerd.xmlenc.*"
+%>
+<%!
+  //for java.io.Serializable
+  private static final long serialVersionUID = 1L;
+%>
+<%
+   /** 
+    * This JSP page provides a cluster summary in XML format. It lists
+    * information such as total files and blocks, total capacity, and total
+    * used/free space across the cluster, as reported by all namenodes.
+    * It also lists per-namenode information such as used space.
+    */
+   final ClusterJspHelper clusterhealthjsp = new ClusterJspHelper();
+   ClusterStatus cInfo = clusterhealthjsp.generateClusterHealthReport();
+   XMLOutputter doc = new XMLOutputter(out, "UTF-8");
+   cInfo.toXML(doc);
+%>

Added: hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.xsl
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.xsl?rev=1101324&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.xsl (added)
+++ hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.xsl Tue May 10 06:10:03 2011
@@ -0,0 +1,169 @@
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<xsl:stylesheet version="1.0"
+  xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+
+  <xsl:include href="dfsclusterhealth_utils.xsl" />
+
+  <xsl:output method="html" encoding="UTF-8" />
+
+  <xsl:template match="/">
+    <html>
+      <head>
+        <link rel="stylesheet" type="text/css" href="static/hadoop.css" />
+        <title>
+          Hadoop cluster
+          <xsl:value-of select="cluster/@clusterId" />
+        </title>
+      </head>
+      <body>
+
+        <h1>
+          Cluster '
+          <xsl:value-of select="cluster/@clusterId" />
+          '
+        </h1>
+
+        <h2>Cluster Summary</h2>
+        <xsl:if test="count(cluster/storage/item)">
+          <div id="dfstable">
+            <table>
+              <tbody>
+                <xsl:for-each select="cluster/storage/item">
+                  <tr class="rowNormal">
+                    <td id="col1">
+                      <xsl:value-of select="@label" />
+                    </td>
+                    <td id="col2">:</td>
+                    <td id="col3">
+
+                      <xsl:call-template name="displayValue">
+                        <xsl:with-param name="value">
+                          <xsl:value-of select="@value" />
+                        </xsl:with-param>
+                        <xsl:with-param name="unit">
+                          <xsl:value-of select="@unit" />
+                        </xsl:with-param>
+                        <xsl:with-param name="link">
+                          <xsl:value-of select="@link" />
+
+                        </xsl:with-param>
+                      </xsl:call-template>
+                    </td>
+                  </tr>
+                </xsl:for-each>
+              </tbody>
+            </table>
+          </div>
+
+          <br />
+          <hr />
+        </xsl:if>
+        <xsl:if test="count(cluster/namenodes/node)">
+          <h2>Namenodes</h2>
+
+          <div id="dfstable">
+            <table>
+              <tbody>
+                <tr class="rowNormal">
+                  <td id="col1">Number of namenodes</td>
+                  <td id="col2">:</td>
+                  <td id="col3">
+                    <xsl:value-of select="count(cluster/namenodes/node)" />
+                  </td>
+                </tr>
+              </tbody>
+            </table>
+          </div>
+
+          <br />
+
+          <div id="dfstable">
+            <table border="1" cellpadding="10" cellspacing="0">
+
+              <thead>
+                <xsl:for-each select="cluster/namenodes/node[1]/item">
+                  <th>
+                    <xsl:value-of select="@label" />
+                  </th>
+                </xsl:for-each>
+              </thead>
+
+              <tbody>
+                <xsl:for-each select="cluster/namenodes/node">
+                  <tr>
+                    <xsl:for-each select="item">
+                      <td>
+
+                        <xsl:call-template name="displayValue">
+                          <xsl:with-param name="value">
+                            <xsl:value-of select="@value" />
+                          </xsl:with-param>
+                          <xsl:with-param name="unit">
+                            <xsl:value-of select="@unit" />
+                          </xsl:with-param>
+                          <xsl:with-param name="link">
+                            <xsl:value-of select="@link" />
+
+                          </xsl:with-param>
+                        </xsl:call-template>
+                      </td>
+                    </xsl:for-each>
+                  </tr>
+                </xsl:for-each>
+              </tbody>
+
+            </table>
+          </div>
+        </xsl:if>
+
+        <xsl:if test="count(cluster/unreportedNamenodes/node)">
+          <h2>Unreported Namenodes</h2>
+          <div id="dfstable">
+            <table border="1" cellpadding="10" cellspacing="0">
+              <tbody>
+                <xsl:for-each select="cluster/unreportedNamenodes/node">
+                  <tr class="rowNormal">
+                    <td id="col1">
+                      <xsl:value-of select="@name" />
+                    </td>
+                    <td id="col2">
+                      <xsl:value-of select="@exception" />
+                    </td>
+                  </tr>
+                </xsl:for-each>
+              </tbody>
+            </table>
+          </div>
+        </xsl:if>
+
+        <xsl:if test="count(cluster/message/item)">
+          <h4>Exception</h4>
+          <xsl:for-each select="cluster/message/item">
+            <xsl:value-of select="@msg" />
+          </xsl:for-each>
+        </xsl:if>
+
+      </body>
+    </html>
+  </xsl:template>
+</xsl:stylesheet> 
+

Added: hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth_utils.xsl
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth_utils.xsl?rev=1101324&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth_utils.xsl (added)
+++ hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth_utils.xsl Tue May 10 06:10:03 2011
@@ -0,0 +1,88 @@
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+
+  <xsl:template name="humanReadableBytes">
+
+    <xsl:param name="number"/>
+
+    <xsl:variable name="kb" select="1024"/>
+    <xsl:variable name="Mb" select="$kb * 1024"/>
+    <xsl:variable name="Gb" select="$Mb * 1024"/>
+    <xsl:variable name="Tb" select="$Gb * 1024"/>
+    <xsl:variable name="Pb" select="$Tb * 1024"/>
+
+     
+    <xsl:choose>
+      <xsl:when test="$number &lt; $kb"><xsl:value-of select="format-number($number, '#,###.##')"/> b</xsl:when>
+      <xsl:when test="$number &lt; $Mb"><xsl:value-of select="format-number($number div $kb, '#,###.00')"/> kb</xsl:when>
+      <xsl:when test="$number &lt; $Gb"><xsl:value-of select="format-number($number div $Mb, '#,###.00')"/> Mb</xsl:when>
+      <xsl:when test="$number &lt; $Tb"><xsl:value-of select="format-number($number div $Gb, '#,###.00')"/> Gb</xsl:when>
+
+      <xsl:when test="$number &lt; $Pb"><xsl:value-of select="format-number($number div $Tb, '#,###.00')"/> Tb</xsl:when>
+      <xsl:when test="$number &lt; ($Pb * 1024)"><xsl:value-of select="format-number($number div $Pb, '#,###.00')"/> Pb</xsl:when>
+      <xsl:otherwise><xsl:value-of select="format-number($number, '#,###.00')"/> b</xsl:otherwise>
+    </xsl:choose>
+
+  </xsl:template>
+
+  <xsl:template name="percentage">
+    <xsl:param name="number"/>
+    <xsl:value-of select="format-number($number, '0.000%')"/>
+  </xsl:template>
+
+  <!--
+    Displays value:
+      - if it has parameter unit="b" then call humanReadableBytes
+      - if it has parameter link then call displayLink
+  -->
+  <xsl:template name="displayValue">
+    <xsl:param name="value"/>
+    <xsl:param name="unit"/>
+
+    <xsl:param name="link"/>
+    <xsl:choose>
+      <xsl:when test="$unit = 'b'">
+        <xsl:call-template name="humanReadableBytes">
+          <xsl:with-param name="number">
+            <xsl:value-of select="@value"/>
+          </xsl:with-param>
+        </xsl:call-template>
+      </xsl:when>
+
+      <xsl:when test="$unit = '%'">
+        <xsl:call-template name="percentage">
+          <xsl:with-param name="number">
+            <xsl:value-of select="@value"/>
+          </xsl:with-param>
+        </xsl:call-template>
+      </xsl:when>
+      <xsl:when test="string-length($link) &gt; 0">
+        <a href="{$link}"><xsl:value-of select="$value"/></a>
+
+      </xsl:when>
+      <xsl:otherwise><xsl:value-of select="$value"/></xsl:otherwise>
+    </xsl:choose>
+
+  </xsl:template>
+
+</xsl:stylesheet> 
+


