hadoop-common-commits mailing list archives

From: omal...@apache.org
Subject: svn commit: r1077592 [1/3] - in /hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server: datanode/ namenode/
Date: Fri, 04 Mar 2011 04:32:58 GMT
Author: omalley
Date: Fri Mar  4 04:32:57 2011
New Revision: 1077592

URL: http://svn.apache.org/viewvc?rev=1077592&view=rev
Log:
commit b32cfbbc81665ca81b28e03570d5a321994b0bce
Author: Suresh Srinivas <sureshms@yahoo-inc.com>
Date:   Fri Jul 23 11:55:54 2010 -0700

    HDFS-1318 from https://issues.apache.org/jira/secure/attachment/12450341/HDFS-1318.y20.patch
    
    +++ b/YAHOO-CHANGES.txt
    +
    +    HDFS-1318. HDFS Namenode and Datanode WebUI information needs to be
    +    accessible programmatically for scripts. (Tanping Wang via suresh)
    +
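
[Editor's sketch: this change exposes the information rendered by the NameNode
and DataNode web UIs as JMX MXBeans ("hadoop:type=NameNodeInfo" and
"hadoop:type=DataNodeInfo"), so scripts can read it without scraping HTML. A
minimal script-side client might look like the following; the host name and
JMX port are hypothetical and assume the daemon was started with remote JMX
enabled (e.g. -Dcom.sun.management.jmxremote.port=8004).

    import java.util.Arrays;
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class NameNodeInfoClient {
      public static void main(String[] args) throws Exception {
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://namenode.example.com:8004/jmxrmi");
        JMXConnector connector = JMXConnectorFactory.connect(url);
        try {
          MBeanServerConnection mbsc = connector.getMBeanServerConnection();
          ObjectName name = new ObjectName("hadoop:type=NameNodeInfo");
          // MXBean attribute names are derived from the getters:
          // getVersion() -> "Version", getTotal() -> "Total", ...
          for (String attr : Arrays.asList("Version", "Total", "Used", "Free")) {
            System.out.println(attr + " = " + mbsc.getAttribute(name, attr));
          }
        } finally {
          connector.close();
        }
      }
    }
]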

Added:
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java.orig
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
Modified:
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1077592&r1=1077591&r2=1077592&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Mar  4 04:32:57 2011
@@ -71,6 +71,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -105,8 +106,18 @@ import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+import org.mortbay.util.ajax.JSON;
+
+import java.lang.management.ManagementFactory;  
+
+import javax.management.InstanceAlreadyExistsException;
+import javax.management.MBeanRegistrationException;
+import javax.management.MBeanServer; 
+import javax.management.ObjectInstance;
+import javax.management.ObjectName;
 
 /**********************************************************
  * DataNode is a class (and program) that stores a set of
@@ -140,7 +151,8 @@ import org.apache.hadoop.util.DiskChecke
  *
  **********************************************************/
 public class DataNode extends Configured 
-    implements InterDatanodeProtocol, ClientDatanodeProtocol, FSConstants, Runnable {
+    implements InterDatanodeProtocol, ClientDatanodeProtocol, FSConstants, 
+    Runnable, DataNodeMXBean {
   public static final Log LOG = LogFactory.getLog(DataNode.class);
   
   static{
@@ -272,6 +284,9 @@ public class DataNode extends Configured
    * 
    * @param dataDirs - only for a non-simulated storage data node
    * @throws IOException
+   * @throws MalformedObjectNameException 
+   * @throws MBeanRegistrationException 
+   * @throws InstanceAlreadyExistsException 
    */
   void startDataNode(Configuration conf, 
                      AbstractList<File> dataDirs, SecureResources resources
@@ -344,8 +359,10 @@ public class DataNode extends Configured
       // initialize data node internal structure
       this.data = new FSDataset(storage, conf);
     }
-
       
+    // register datanode MBean
+    registerMBean();
+    
     // find free port or use privileged port provided
     ServerSocket ss;
     if(secureResources == null) {
@@ -446,6 +463,17 @@ public class DataNode extends Configured
     LOG.info("dnRegistration = " + dnRegistration);
   }
 
+  private void registerMBean() {
+    // register MXBean
+    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
+    try {
+      ObjectName mxbeanName = new ObjectName("hadoop:type=DataNodeInfo");
+      mbs.registerMBean(this, mxbeanName);
+    } catch (javax.management.JMException e1) {
+      LOG.warn("Failed to register DataNode MBean", e1);
+    }
+  }
+  
   /**
    * Determine the http server's effective addr
    */
@@ -1797,4 +1825,64 @@ public class DataNode extends Configured
                                 "dfs.datanode.address");
     return NetUtils.createSocketAddr(address);
   }
+
+  /**
+   * Class for representing the Datanode volume information in MBean interface.
+   */
+  class VolumeInfo {
+    private final String directory;
+    private final long usedSpace;
+    private final long freeSpace;
+    private final long reservedSpace;
+    
+    VolumeInfo(String dir, long usedSpace, long freeSpace, long reservedSpace) {
+      this.directory = dir;
+      this.usedSpace = usedSpace;
+      this.freeSpace = freeSpace;
+      this.reservedSpace = reservedSpace;
+    }
+  }
+
+  @Override
+  public String getVersion() {
+    return VersionInfo.getVersion();
+  }
+  
+  @Override
+  public String getRpcPort(){
+    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
+        this.getConf().get("dfs.datanode.ipc.address"));
+    return Integer.toString(ipcAddr.getPort());
+  }
+
+  @Override
+  public String getHttpPort(){
+    return this.getConf().get("dfs.datanode.info.port");
+  }
+
+  @Override
+  public String getNamenodeAddress(){
+    return nameNodeAddr.getHostName();
+  }
+
+  @Override
+  public synchronized String getVolumeInfo() {
+    List<VolumeInfo> list = new ArrayList<VolumeInfo>(3);
+    FSVolume[] volumes = ((FSDataset) this.data).volumes.volumes;
+    for (int idx = 0; idx < volumes.length; idx++) {
+      try {
+        VolumeInfo info = new VolumeInfo(volumes[idx].toString(),
+                                         volumes[idx].getDfsUsed(),
+                                         volumes[idx].getAvailable(),
+                                         volumes[idx].getReserved());
+        list.add(info);
+      } catch (IOException e) {
+        LOG.warn("Exception while accessing volume info", e);
+      }
+
+    }
+    VolumeInfo[] result = new VolumeInfo[list.size()];
+    list.toArray(result);
+    return JSON.toString(result);
+  }
 }
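
[Editor's sketch: a quick in-process check of the new DataNodeInfo bean, e.g.
from a test running in the DataNode JVM after startDataNode(); the attribute
names follow from the MXBean getters above.

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class DataNodeInfoCheck {
      static void check() throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = new ObjectName("hadoop:type=DataNodeInfo");
        // getRpcPort() -> attribute "RpcPort", and so on for each getter.
        for (String attr : new String[] {
            "Version", "RpcPort", "HttpPort", "NamenodeAddress", "VolumeInfo"}) {
          System.out.println(attr + " = " + mbs.getAttribute(name, attr));
        }
      }
    }
]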

Added: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java?rev=1077592&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java Fri Mar  4 04:32:57 2011
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+/**
+ * This is the JMX management interface for data node information.
+ */
+public interface DataNodeMXBean {
+  /** @return the version of Hadoop */
+  public String getVersion();
+  /** @return the RPC port */
+  public String getRpcPort();
+  /** @return the HTTP port */
+  public String getHttpPort();
+  /** @return the namenode's host name */
+  public String getNamenodeAddress();
+  /** @return the volume information, as a JSON string */
+  public String getVolumeInfo();
+}
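
[Editor's note: every getter here returns a String, with structured data such
as the volume list flattened to JSON. Because the interface name ends in
"MXBean", the platform MBeanServer exposes it as an MXBean, whose attribute
types must map to JMX open types; plain Strings let generic clients such as
jconsole or monitoring scripts read the attributes without Hadoop classes on
their classpath.]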

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1077592&r1=1077591&r2=1077592&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Fri Mar  4 04:32:57 2011
@@ -375,6 +375,10 @@ public class FSDataset implements FSCons
       return (remaining > 0) ? remaining : 0;
     }
       
+    long getReserved() {
+      return reserved;
+    }
+    
     String getMount() throws IOException {
       return usage.getMount();
     }

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1077592&r1=1077591&r2=1077592&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Mar  4 04:32:57 2011
@@ -63,6 +63,7 @@ import org.apache.hadoop.fs.permission.*
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Server;
+import org.mortbay.util.ajax.JSON;
 
 import java.io.BufferedWriter;
 import java.io.ByteArrayInputStream;
@@ -73,13 +74,16 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.DataOutputStream;
+import java.lang.management.ManagementFactory;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.*;
 import java.util.concurrent.TimeUnit;
 import java.util.Map.Entry;
 
+import javax.management.MBeanServer;
 import javax.management.NotCompliantMBeanException;
+import javax.management.ObjectInstance;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
@@ -95,7 +99,8 @@ import javax.management.StandardMBean;
  * 4)  machine --> blocklist (inverted #2)
  * 5)  LRU cache of updated-heartbeat machines
  ***************************************************/
-public class FSNamesystem implements FSConstants, FSNamesystemMBean {
+public class FSNamesystem implements FSConstants, FSNamesystemMBean,
+    NameNodeMXBean {
   public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
   public static final String AUDIT_FORMAT =
     "ugi=%s\t" +  // ugi
@@ -377,6 +382,15 @@ public class FSNamesystem implements FSC
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
       dnsToSwitchMapping.resolve(new ArrayList<String>(hostsReader.getHosts()));
     }
+    
+    // register MXBean
+    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+    try {
+      ObjectName mxbeanName = new ObjectName("hadoop:type=NameNodeInfo");
+      ObjectInstance oi = mbs.registerMBean(this, mxbeanName);
+    } catch (Exception e) {
+      LOG.warn("Failed to register NameNodeInfo MXBean", e);
+    }
   }
 
   public static Collection<File> getNamespaceDirs(Configuration conf) {
@@ -5134,4 +5148,166 @@ public class FSNamesystem implements FSC
     }
     return authMethod;
   }
+
+  // implement NameNodeMXBean
+  /**
+   * Class representing Namenode information for JMX interfaces.
+   */
+  class NodeInfo {
+    private Map<String, Map<String, String>> ni;
+
+    public void setNodeInfo(Map<String, Map<String, String>> input) {
+      this.ni = input;
+    }
+    Map<String, Map<String, String>> getNodeInfo() {
+      return ni;
+    }
+
+    @Override
+    public String toString() {
+      return new JSON().toJSON(ni);
+    }
+  }
+  @Override
+  public String getVersion() {
+    return VersionInfo.getVersion();
+  }
+
+  @Override
+  public String getUsed(){
+    return Long.toString(this.getCapacityUsed());
+  }
+
+  @Override
+  public String getFree(){
+    return Long.toString(this.getCapacityRemaining());
+  }
+
+  @Override
+  public String getTotal(){
+    return Long.toString(this.getCapacityTotal());
+  }
+
+  @Override
+  public String getSafemode() {
+    if (!this.isInSafeMode())
+      return "";
+    return "Safe mode is ON." + this.getSafeModeTip();
+  }
+
+  @Override
+  public boolean isUpgradeFinalized(){
+    return this.getFSImage().isUpgradeFinalized();
+  }
+  
+  @Override
+  public String getNondfs(){
+    return Long.toString(getCapacityUsedNonDFS());
+  }
+  
+  @Override
+  public String getPercentused(){
+    return Float.toString(getCapacityUsedPercent());
+  }
+  
+  @Override
+  public String getPercentRemaining(){
+    return Float.toString(getCapacityRemainingPercent());
+  }
+  
+  @Override
+  public String getTotalblocks(){
+    return Long.toString(getBlocksTotal());
+  }
+  
+  @Override
+  public String getTotalfiles(){
+    return Long.toString(getFilesTotal());
+  }
+  
+  @Override
+  public String getAliveNodeInfo(){
+    NodeInfo ni = new NodeInfo();
+    ArrayList<DatanodeDescriptor> aliveNodeList = new ArrayList<DatanodeDescriptor>();
+    ArrayList<DatanodeDescriptor> deadNodeList = new ArrayList<DatanodeDescriptor>();
+    this.DFSNodesStatus(aliveNodeList, deadNodeList);
+    Map<String, Map<String,String>>  info = new HashMap<String, Map<String,String>>();
+    for (DatanodeDescriptor node : aliveNodeList ){
+      // key -- hostname
+      String hostname = node.getHostName();
+      // value -- Map<String, String> innerinfo
+      Map<String, String> innerinfo = new HashMap<String, String>();
+      // lastcontact
+      String lastContactKey = "lastcontact";
+      String lastContactValue = getLastContact(node);
+      innerinfo.put(lastContactKey, lastContactValue);
+      // usedspace
+      String usedspaceKey = "usedspace";
+      String usedspaceValue = getDfsUsed(node);
+      innerinfo.put(usedspaceKey, usedspaceValue);
+      info.put(hostname, innerinfo);
+    }
+    ni.setNodeInfo(info);
+    return ni.toString();
+  }
+  
+  @Override
+  public String getDeadNodeInfo(){
+    NodeInfo ni = new NodeInfo();
+    ArrayList<DatanodeDescriptor> aliveNodeList = new ArrayList<DatanodeDescriptor>();
+    ArrayList<DatanodeDescriptor> deadNodeList = new ArrayList<DatanodeDescriptor>();
+    this.DFSNodesStatus(aliveNodeList, deadNodeList);
+    Map<String, Map<String,String>>  info = new HashMap<String, Map<String,String>>();
+    for (DatanodeDescriptor node : deadNodeList ){
+      // key -- hostname
+      String hostname = node.getHostName();
+      // value -- Map<String, String> innerinfo
+      Map<String, String> innerinfo = new HashMap<String, String>();
+      // lastcontact
+      String lastContactKey = "lastcontact";
+      String lastContactValue = getLastContact(node);
+      innerinfo.put(lastContactKey, lastContactValue);
+      info.put(hostname, innerinfo);
+    }
+    ni.setNodeInfo(info);
+    return ni.toString();
+  }
+
+  @Override
+  public String getDecomNodeInfo(){
+    NodeInfo ni = new NodeInfo();
+    ArrayList<DatanodeDescriptor> decomNodeList =
+        this.getDecommissioningNodes();
+    Map<String, Map<String,String>>  info = new HashMap<String, Map<String,String>>();
+    for (DatanodeDescriptor node : decomNodeList ){
+      // key -- hostname
+      String hostname = node.getHostName();
+      // value -- Map<String, String> innerinfo
+      Map<String, String> innerinfo = new HashMap<String, String>();
+      // underReplicatedBlocks
+      String underReplicatedBlocksKey = "underReplicatedBlocks";
+      String underReplicatedBlocksValue = Integer.toString(node.decommissioningStatus.getUnderReplicatedBlocks());
+      innerinfo.put(underReplicatedBlocksKey, underReplicatedBlocksValue);
+      // decommissionOnlyReplicas
+      String decommissionOnlyReplicasKey = "decommissionOnlyReplicas";
+      String decommissionOnlyReplicasValue = Integer.toString(node.decommissioningStatus.getDecommissionOnlyReplicas());
+      innerinfo.put(decommissionOnlyReplicasKey, decommissionOnlyReplicasValue);
+      // underReplicatedInOpenFiles
+      String underReplicatedInOpenFilesKey = "underReplicatedInOpenFiles";
+      String underReplicatedInOpenFilesValue = Integer.toString(node.decommissioningStatus.getUnderReplicatedInOpenFiles());
+      innerinfo.put(underReplicatedInOpenFilesKey, underReplicatedInOpenFilesValue);
+      info.put(hostname, innerinfo);
+    }
+    ni.setNodeInfo(info);
+    return ni.toString();
+  }
+  
+  private String getLastContact(DatanodeDescriptor alivenode) {
+    return Long.toString((System.currentTimeMillis() - alivenode.getLastUpdate()) / 1000);
+  }
+
+  private String getDfsUsed(DatanodeDescriptor alivenode) {
+    return Long.toString(alivenode.getDfsUsed());
+  }
+
 }
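
[Editor's sketch: since getAliveNodeInfo() and its siblings encode a
Map<hostname, Map<key, value>> with the same Jetty JSON helper imported above,
a script-side consumer can decode them with JSON.parse(). The attribute string
would come from JMX as in the earlier client example; the field names
"lastcontact" and "usedspace" match getAliveNodeInfo() above.

    import java.util.Map;
    import org.mortbay.util.ajax.JSON;

    public class AliveNodeInfoParser {
      @SuppressWarnings("unchecked")
      static void print(String aliveNodeInfo) {
        // JSON objects parse back as Maps of attribute name -> value.
        Map<String, Object> byHost = (Map<String, Object>) JSON.parse(aliveNodeInfo);
        for (Map.Entry<String, Object> e : byHost.entrySet()) {
          Map<String, Object> fields = (Map<String, Object>) e.getValue();
          System.out.println(e.getKey()
              + " lastcontact=" + fields.get("lastcontact")
              + " usedspace=" + fields.get("usedspace"));
        }
      }
    }
]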


