hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sur...@apache.org
Subject svn commit: r986522 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/
Date Tue, 17 Aug 2010 23:15:35 GMT
Author: suresh
Date: Tue Aug 17 23:15:35 2010
New Revision: 986522

URL: http://svn.apache.org/viewvc?rev=986522&view=rev
Log:
HDFS-1318. Add JMX interface for read access to namenode and datanode web UI information.
Contributed by Tanping Wang.

Added:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=986522&r1=986521&r2=986522&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Tue Aug 17 23:15:35 2010
@@ -113,6 +113,9 @@ Trunk (unreleased changes)
 
     HDFS-1036. docs for fetchdt
 
+    HDFS-1318. Add JMX interface for read access to namenode and datanode
+    web UI information. (Tanping Wang via suresh).
+
   OPTIMIZATIONS
 
     HDFS-1140. Speedup INode.getPathComponents. (Dmytro Molkov via shv)

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=986522&r1=986521&r2=986522&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Tue Aug
17 23:15:35 2010
@@ -41,8 +41,10 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -80,6 +82,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -117,8 +120,15 @@ import org.apache.hadoop.util.GenericOpt
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+import org.mortbay.util.ajax.JSON;
+
+import java.lang.management.ManagementFactory;  
+
+import javax.management.MBeanServer; 
+import javax.management.ObjectName;
 
 /**********************************************************
  * DataNode is a class (and program) that stores a set of
@@ -153,7 +163,8 @@ import org.apache.hadoop.util.DiskChecke
  **********************************************************/
 @InterfaceAudience.Private
 public class DataNode extends Configured 
-    implements InterDatanodeProtocol, ClientDatanodeProtocol, FSConstants, Runnable {
+    implements InterDatanodeProtocol, ClientDatanodeProtocol, FSConstants,
+    Runnable, DataNodeMXBean {
   public static final Log LOG = LogFactory.getLog(DataNode.class);
   
   static{
@@ -354,6 +365,8 @@ public class DataNode extends Configured
       this.data = new FSDataset(storage, conf);
     }
 
+    // register datanode MXBean
+    registerMXBean();
       
     // find free port or use privileged port provided
     ServerSocket ss;
@@ -476,6 +489,17 @@ public class DataNode extends Configured
         conf.get("dfs.datanode.http.address", "0.0.0.0:50075"));
   }
   
+  private void registerMXBean() {
+    // register MXBean
+    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
+    try {
+      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=DataNodeInfo");
+      mbs.registerMBean(this, mxbeanName);
+    } catch ( javax.management.JMException e ) {
+      LOG.warn("Failed to register DataNode MXBean", e);
+    }
+  }
+
   /**
    * Creates either NIO or regular depending on socketWriteTimeout.
    */
@@ -1893,4 +1917,44 @@ public class DataNode extends Configured
     return NetUtils.createSocketAddr(
         conf.get("dfs.datanode.address", "0.0.0.0:50010"));
   }
+  @Override // DataNodeMXBean
+  public String getVersion() {
+    return VersionInfo.getVersion();
+  }
+  
+  @Override // DataNodeMXBean
+  public String getRpcPort(){
+    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
+        this.getConf().get("dfs.datanode.ipc.address"));
+    return Integer.toString(ipcAddr.getPort());
+  }
+
+  @Override // DataNodeMXBean
+  public String getHttpPort(){
+    return this.getConf().get("dfs.datanode.info.port");
+  }
+
+  @Override // DataNodeMXBean
+  public String getNamenodeAddress(){
+    return nameNodeAddr.getHostName();
+  }
+
+  /**
+   * Returned information is a JSON representation of a map with 
+   * volume name as the key and value is a map of volume attribute 
+   * keys to its values
+   */
+  @Override // DataNodeMXBean
+  public String getVolumeInfo() {
+    final Map<String, Object> info = new HashMap<String, Object>();
+    Collection<VolumeInfo> volumes = ((FSDataset)this.data).getVolumeInfo();
+    for (VolumeInfo v : volumes) {
+      final Map<String, Object> innerInfo = new HashMap<String, Object>();
+      innerInfo.put("usedSpace", v.usedSpace);
+      innerInfo.put("freeSpace", v.freeSpace);
+      innerInfo.put("reservedSpace", v.reservedSpace);
+      info.put(v.directory, innerInfo);
+    }
+    return JSON.toString(info);
+  }
 }

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java?rev=986522&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
(added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
Tue Aug 17 23:15:35 2010
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * 
+ * This is the JMX management interface for data node information
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public interface DataNodeMXBean {
+  
+  /**
+   * Gets the version of Hadoop.
+   * 
+   * @return the version of Hadoop
+   */
+  public String getVersion();
+  
+  /**
+   * Gets the rpc port.
+   * 
+   * @return the rpc port
+   */
+  public String getRpcPort();
+  
+  /**
+   * Gets the http port.
+   * 
+   * @return the http port
+   */
+  public String getHttpPort();
+  
+  /**
+   * Gets the hostname of the namenode this datanode reports to
+   * (the implementation returns the hostname, not the IP address).
+   * 
+   * @return the namenode hostname
+   */
+  public String getNamenodeAddress();
+  
+  /**
+   * Gets the information of each volume on the Datanode. Please
+   * see the implementation for the format of returned information.
+   * 
+   * @return the volume info
+   */
+  public String getVolumeInfo();
+}

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=986522&r1=986521&r2=986522&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Tue Aug
17 23:15:35 2010
@@ -30,6 +30,7 @@ import java.io.RandomAccessFile;
 import java.nio.channels.FileChannel;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
@@ -377,6 +378,10 @@ public class FSDataset implements FSCons
       return (remaining > 0) ? remaining : 0;
     }
       
+    long getReserved(){
+      return reserved;
+    }
+    
     String getMount() throws IOException {
       return usage.getMount();
     }
@@ -2103,4 +2108,45 @@ public class FSDataset implements FSCons
     }
     return replica.getVisibleLength();
   }
+  /**
+   * Class for representing the Datanode volume information
+   */
+  static class VolumeInfo {
+    final String directory;
+    final long usedSpace;
+    final long freeSpace;
+    final long reservedSpace;
+
+    VolumeInfo(String dir, long usedSpace, long freeSpace, long reservedSpace) {
+      this.directory = dir;
+      this.usedSpace = usedSpace;
+      this.freeSpace = freeSpace;
+      this.reservedSpace = reservedSpace;
+    }
+  }  
+  
+  synchronized Collection<VolumeInfo> getVolumeInfo() {
+    Collection<VolumeInfo> info = new ArrayList<VolumeInfo>();
+    synchronized(volumes.volumes) {
+      for (FSVolume volume : volumes.volumes) {
+        long used = 0;
+        try {
+          used = volume.getDfsUsed();
+        } catch (IOException e) {
+          DataNode.LOG.warn(e.getMessage());
+        }
+        
+        long free= 0;
+        try {
+          free = volume.getAvailable();
+        } catch (IOException e) {
+          DataNode.LOG.warn(e.getMessage());
+        }
+        
+        info.add(new VolumeInfo(volume.toString(), used, free, 
+            volume.getReserved()));
+      }
+      return info;
+    }
+  }
 }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=986522&r1=986521&r2=986522&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue
Aug 17 23:15:35 2010
@@ -80,6 +80,7 @@ import org.apache.hadoop.fs.permission.*
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
+import org.mortbay.util.ajax.JSON;
 
 import java.io.BufferedWriter;
 import java.io.ByteArrayInputStream;
@@ -90,6 +91,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.DataOutputStream;
+import java.lang.management.ManagementFactory;
 import java.net.InetAddress;
 import java.net.URI;
 import java.util.*;
@@ -98,6 +100,7 @@ import java.util.Map.Entry;
 import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
+import javax.management.MBeanServer;
 
 /***************************************************
  * FSNamesystem does the actual bookkeeping work for the
@@ -112,7 +115,8 @@ import javax.management.StandardMBean;
  * 5)  LRU cache of updated-heartbeat machines
  ***************************************************/
 @InterfaceAudience.Private
-public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterStats {
+public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterStats,
+    NameNodeMXBean {
   public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
 
   private static final ThreadLocal<StringBuilder> auditBuffer =
@@ -348,6 +352,7 @@ public class FSNamesystem implements FSC
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
       dnsToSwitchMapping.resolve(new ArrayList<String>(hostsReader.getHosts()));
     }
+    registerMXBean();
   }
 
   public static Collection<URI> getNamespaceDirs(Configuration conf) {
@@ -4710,4 +4715,147 @@ public class FSNamesystem implements FSC
                     "fsck", src, null, null);
     }
   }
+  /**
+   * Register NameNodeMXBean
+   */
+  private void registerMXBean() {
+    // register MXBean
+    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+    try {
+      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=NameNodeInfo");
+      mbs.registerMBean(this, mxbeanName);
+    } catch ( javax.management.JMException e ) {
+      LOG.warn("Failed to register NameNodeMXBean", e);
+    }
+  }
+
+  /**
+   * Gets the version of Hadoop.
+   */
+  @Override // NameNodeMXBean
+  public String getVersion() {
+    return VersionInfo.getVersion();
+  }
+
+  @Override // NameNodeMXBean
+  public long getUsed() {
+    return this.getCapacityUsed();
+  }
+
+  @Override // NameNodeMXBean
+  public long getFree() {
+    return this.getCapacityRemaining();
+  }
+
+  @Override // NameNodeMXBean
+  public long getTotal() {
+    return this.getCapacityTotal();
+  }
+
+  @Override // NameNodeMXBean
+  public String getSafemode() {
+    if (!this.isInSafeMode())
+      return "";
+    return "Safe mode is ON." + this.getSafeModeTip();
+  }
+
+  @Override // NameNodeMXBean
+  public boolean isUpgradeFinalized() {
+    return this.getFSImage().isUpgradeFinalized();
+  }
+
+  @Override // NameNodeMXBean
+  public long getNonDfsUsedSpace() {
+    return getCapacityUsedNonDFS();
+  }
+
+  @Override // NameNodeMXBean
+  public float getPercentUsed() {
+    return getCapacityUsedPercent();
+  }
+
+  @Override // NameNodeMXBean
+  public float getPercentRemaining() {
+    return getCapacityRemainingPercent();
+  }
+
+  @Override // NameNodeMXBean
+  public long getTotalBlocks() {
+    return getBlocksTotal();
+  }
+
+  @Override // NameNodeMXBean
+  public long getTotalFiles() {
+    return getFilesTotal();
+  }
+
+  @Override // NameNodeMXBean
+  public int getThreads() {
+    return ManagementFactory.getThreadMXBean().getThreadCount();
+  }
+
+  /**
+   * Returned information is a JSON representation of a map with host name as the
+   * key and value is a map of live node attribute keys to its values
+   */
+  @Override // NameNodeMXBean
+  public String getLiveNodes() {
+    final Map<String, Object> info = new HashMap<String, Object>();
+    final ArrayList<DatanodeDescriptor> aliveNodeList =
+      this.getDatanodeListForReport(DatanodeReportType.LIVE); 
+    for (DatanodeDescriptor node : aliveNodeList) {
+      final Map<String, Object> innerinfo = new HashMap<String, Object>();
+      innerinfo.put("lastContact", getLastContact(node));
+      innerinfo.put("usedSpace", getDfsUsed(node));
+      info.put(node.getHostName(), innerinfo);
+    }
+    return JSON.toString(info);
+  }
+
+  /**
+   * Returned information is a JSON representation of a map with host name as the
+   * key and value is a map of dead node attribute keys to its values
+   */
+  @Override // NameNodeMXBean
+  public String getDeadNodes() {
+    final Map<String, Object> info = new HashMap<String, Object>();
+    final ArrayList<DatanodeDescriptor> deadNodeList =
+      this.getDatanodeListForReport(DatanodeReportType.DEAD); 
+    for (DatanodeDescriptor node : deadNodeList) {
+      final Map<String, Object> innerinfo = new HashMap<String, Object>();
+      innerinfo.put("lastContact", getLastContact(node));
+      info.put(node.getHostName(), innerinfo);
+    }
+    return JSON.toString(info);
+  }
+
+  /**
+   * Returned information is a JSON representation of a map with host name as the
+   * key and value is a map of decommissioning node attribute keys to its values
+   */
+  @Override // NameNodeMXBean
+  public String getDecomNodes() {
+    final Map<String, Object> info = new HashMap<String, Object>();
+    final ArrayList<DatanodeDescriptor> decomNodeList = 
+      this.getDecommissioningNodes();
+    for (DatanodeDescriptor node : decomNodeList) {
+      final Map<String, Object> innerinfo = new HashMap<String, Object>();
+      innerinfo.put("underReplicatedBlocks", node.decommissioningStatus
+          .getUnderReplicatedBlocks());
+      innerinfo.put("decommissionOnlyReplicas", node.decommissioningStatus
+          .getDecommissionOnlyReplicas());
+      innerinfo.put("underReplicatedInOpenFiles", node.decommissioningStatus
+          .getUnderReplicatedInOpenFiles());
+      info.put(node.getHostName(), innerinfo);
+    }
+    return JSON.toString(info);
+  }
+
+  private long getLastContact(DatanodeDescriptor alivenode) {
+    return (System.currentTimeMillis() - alivenode.getLastUpdate())/1000;
+  }
+
+  private long getDfsUsed(DatanodeDescriptor alivenode) {
+    return alivenode.getDfsUsed();
+  }
 }

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java?rev=986522&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
(added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
Tue Aug 17 23:15:35 2010
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * 
+ * This is the JMX management interface for namenode information
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public interface NameNodeMXBean {
+
+  /**
+   * Gets the version of Hadoop.
+   * 
+   * @return the version
+   */
+  public String getVersion();
+  
+  /**
+   * Gets the used space by data nodes.
+   * 
+   * @return the used space by data nodes
+   */
+  public long getUsed();
+  
+  /**
+   * Gets total non-used raw bytes.
+   * 
+   * @return total non-used raw bytes
+   */
+  public long getFree();
+  
+  /**
+   * Gets total raw bytes including non-dfs used space.
+   * 
+   * @return the total raw bytes including non-dfs used space
+   */
+  public long getTotal();
+  
+  /**
+   * Gets the safemode status
+   * 
+   * @return the safemode status
+   * 
+   */
+  public String getSafemode();
+  
+  /**
+   * Checks if upgrade is finalized.
+   * 
+   * @return true, if upgrade is finalized
+   */
+  public boolean isUpgradeFinalized();
+  
+  /**
+   * Gets total used space by data nodes for non DFS purposes such as storing
+   * temporary files on the local file system
+   * 
+   * @return the non dfs space of the cluster
+   */
+  public long getNonDfsUsedSpace();
+  
+  /**
+   * Gets the total used space by data nodes as percentage of total capacity
+   * 
+   * @return the percentage of used space on the cluster.
+   */
+  public float getPercentUsed();
+  
+  /**
+   * Gets the total remaining space by data nodes as percentage of total 
+   * capacity
+   * 
+   * @return the percentage of the remaining space on the cluster
+   */
+  public float getPercentRemaining();
+  
+  /**
+   * Gets the total numbers of blocks on the cluster.
+   * 
+   * @return the total number of blocks of the cluster
+   */
+  public long getTotalBlocks();
+  
+  /**
+   * Gets the total number of files on the cluster
+   * 
+   * @return the total number of files on the cluster
+   */
+  public long getTotalFiles();
+  
+  /**
+   * Gets the number of threads.
+   * 
+   * @return the number of threads
+   */
+  public int getThreads();
+
+  /**
+   * Gets the live node information of the cluster.
+   * 
+   * @return the live node information
+   */
+  public String getLiveNodes();
+  
+  /**
+   * Gets the dead node information of the cluster.
+   * 
+   * @return the dead node information
+   */
+  public String getDeadNodes();
+  
+  /**
+   * Gets the decommissioning node information of the cluster.
+   * 
+   * @return the decommissioning node information
+   */
+  public String getDecomNodes();
+}

Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java?rev=986522&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
(added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
Tue Aug 17 23:15:35 2010
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.lang.management.ManagementFactory;
+import java.util.List;
+
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+import junit.framework.Assert;
+
+/**
+ * Class for testing {@link DataNodeMXBean} implementation
+ */
+public class TestDataNodeMXBean {
+  @Test
+  public void testDataNodeMXBean() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+
+    try {
+      List<DataNode> datanodes = cluster.getDataNodes();
+      Assert.assertEquals(datanodes.size(), 1);
+      DataNode datanode = datanodes.get(0);
+
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
+      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=DataNodeInfo");
+      // get attribute "Version"
+      String version = (String)mbs.getAttribute(mxbeanName, "Version");
+      Assert.assertEquals(datanode.getVersion(),version);
+      // get attribute "RpcPort"
+      String rpcPort = (String)mbs.getAttribute(mxbeanName, "RpcPort");
+      Assert.assertEquals(datanode.getRpcPort(),rpcPort);
+      // get attribute "HttpPort"
+      String httpPort = (String)mbs.getAttribute(mxbeanName, "HttpPort");
+      Assert.assertEquals(datanode.getHttpPort(),httpPort);
+      // get attribute "NamenodeAddress"
+      String namenodeAddress = (String)mbs.getAttribute(mxbeanName, 
+          "NamenodeAddress");
+      Assert.assertEquals(datanode.getNamenodeAddress(),namenodeAddress);
+      // get attribute "getVolumeInfo"
+      String volumeInfo = (String)mbs.getAttribute(mxbeanName, "VolumeInfo");
+      Assert.assertEquals(datanode.getVolumeInfo(),volumeInfo);
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+}

Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=986522&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
(added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
Tue Aug 17 23:15:35 2010
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.lang.management.ManagementFactory;
+
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+
+import org.junit.Test;
+import junit.framework.Assert;
+
+/**
+ * Class for testing {@link NameNodeMXBean} implementation
+ */
+public class TestNameNodeMXBean {
+  @Test
+  public void testNameNodeMXBeanInfo() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+
+    try {
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster.waitActive();
+
+      FSNamesystem fsn = cluster.getNameNode().namesystem;
+
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=NameNodeInfo");
+      // get attribute "Version"
+      String version = (String) mbs.getAttribute(mxbeanName, "Version");
+      Assert.assertEquals(fsn.getVersion(), version);
+      // get attribute "Used"
+      Long used = (Long) mbs.getAttribute(mxbeanName, "Used");
+      Assert.assertEquals(fsn.getUsed(), used.longValue());
+      // get attribute "Total"
+      Long total = (Long) mbs.getAttribute(mxbeanName, "Total");
+      Assert.assertEquals(fsn.getTotal(), total.longValue());
+      // get attribute "safemode"
+      String safemode = (String) mbs.getAttribute(mxbeanName, "Safemode");
+      Assert.assertEquals(fsn.getSafemode(), safemode);
+      // get attribute nondfs
+      Long nondfs = (Long) (mbs.getAttribute(mxbeanName, "NonDfsUsedSpace"));
+      Assert.assertEquals(fsn.getNonDfsUsedSpace(), nondfs.longValue());
+      // get attribute percentremaining
+      Float percentremaining = (Float) (mbs.getAttribute(mxbeanName,
+          "PercentRemaining"));
+      Assert.assertEquals(fsn.getPercentRemaining(), percentremaining
+          .floatValue());
+      // get attribute Totalblocks
+      Long totalblocks = (Long) (mbs.getAttribute(mxbeanName, "TotalBlocks"));
+      Assert.assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
+      // get attribute alivenodeinfo
+      String alivenodeinfo = (String) (mbs.getAttribute(mxbeanName,
+          "LiveNodes"));
+      Assert.assertEquals(fsn.getLiveNodes(), alivenodeinfo);
+      // get attribute deadnodeinfo
+      String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
+          "DeadNodes"));
+      Assert.assertEquals(fsn.getDeadNodes(), deadnodeinfo);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}



Mime
View raw message