hadoop-hdfs-commits mailing list archives

From sur...@apache.org
Subject svn commit: r1103834 [1/2] - in /hadoop/hdfs/trunk: ./ conf/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/datanode/metrics/ src/java/org/apache/hadoop/hdfs/server/namenode/ src...
Date Mon, 16 May 2011 18:47:30 GMT
Author: suresh
Date: Mon May 16 18:47:28 2011
New Revision: 1103834

URL: http://svn.apache.org/viewvc?rev=1103834&view=rev
Log:
HDFS-1117. Metrics 2.0 HDFS instrumentation. Contributed by Luke Lu.

Added:
    hadoop/hdfs/trunk/conf/hadoop-metrics2.properties
Removed:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeActivityMBean.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/build.xml
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/JMXGet.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/tools/TestJMXGet.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Mon May 16 18:47:28 2011
@@ -286,7 +286,8 @@ Trunk (unreleased changes)
     HDFS-1628. Display full path in AccessControlException.  (John George
     via szetszwo)
 
-    HDFS-1707. Federation: Failure in browsing data on new namenodes. (jitendra)
+    HDFS-1707. Federation: Failure in browsing data on new namenodes. 
+    (jitendra)
 
     HDFS-1683. Test Balancer with multiple NameNodes.  (szetszwo)
 
@@ -413,6 +414,8 @@ Trunk (unreleased changes)
     HDFS-1899. GenericTestUtils.formatNamenode should be moved to DFSTestUtil
     (Ted Yu via todd)
 
+    HDFS-1117. Metrics 2.0 HDFS instrumentation. (Luke Lu via suresh)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

Modified: hadoop/hdfs/trunk/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/build.xml?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/build.xml (original)
+++ hadoop/hdfs/trunk/build.xml Mon May 16 18:47:28 2011
@@ -92,7 +92,7 @@
   <property name="test.junit.fork.mode" value="perTest" />
   <property name="test.junit.printsummary" value="yes" />
   <property name="test.junit.haltonfailure" value="no" />
-  <property name="test.junit.maxmemory" value="512m" />
+  <property name="test.junit.maxmemory" value="1024m" />
   <property name="test.conf.dir" value="${build.dir}/test/conf" />
 
   <property name="test.hdfs.build.classes" value="${test.build.dir}/hdfs/classes"/>

Added: hadoop/hdfs/trunk/conf/hadoop-metrics2.properties
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/conf/hadoop-metrics2.properties?rev=1103834&view=auto
==============================================================================
--- hadoop/hdfs/trunk/conf/hadoop-metrics2.properties (added)
+++ hadoop/hdfs/trunk/conf/hadoop-metrics2.properties Mon May 16 18:47:28 2011
@@ -0,0 +1,27 @@
+# syntax: [prefix].[source|sink].[instance].[options]
+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
+# default sampling period
+*.period=10
+
+# The namenode-metrics.out will contain metrics from all context
+#namenode.sink.file.filename=namenode-metrics.out
+# Specifying a special sampling period for namenode:
+#namenode.sink.*.period=8
+
+#datanode.sink.file.filename=datanode-metrics.out
+
+# the following example split metrics of different
+# context to different sinks (in this case files)
+#jobtracker.sink.file_jvm.context=jvm
+#jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out
+#jobtracker.sink.file_mapred.context=mapred
+#jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out
+
+#tasktracker.sink.file.filename=tasktracker-metrics.out
+
+#maptask.sink.file.filename=maptask-metrics.out
+
+#reducetask.sink.file.filename=reducetask-metrics.out
+
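
The properties above take effect once a daemon initializes the metrics system with a matching prefix, which the DataNode and NameNode changes further down now do. A minimal bootstrap sketch (illustrative, not part of the patch; it uses only calls that appear in the diffs below); uncommenting datanode.sink.file.filename above would then route this process's metrics to that file every *.period seconds:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class MetricsBootstrapSketch {
  public static void main(String[] args) {
    // "DataNode" is the configuration prefix, matching the lowercase
    // datanode.* keys in the properties file above.
    DefaultMetricsSystem.initialize("DataNode");
    // Sources registered after this point are snapshotted every *.period
    // seconds into the configured sinks, e.g.:
    DataNodeMetrics metrics = DataNodeMetrics.create(new Configuration(), "example:50010");
    metrics.incrBlocksRead();  // counter visible via JMX and the file sink
    DefaultMetricsSystem.shutdown();
  }
}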

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSUtil.java Mon May 16 18:47:28 2011
@@ -576,4 +576,13 @@ public class DFSUtil {
     return new InetSocketAddress(address.substring(0, colon), 
         Integer.parseInt(address.substring(colon + 1)));
   }
+
+  /**
+   * Round bytes to GiB (gibibyte)
+   * @param bytes number of bytes
+   * @return number of GiB
+   */
+  public static int roundBytesToGB(long bytes) {
+    return Math.round((float)bytes/ 1024 / 1024 / 1024);
+  }
 }
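
A quick sanity check of the new helper's rounding behavior (illustrative only): Math.round rounds half up, so a value of exactly 1.5 GiB reports as 2.

public class RoundToGBExample {
  public static void main(String[] args) {
    long bytes = 3L * 512 * 1024 * 1024;  // 1.5 GiB = 1610612736 bytes
    // Same arithmetic as DFSUtil.roundBytesToGB(bytes)
    System.out.println(Math.round((float) bytes / 1024 / 1024 / 1024));  // prints 2
  }
}

Note that the method returns int even though the @Metric gauges added to FSNamesystem below declare float; the int result widens implicitly.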

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Mon May 16 18:47:28 2011
@@ -440,13 +440,13 @@ class BlockPoolSliceScanner {
         
         if (second) {
           totalScanErrors++;
-          datanode.getMetrics().blockVerificationFailures.inc(); 
+          datanode.getMetrics().incrBlockVerificationFailures();
           handleScanFailure(block);
           return;
         } 
       } finally {
         IOUtils.closeStream(blockSender);
-        datanode.getMetrics().blocksVerified.inc();
+        datanode.getMetrics().incrBlocksVerified();
         totalScans++;
       }
     }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Mon May 16 18:47:28 2011
@@ -628,7 +628,7 @@ class BlockReceiver implements Closeable
             offsetInBlock, lastChunkChecksum
           );
           
-          datanode.myMetrics.bytesWritten.inc(len);
+          datanode.metrics.incrBytesWritten(len);
         }
       } catch (IOException iex) {
         datanode.checkDiskError(iex);
@@ -696,7 +696,7 @@ class BlockReceiver implements Closeable
           // Finalize the block. Does this fsync()?
           datanode.data.finalizeBlock(block);
         }
-        datanode.myMetrics.blocksWritten.inc();
+        datanode.metrics.incrBlocksWritten();
       }
 
     } catch (IOException ioe) {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Mon May 16 18:47:28 2011
@@ -29,7 +29,6 @@ import java.io.DataOutputStream;
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.lang.management.ManagementFactory;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
@@ -54,9 +53,6 @@ import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -118,6 +114,8 @@ import org.apache.hadoop.ipc.ProtocolSig
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
@@ -353,7 +351,7 @@ public class DataNode extends Configured
   long heartBeatInterval;
   private DataStorage storage = null;
   private HttpServer infoServer = null;
-  DataNodeMetrics myMetrics;
+  DataNodeMetrics metrics;
   private InetSocketAddress selfAddr;
   
   private static volatile DataNode datanodeObject = null;
@@ -925,7 +923,7 @@ public class DataNode extends Configured
         cmd = bpNamenode.blockReport(bpRegistration, blockPoolId, bReport
             .getBlockListAsLongs());
         long brTime = now() - brStartTime;
-        myMetrics.blockReports.inc(brTime);
+        metrics.addBlockReport(brTime);
         LOG.info("BlockReport of " + bReport.getNumberOfBlocks() +
             " blocks got processed in " + brTime + " msecs");
         //
@@ -1036,7 +1034,7 @@ public class DataNode extends Configured
             //
             lastHeartbeat = startTime;
             DatanodeCommand[] cmds = sendHeartBeat();
-            myMetrics.heartbeats.inc(now() - startTime);
+            metrics.addHeartbeat(now() - startTime);
             if (!processCommand(cmds))
               continue;
           }
@@ -1258,7 +1256,7 @@ public class DataNode extends Configured
       case DatanodeProtocol.DNA_TRANSFER:
         // Send a copy of a block to another datanode
         transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(), bcmd.getTargets());
-        myMetrics.blocksReplicated.inc(bcmd.getBlocks().length);
+        metrics.incrBlocksReplicated(bcmd.getBlocks().length);
         break;
       case DatanodeProtocol.DNA_INVALIDATE:
         //
@@ -1276,7 +1274,7 @@ public class DataNode extends Configured
           checkDiskError();
           throw e;
         }
-        myMetrics.blocksRemoved.inc(toDelete.length);
+        metrics.incrBlocksRemoved(toDelete.length);
         break;
       case DatanodeProtocol.DNA_SHUTDOWN:
         // shut down the data node
@@ -1377,7 +1375,7 @@ public class DataNode extends Configured
     this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
     initIpcServer(conf);
 
-    myMetrics = new DataNodeMetrics(conf, getMachineName());
+    metrics = DataNodeMetrics.create(conf, getMachineName());
 
     blockPoolManager = new BlockPoolManager(conf);
   }
@@ -1427,17 +1425,7 @@ public class DataNode extends Configured
   }
   
   private void registerMXBean() {
-    // register MXBean
-    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
-    try {
-      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=DataNodeInfo");
-      mbs.registerMBean(this, mxbeanName);
-    } catch ( javax.management.InstanceAlreadyExistsException iaee ) {
-      // in unit tests, we may have multiple datanodes in the same JVM
-      LOG.info("DataNode MXBean already registered");
-    } catch ( javax.management.JMException e ) {
-      LOG.warn("Failed to register DataNode MXBean", e);
-    }
+    MBeans.register("DataNode", "DataNodeInfo", this);
   }
   
   int getPort() {
@@ -1551,7 +1539,7 @@ public class DataNode extends Configured
   }
     
   DataNodeMetrics getMetrics() {
-    return myMetrics;
+    return metrics;
   }
   
   public static void setNewStorageID(DatanodeID dnId) {
@@ -1668,8 +1656,8 @@ public class DataNode extends Configured
     if (data != null) {
       data.shutdown();
     }
-    if (myMetrics != null) {
-      myMetrics.shutdown();
+    if (metrics != null) {
+      metrics.shutdown();
     }
   }
   
@@ -1709,7 +1697,7 @@ public class DataNode extends Configured
     // shutdown the DN completely.
     int dpError = hasEnoughResources ? DatanodeProtocol.DISK_ERROR  
                                      : DatanodeProtocol.FATAL_DISK_ERROR;  
-    myMetrics.volumeFailures.inc(1);
+    metrics.incrVolumeFailures();
 
     //inform NameNodes
     for(BPOfferService bpos: blockPoolManager.getAllNamenodeThreads()) {
@@ -2003,7 +1991,7 @@ public class DataNode extends Configured
    * @param delHint
    */
   void closeBlock(ExtendedBlock block, String delHint) {
-    myMetrics.blocksWritten.inc();
+    metrics.incrBlocksWritten();
     BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
     if(bpos != null) {
       bpos.notifyNamenodeReceivedBlock(block, delHint);
@@ -2138,6 +2126,7 @@ public class DataNode extends Configured
         conf.get(DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
                  DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
     ArrayList<File> dirs = getDataDirsFromURIs(dataDirs, localFS, permission);
+    DefaultMetricsSystem.initialize("DataNode");
 
     assert dirs.size() > 0 : "number of data directories should be > 0";
     return new DataNode(conf, dirs, resources);
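
The registerMXBean() rewrite above collapses the raw JMX plumbing into a single MBeans.register() call. A sketch of what that helper is assumed to do, reconstructed from the code this patch deletes (the "Hadoop" JMX domain and exact ObjectName format are assumptions, not shown in this diff):

import java.lang.management.ManagementFactory;
import javax.management.InstanceAlreadyExistsException;
import javax.management.JMException;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public final class MBeansSketch {
  public static ObjectName register(String service, String name, Object bean) {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    try {
      // Assumed naming convention: one stable domain for all Hadoop beans
      ObjectName objName = new ObjectName("Hadoop:service=" + service + ",name=" + name);
      mbs.registerMBean(bean, objName);
      return objName;
    } catch (InstanceAlreadyExistsException e) {
      return null;  // e.g. several DataNodes sharing a JVM in unit tests
    } catch (JMException e) {
      return null;  // registration failure is non-fatal, as in the old code
    }
  }
}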

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Mon May 16 18:47:28 2011
@@ -48,8 +48,8 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
@@ -183,11 +183,11 @@ class DataXceiver extends DataTransferPr
       SUCCESS.write(out); // send op status
       long read = blockSender.sendBlock(out, baseStream, null); // send data
       
-      datanode.myMetrics.bytesRead.inc((int) read);
-      datanode.myMetrics.blocksRead.inc();
+      datanode.metrics.incrBytesRead((int) read);
+      datanode.metrics.incrBlocksRead();
     } catch ( SocketException ignored ) {
       // Its ok for remote side to close the connection anytime.
-      datanode.myMetrics.blocksRead.inc();
+      datanode.metrics.incrBlocksRead();
     } catch ( IOException ioe ) {
       /* What exactly should we do here?
        * Earlier version shutdown() datanode if there is disk error.
@@ -203,9 +203,8 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics
-    updateDuration(datanode.myMetrics.readBlockOp);
-    updateCounter(datanode.myMetrics.readsFromLocalClient,
-                  datanode.myMetrics.readsFromRemoteClient);
+    datanode.metrics.addReadBlockOp(elapsed());
+    datanode.metrics.incrReadsFromClient(isLocal);
   }
 
   /**
@@ -409,9 +408,8 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics
-    updateDuration(datanode.myMetrics.writeBlockOp);
-    updateCounter(datanode.myMetrics.writesFromLocalClient,
-                  datanode.myMetrics.writesFromRemoteClient);
+    datanode.metrics.addWriteBlockOp(elapsed());
+    datanode.metrics.incrWritesFromClient(isLocal);
   }
 
   @Override
@@ -482,7 +480,7 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics
-    updateDuration(datanode.myMetrics.blockChecksumOp);
+    datanode.metrics.addBlockChecksumOp(elapsed());
   }
 
   /**
@@ -535,8 +533,8 @@ class DataXceiver extends DataTransferPr
       long read = blockSender.sendBlock(reply, baseStream, 
                                         dataXceiverServer.balanceThrottler);
 
-      datanode.myMetrics.bytesRead.inc((int) read);
-      datanode.myMetrics.blocksRead.inc();
+      datanode.metrics.incrBytesRead((int) read);
+      datanode.metrics.incrBlocksRead();
       
       LOG.info("Copied block " + block + " to " + s.getRemoteSocketAddress());
     } catch (IOException ioe) {
@@ -556,7 +554,7 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics    
-    updateDuration(datanode.myMetrics.copyBlockOp);
+    datanode.metrics.addCopyBlockOp(elapsed());
   }
 
   /**
@@ -670,16 +668,16 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics
-    updateDuration(datanode.myMetrics.replaceBlockOp);
+    datanode.metrics.addReplaceBlockOp(elapsed());
   }
 
-  private void updateDuration(MetricsTimeVaryingRate mtvr) {
-    mtvr.inc(now() - opStartTime);
+  private long elapsed() {
+    return now() - opStartTime;
   }
 
-  private void updateCounter(MetricsTimeVaryingInt localCounter,
-      MetricsTimeVaryingInt remoteCounter) {
-    (isLocal? localCounter: remoteCounter).inc();
+  private void updateCounter(MutableCounterLong localCounter,
+      MutableCounterLong remoteCounter) {
+    (isLocal? localCounter: remoteCounter).incr();
   }
 
   /**

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Mon May 16 18:47:28 2011
@@ -59,7 +59,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.StringUtils;
@@ -2186,18 +2186,17 @@ public class FSDataset implements FSCons
     }
     try {
       bean = new StandardMBean(this,FSDatasetMBean.class);
-      mbeanName = MBeanUtil.registerMBean("DataNode", "FSDatasetState-" + storageName, bean);
+      mbeanName = MBeans.register("DataNode", "FSDatasetState-" + storageName, bean);
     } catch (NotCompliantMBeanException e) {
-      e.printStackTrace();
+      DataNode.LOG.warn("Error registering FSDatasetState MBean", e);
     }
- 
-    DataNode.LOG.info("Registered FSDatasetStatusMBean");
+    DataNode.LOG.info("Registered FSDatasetState MBean");
   }
 
   @Override // FSDatasetInterface
   public void shutdown() {
     if (mbeanName != null)
-      MBeanUtil.unregisterMBean(mbeanName);
+      MBeans.unregister(mbeanName);
     
     if (asyncDiskService != null) {
       asyncDiskService.shutdown();

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java Mon May 16 18:47:28 2011
@@ -17,23 +17,22 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.metrics;
 
+import java.util.Random;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
-import org.apache.hadoop.metrics.Updater;
-import org.apache.hadoop.metrics.jvm.JvmMetrics;
-import org.apache.hadoop.metrics.util.MetricsBase;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+import static org.apache.hadoop.metrics2.impl.MsInfo.*;
 
 /**
- * 
+ *
  * This class is for maintaining  the various DataNode statistics
  * and publishing them through the metrics interfaces.
  * This also registers the JMX MBean for RPC.
@@ -45,97 +44,125 @@ import org.apache.hadoop.hdfs.DFSConfigK
  *
  */
 @InterfaceAudience.Private
-public class DataNodeMetrics implements Updater {
-  private final MetricsRecord metricsRecord;
-  private DataNodeActivityMBean datanodeActivityMBean;
-  public MetricsRegistry registry = new MetricsRegistry();
-  
-  
-  public MetricsTimeVaryingLong bytesWritten = 
-                      new MetricsTimeVaryingLong("bytes_written", registry);
-  public MetricsTimeVaryingLong bytesRead = 
-                      new MetricsTimeVaryingLong("bytes_read", registry);
-  public MetricsTimeVaryingInt blocksWritten = 
-                      new MetricsTimeVaryingInt("blocks_written", registry);
-  public MetricsTimeVaryingInt blocksRead = 
-                      new MetricsTimeVaryingInt("blocks_read", registry);
-  public MetricsTimeVaryingInt blocksReplicated =
-                      new MetricsTimeVaryingInt("blocks_replicated", registry);
-  public MetricsTimeVaryingInt blocksRemoved =
-                       new MetricsTimeVaryingInt("blocks_removed", registry);
-  public MetricsTimeVaryingInt blocksVerified = 
-                        new MetricsTimeVaryingInt("blocks_verified", registry);
-  public MetricsTimeVaryingInt blockVerificationFailures =
-                       new MetricsTimeVaryingInt("block_verification_failures", registry);
-  
-  public MetricsTimeVaryingInt readsFromLocalClient = 
-                new MetricsTimeVaryingInt("reads_from_local_client", registry);
-  public MetricsTimeVaryingInt readsFromRemoteClient = 
-                new MetricsTimeVaryingInt("reads_from_remote_client", registry);
-  public MetricsTimeVaryingInt writesFromLocalClient = 
-              new MetricsTimeVaryingInt("writes_from_local_client", registry);
-  public MetricsTimeVaryingInt writesFromRemoteClient = 
-              new MetricsTimeVaryingInt("writes_from_remote_client", registry);
+@Metrics(about="DataNode metrics", context="dfs")
+public class DataNodeMetrics {
 
-  public MetricsTimeVaryingInt volumeFailures =
-    new MetricsTimeVaryingInt("volumeFailures", registry);
+  @Metric MutableCounterLong bytesWritten;
+  @Metric MutableCounterLong bytesRead;
+  @Metric MutableCounterLong blocksWritten;
+  @Metric MutableCounterLong blocksRead;
+  @Metric MutableCounterLong blocksReplicated;
+  @Metric MutableCounterLong blocksRemoved;
+  @Metric MutableCounterLong blocksVerified;
+  @Metric MutableCounterLong blockVerificationFailures;
+  @Metric MutableCounterLong readsFromLocalClient;
+  @Metric MutableCounterLong readsFromRemoteClient;
+  @Metric MutableCounterLong writesFromLocalClient;
+  @Metric MutableCounterLong writesFromRemoteClient;
   
-  public MetricsTimeVaryingRate readBlockOp = 
-                new MetricsTimeVaryingRate("readBlockOp", registry);
-  public MetricsTimeVaryingRate writeBlockOp = 
-                new MetricsTimeVaryingRate("writeBlockOp", registry);
-  public MetricsTimeVaryingRate blockChecksumOp = 
-                new MetricsTimeVaryingRate("blockChecksumOp", registry);
-  public MetricsTimeVaryingRate copyBlockOp = 
-                new MetricsTimeVaryingRate("copyBlockOp", registry);
-  public MetricsTimeVaryingRate replaceBlockOp = 
-                new MetricsTimeVaryingRate("replaceBlockOp", registry);
-  public MetricsTimeVaryingRate heartbeats = 
-                    new MetricsTimeVaryingRate("heartBeats", registry);
-  public MetricsTimeVaryingRate blockReports = 
-                    new MetricsTimeVaryingRate("blockReports", registry);
-
-    
-  public DataNodeMetrics(Configuration conf, String datanodeName) {
-    String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY); 
-    // Initiate reporting of Java VM metrics
-    JvmMetrics.init("DataNode", sessionId);
-    
-
-    // Now the MBean for the data node
-    datanodeActivityMBean = new DataNodeActivityMBean(registry, datanodeName);
-    
-    // Create record for DataNode metrics
-    MetricsContext context = MetricsUtil.getContext("dfs");
-    metricsRecord = MetricsUtil.createRecord(context, "datanode");
-    metricsRecord.setTag("sessionId", sessionId);
-    context.registerUpdater(this);
+  @Metric MutableCounterLong volumeFailures;
+
+  @Metric MutableRate readBlockOp;
+  @Metric MutableRate writeBlockOp;
+  @Metric MutableRate blockChecksumOp;
+  @Metric MutableRate copyBlockOp;
+  @Metric MutableRate replaceBlockOp;
+  @Metric MutableRate heartbeats;
+  @Metric MutableRate blockReports;
+
+  final MetricsRegistry registry = new MetricsRegistry("datanode");
+  final String name;
+  static final Random rng = new Random();
+
+  public DataNodeMetrics(String name, String sessionId) {
+    this.name = name;
+    registry.tag(SessionId, sessionId);
   }
-  
+
+  public static DataNodeMetrics create(Configuration conf, String dnName) {
+    String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    JvmMetrics.create("DataNode", sessionId, ms);
+    String name = "DataNodeActivity-"+ (dnName.isEmpty()
+        ? "UndefinedDataNodeName"+ rng.nextInt() : dnName.replace(':', '-'));
+    return ms.register(name, null, new DataNodeMetrics(name, sessionId));
+  }
+
+  public String name() { return name; }
+
+  public void addHeartbeat(long latency) {
+    heartbeats.add(latency);
+  }
+
+  public void addBlockReport(long latency) {
+    blockReports.add(latency);
+  }
+
+  public void incrBlocksReplicated(int delta) {
+    blocksReplicated.incr(delta);
+  }
+
+  public void incrBlocksWritten() {
+    blocksWritten.incr();
+  }
+
+  public void incrBlocksRemoved(int delta) {
+    blocksRemoved.incr(delta);
+  }
+
+  public void incrBytesWritten(int delta) {
+    bytesWritten.incr(delta);
+  }
+
+  public void incrBlockVerificationFailures() {
+    blockVerificationFailures.incr();
+  }
+
+  public void incrBlocksVerified() {
+    blocksVerified.incr();
+  }
+
+  public void addReadBlockOp(long latency) {
+    readBlockOp.add(latency);
+  }
+
+  public void addWriteBlockOp(long latency) {
+    writeBlockOp.add(latency);
+  }
+
+  public void addReplaceBlockOp(long latency) {
+    replaceBlockOp.add(latency);
+  }
+
+  public void addCopyBlockOp(long latency) {
+    copyBlockOp.add(latency);
+  }
+
+  public void addBlockChecksumOp(long latency) {
+    blockChecksumOp.add(latency);
+  }
+
+  public void incrBytesRead(int delta) {
+    bytesRead.incr(delta);
+  }
+
+  public void incrBlocksRead() {
+    blocksRead.incr();
+  }
+
   public void shutdown() {
-    if (datanodeActivityMBean != null) 
-      datanodeActivityMBean.shutdown();
+    DefaultMetricsSystem.shutdown();
   }
-    
-  /**
-   * Since this object is a registered updater, this method will be called
-   * periodically, e.g. every 5 seconds.
-   */
-  public void doUpdates(MetricsContext unused) {
-    synchronized (this) {
-      for (MetricsBase m : registry.getMetricsList()) {
-        m.pushMetric(metricsRecord);
-      }
-    }
-    metricsRecord.update();
-  }
-  public void resetAllMinMax() {
-    readBlockOp.resetMinMax();
-    writeBlockOp.resetMinMax();
-    blockChecksumOp.resetMinMax();
-    copyBlockOp.resetMinMax();
-    replaceBlockOp.resetMinMax();
-    heartbeats.resetMinMax();
-    blockReports.resetMinMax();
+
+  public void incrWritesFromClient(boolean local) {
+    (local ? writesFromLocalClient : writesFromRemoteClient).incr();
+  }
+
+  public void incrReadsFromClient(boolean local) {
+    (local ? readsFromLocalClient : readsFromRemoteClient).incr();
+  }
+  
+  public void incrVolumeFailures() {
+    volumeFailures.incr();
   }
 }
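
The DataNodeMetrics rewrite above is the heart of the patch: annotated mutable-metric fields replace the hand-constructed MetricsTimeVarying* objects, and the periodic doUpdates() push disappears because the metrics system snapshots registered sources itself. The @Metric fields are never assigned in the class; the system instantiates them during register(). A minimal source in the same style (class and metric names here are hypothetical; the annotations, types, and registration calls are the ones the patch uses):

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableRate;

@Metrics(about="Example source", context="dfs")
class ExampleMetrics {
  final MetricsRegistry registry = new MetricsRegistry("example");

  @Metric MutableCounterLong requests;  // instantiated by the system on register()
  @Metric MutableRate requestLatency;

  static ExampleMetrics create() {
    MetricsSystem ms = DefaultMetricsSystem.instance();
    return ms.register("ExampleActivity", null, new ExampleMetrics());
  }

  void served(long latencyMillis) {
    requests.incr();
    requestLatency.add(latencyMillis);
  }
}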

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java Mon May 16 18:47:28 2011
@@ -33,7 +33,7 @@ import org.apache.hadoop.classification.
  * 
  * <p>
  * Data Node runtime statistic  info is report in another MBean
- * @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean
+ * @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics
  *
  */
 @InterfaceAudience.Private

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Mon May 16 18:47:28 2011
@@ -185,7 +185,7 @@ class FSDirectory implements Closeable {
 
   private void incrDeletedFileCount(int count) {
     if (getFSNamesystem() != null)
-      NameNode.getNameNodeMetrics().numFilesDeleted.inc(count);
+      NameNode.getNameNodeMetrics().incrFilesDeleted(count);
   }
     
   /**
@@ -1484,7 +1484,7 @@ class FSDirectory implements Closeable {
         // Directory creation also count towards FilesCreated
         // to match count of FilesDeleted metric.
         if (getFSNamesystem() != null)
-          NameNode.getNameNodeMetrics().numFilesCreated.inc();
+          NameNode.getNameNodeMetrics().incrFilesCreated();
         fsImage.getEditLog().logMkDir(cur, inodes[i]);
         if(NameNode.stateChangeLog.isDebugEnabled()) {
           NameNode.stateChangeLog.debug(

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Mon May 16 18:47:28 2011
@@ -400,7 +400,7 @@ public class FSEditLog implements NNStor
     numTransactions++;
     totalTimeTransactions += (end-start);
     if (metrics != null) // Metrics is non-null only when used inside name node
-      metrics.transactions.inc((end-start));
+      metrics.addTransaction(end-start);
   }
 
   /**
@@ -476,7 +476,7 @@ public class FSEditLog implements NNStor
         if (mytxid <= synctxid) {
           numTransactionsBatchedInSync++;
           if (metrics != null) // Metrics is non-null only when used inside name node
-            metrics.transactionsBatchedInSync.inc();
+            metrics.incrTransactionsBatchedInSync();
           return;
         }
      
@@ -528,7 +528,7 @@ public class FSEditLog implements NNStor
       disableAndReportErrorOnStreams(errorStreams);
   
       if (metrics != null) // Metrics non-null only when used inside name node
-        metrics.syncs.inc(elapsed);
+        metrics.addSync(elapsed);
     } finally {
       // Prevent RuntimeException from blocking other log edit sync 
       synchronized (this) {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon May 16 18:47:28 2011
@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Util;
 import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
-import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
@@ -45,7 +44,6 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.util.*;
-import org.apache.hadoop.metrics.util.MBeanUtil;
 import org.apache.hadoop.net.CachedDNSToSwitchMapping;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetworkTopology;
@@ -85,6 +83,11 @@ import org.apache.hadoop.fs.permission.*
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterInt;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.mortbay.util.ajax.JSON;
 
 import java.io.BufferedWriter;
@@ -103,11 +106,9 @@ import java.util.*;
 import java.util.concurrent.TimeUnit;
 import java.util.Map.Entry;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
-
 import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
-import javax.management.MBeanServer;
 
 /***************************************************
  * FSNamesystem does the actual bookkeeping work for the
@@ -122,8 +123,9 @@ import javax.management.MBeanServer;
  * 5)  LRU cache of updated-heartbeat machines
  ***************************************************/
 @InterfaceAudience.Private
-public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterStats,
-    NameNodeMXBean {
+@Metrics(context="dfs")
+public class FSNamesystem implements FSConstants, FSNamesystemMBean,
+    FSClusterStats, NameNodeMXBean {
   public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
 
   private static final ThreadLocal<StringBuilder> auditBuffer =
@@ -177,7 +179,7 @@ public class FSNamesystem implements FSC
   private String supergroup;
   private PermissionStatus defaultPermission;
   // FSNamesystemMetrics counter variables
-  private FSNamesystemMetrics myFSMetrics;
+  @Metric private MutableCounterInt expiredHeartbeats;
   private long capacityTotal = 0L, capacityUsed = 0L, capacityRemaining = 0L;
   private long blockPoolUsed = 0L;
   private int totalLoad = 0;
@@ -325,7 +327,7 @@ public class FSNamesystem implements FSC
     this.fsLock = new ReentrantReadWriteLock(true); // fair locking
     setConfigurationParameters(conf);
     dtSecretManager = createDelegationTokenSecretManager(conf);
-    this.registerMBean(conf); // register the MBean for the FSNamesystemStutus
+    this.registerMBean(); // register the MBean for the FSNamesystemState
     if(fsImage == null) {
       this.dir = new FSDirectory(this, conf);
       StartupOption startOpt = NameNode.getStartupOption(conf);
@@ -333,7 +335,7 @@ public class FSNamesystem implements FSC
                            getNamespaceEditsDirs(conf), startOpt);
       long timeTakenToLoadFSImage = now() - systemStart;
       LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
-      NameNode.getNameNodeMetrics().fsImageLoadTime.set(
+      NameNode.getNameNodeMetrics().setFsImageLoadTime(
                                 (int) timeTakenToLoadFSImage);
     } else {
       this.dir = new FSDirectory(fsImage, this, conf);
@@ -391,6 +393,7 @@ public class FSNamesystem implements FSC
       dnsToSwitchMapping.resolve(new ArrayList<String>(hostsReader.getHosts()));
     }
     registerMXBean();
+    DefaultMetricsSystem.instance().register(this);
   }
 
   public static Collection<URI> getNamespaceDirs(Configuration conf) {
@@ -3185,7 +3188,7 @@ public class FSNamesystem implements FSC
              it.hasNext();) {
           DatanodeDescriptor nodeInfo = it.next();
           if (isDatanodeDead(nodeInfo)) {
-            myFSMetrics.numExpiredHeartbeats.inc();
+            expiredHeartbeats.incr();
             foundDead = true;
             nodeID = nodeInfo;
             break;
@@ -3252,7 +3255,7 @@ public class FSNamesystem implements FSC
     }
 
     // Log the block report processing stats from Namenode perspective
-    NameNode.getNameNodeMetrics().blockReport.inc((int) (endTime - startTime));
+    NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime));
     NameNode.stateChangeLog.info("BLOCK* NameSystem.processReport: from "
         + nodeID.getName() + ", blocks: " + newReport.getNumberOfBlocks()
         + ", processing time: " + (endTime - startTime) + " msecs");
@@ -3396,6 +3399,7 @@ public class FSNamesystem implements FSC
     }
   }
 
+  @Metric({"MissingBlocks", "Number of missing blocks"})
   public long getMissingBlocksCount() {
     // not locking
     return blockManager.getMissingBlocksCount();
@@ -3422,6 +3426,11 @@ public class FSNamesystem implements FSC
     }
   }
 
+  @Metric
+  public float getCapacityTotalGB() {
+    return DFSUtil.roundBytesToGB(getCapacityTotal());
+  }
+
   /**
    * Total used space by data nodes
    */
@@ -3431,6 +3440,12 @@ public class FSNamesystem implements FSC
       return capacityUsed;
     }
   }
+
+  @Metric
+  public float getCapacityUsedGB() {
+    return DFSUtil.roundBytesToGB(getCapacityUsed());
+  }
+
   /**
    * Total used space by data nodes as percentage of total capacity
    */
@@ -3459,6 +3474,11 @@ public class FSNamesystem implements FSC
     }
   }
 
+  @Metric
+  public float getCapacityRemainingGB() {
+    return DFSUtil.roundBytesToGB(getCapacityRemaining());
+  }
+
   /**
    * Total remaining space by data nodes as percentage of total capacity
    */
@@ -3471,6 +3491,7 @@ public class FSNamesystem implements FSC
    * Total number of connections.
    */
   @Override // FSNamesystemMBean
+  @Metric
   public int getTotalLoad() {
     synchronized (heartbeats) {
       return this.totalLoad;
@@ -4038,7 +4059,7 @@ public class FSNamesystem implements FSC
       long timeInSafemode = now() - systemStart;
       NameNode.stateChangeLog.info("STATE* Leaving safe mode after " 
                                     + timeInSafemode/1000 + " secs.");
-      NameNode.getNameNodeMetrics().safeModeTime.set((int) timeInSafemode);
+      NameNode.getNameNodeMetrics().setSafeModeTime((int) timeInSafemode);
       
       if (reached >= 0) {
         NameNode.stateChangeLog.info("STATE* Safe mode is OFF."); 
@@ -4408,6 +4429,7 @@ public class FSNamesystem implements FSC
    * Get the total number of blocks in the system. 
    */
   @Override // FSNamesystemMBean
+  @Metric
   public long getBlocksTotal() {
     return blockManager.getTotalBlocks();
   }
@@ -4682,16 +4704,19 @@ public class FSNamesystem implements FSC
   }
 
   @Override // FSNamesystemMBean
+  @Metric
   public long getFilesTotal() {
     return this.dir.totalInodes();
   }
 
   @Override // FSNamesystemMBean
+  @Metric
   public long getPendingReplicationBlocks() {
     return blockManager.pendingReplicationBlocksCount;
   }
 
   @Override // FSNamesystemMBean
+  @Metric
   public long getUnderReplicatedBlocks() {
     return blockManager.underReplicatedBlocksCount;
   }
@@ -4702,23 +4727,28 @@ public class FSNamesystem implements FSC
   }
 
   /** Returns number of blocks with corrupt replicas */
+  @Metric({"CorruptBlocks", "Number of blocks with corrupt replicas"})
   public long getCorruptReplicaBlocks() {
     return blockManager.corruptReplicaBlocksCount;
   }
 
   @Override // FSNamesystemMBean
+  @Metric
   public long getScheduledReplicationBlocks() {
     return blockManager.scheduledReplicationBlocksCount;
   }
 
+  @Metric
   public long getPendingDeletionBlocks() {
     return blockManager.pendingDeletionBlocksCount;
   }
 
+  @Metric
   public long getExcessBlocks() {
     return blockManager.excessBlocksCount;
   }
   
+  @Metric
   public int getBlockCapacity() {
     return blockManager.getCapacity();
   }
@@ -4733,28 +4763,16 @@ public class FSNamesystem implements FSC
    * Register the FSNamesystem MBean using the name
    *        "hadoop:service=NameNode,name=FSNamesystemState"
    */
-  void registerMBean(Configuration conf) {
-    // We wrap to bypass standard mbean naming convention.
-    // This wraping can be removed in java 6 as it is more flexible in 
-    // package naming for mbeans and their impl.
-    StandardMBean bean;
-    try {
-      myFSMetrics = new FSNamesystemMetrics(this, conf);
-      bean = new StandardMBean(this,FSNamesystemMBean.class);
-      mbeanName = MBeanUtil.registerMBean("NameNode", "FSNamesystemState", bean);
+  void registerMBean() {
+    // We can only implement one MXBean interface, so we keep the old one.
+    try {
+      StandardMBean bean = new StandardMBean(this, FSNamesystemMBean.class);
+      mbeanName = MBeans.register("NameNode", "FSNamesystemState", bean);
     } catch (NotCompliantMBeanException e) {
-      LOG.warn("Exception in initializing StandardMBean as FSNamesystemMBean",
-	  e);
+      throw new RuntimeException("Bad MBean setup", e);
     }
 
-    LOG.info("Registered FSNamesystemStatusMBean");
-  }
-
-  /**
-   * get FSNamesystemMetrics
-   */
-  public FSNamesystemMetrics getFSNamesystemMetrics() {
-    return myFSMetrics;
+    LOG.info("Registered FSNamesystemState MBean");
   }
 
   /**
@@ -4762,7 +4780,7 @@ public class FSNamesystem implements FSC
    */
   public void shutdown() {
     if (mbeanName != null)
-      MBeanUtil.unregisterMBean(mbeanName);
+      MBeans.unregister(mbeanName);
   }
   
 
@@ -5416,17 +5434,7 @@ public class FSNamesystem implements FSC
    * Register NameNodeMXBean
    */
   private void registerMXBean() {
-    // register MXBean
-    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-    try {
-      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=NameNodeInfo");
-      mbs.registerMBean(this, mxbeanName);
-    } catch ( javax.management.InstanceAlreadyExistsException iaee ) {
-      // in unit tests, we may run and restart the NN within the same JVM
-      LOG.info("NameNode MXBean already registered");
-    } catch ( javax.management.JMException e ) {
-      LOG.warn("Failed to register NameNodeMXBean", e);
-    }
+    MBeans.register("NameNode", "NameNodeInfo", this);
   }
 
   /**
@@ -5499,6 +5507,7 @@ public class FSNamesystem implements FSC
   }
 
   @Override // NameNodeMXBean
+  @Metric
   public long getTotalFiles() {
     return getFilesTotal();
   }
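
FSNamesystem above also shows that @Metric applies to getters, not just fields: each annotated method is read at snapshot time and published as a gauge, so the existing MBean accessors double as metrics with no extra state to keep in sync. A sketch with hypothetical names (the {name, description} array form mirrors the @Metric({"MissingBlocks", ...}) usage above); such a source is registered the same way the patch registers FSNamesystem, via DefaultMetricsSystem.instance().register(this):

import java.util.ArrayDeque;
import java.util.Queue;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;

@Metrics(context="dfs")
class QueueStats {
  private final Queue<Runnable> queue = new ArrayDeque<Runnable>();

  // Sampled on each metrics snapshot; no counter field to maintain.
  @Metric({"QueueLength", "Number of queued tasks"})
  public synchronized int getQueueLength() {
    return queue.size();
  }
}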

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Mon May 16 18:47:28 2011
@@ -86,6 +86,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
@@ -213,7 +214,7 @@ public class NameNode implements Namenod
     format(conf, false);
   }
 
-  static NameNodeMetrics myMetrics;
+  static NameNodeMetrics metrics;
 
   /** Return the {@link FSNamesystem} object.
    * @return {@link FSNamesystem} object.
@@ -223,11 +224,11 @@ public class NameNode implements Namenod
   }
 
   static void initMetrics(Configuration conf, NamenodeRole role) {
-    myMetrics = new NameNodeMetrics(conf, role);
+    metrics = NameNodeMetrics.create(conf, role);
   }
 
   public static NameNodeMetrics getNameNodeMetrics() {
-    return myMetrics;
+    return metrics;
   }
   
   public static InetSocketAddress getAddress(String address) {
@@ -639,8 +640,8 @@ public class NameNode implements Namenod
     if(emptier != null) emptier.interrupt();
     if(server != null) server.stop();
     if(serviceRpcServer != null) serviceRpcServer.stop();
-    if (myMetrics != null) {
-      myMetrics.shutdown();
+    if (metrics != null) {
+      metrics.shutdown();
     }
     if (namesystem != null) {
       namesystem.shutdown();
@@ -750,7 +751,7 @@ public class NameNode implements Namenod
                                           long offset, 
                                           long length) 
       throws IOException {
-    myMetrics.numGetBlockLocations.inc();
+    metrics.incrGetBlockLocations();
     return namesystem.getBlockLocations(getClientMachine(), 
                                         src, offset, length);
   }
@@ -789,8 +790,8 @@ public class NameNode implements Namenod
         new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(),
             null, masked),
         clientName, clientMachine, flag.get(), createParent, replication, blockSize);
-    myMetrics.numFilesCreated.inc();
-    myMetrics.numCreateFileOps.inc();
+    metrics.incrFilesCreated();
+    metrics.incrCreateFileOps();
   }
 
   /** {@inheritDoc} */
@@ -802,7 +803,7 @@ public class NameNode implements Namenod
           +src+" for "+clientName+" at "+clientMachine);
     }
     LocatedBlock info = namesystem.appendFile(src, clientName, clientMachine);
-    myMetrics.numFilesAppended.inc();
+    metrics.incrFilesAppended();
     return info;
   }
 
@@ -844,7 +845,7 @@ public class NameNode implements Namenod
     LocatedBlock locatedBlock = 
       namesystem.getAdditionalBlock(src, clientName, previous, excludedNodesSet);
     if (locatedBlock != null)
-      myMetrics.numAddBlockOps.inc();
+      metrics.incrAddBlockOps();
     return locatedBlock;
   }
 
@@ -862,7 +863,7 @@ public class NameNode implements Namenod
           + ", clientName=" + clientName);
     }
 
-    myMetrics.numGetAdditionalDatanodeOps.inc();
+    metrics.incrGetAdditionalDatanodeOps();
 
     HashMap<Node, Node> excludeSet = null;
     if (excludes != null) {
@@ -959,7 +960,7 @@ public class NameNode implements Namenod
     }
     boolean ret = namesystem.renameTo(src, dst);
     if (ret) {
-      myMetrics.numFilesRenamed.inc();
+      metrics.incrFilesRenamed();
     }
     return ret;
   }
@@ -983,7 +984,7 @@ public class NameNode implements Namenod
                             + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
     namesystem.renameTo(src, dst, options);
-    myMetrics.numFilesRenamed.inc();
+    metrics.incrFilesRenamed();
   }
 
   /**
@@ -1001,7 +1002,7 @@ public class NameNode implements Namenod
     }
     boolean ret = namesystem.delete(src, recursive);
     if (ret) 
-      myMetrics.numDeleteFileOps.inc();
+      metrics.incrDeleteFileOps();
     return ret;
   }
 
@@ -1047,8 +1048,8 @@ public class NameNode implements Namenod
     DirectoryListing files = namesystem.getListing(
         src, startAfter, needLocation);
     if (files != null) {
-      myMetrics.numGetListingOps.inc();
-      myMetrics.numFilesInGetListingOps.inc(files.getPartialListing().length);
+      metrics.incrGetListingOps();
+      metrics.incrFilesInGetListingOps(files.getPartialListing().length);
     }
     return files;
   }
@@ -1060,7 +1061,7 @@ public class NameNode implements Namenod
    *         or null if file not found
    */
   public HdfsFileStatus getFileInfo(String src)  throws IOException {
-    myMetrics.numFileInfoOps.inc();
+    metrics.incrFileInfoOps();
     return namesystem.getFileInfo(src, true);
   }
 
@@ -1072,11 +1073,11 @@ public class NameNode implements Namenod
    *         or null if file not found
    */
   public HdfsFileStatus getFileLinkInfo(String src) throws IOException { 
-    myMetrics.numFileInfoOps.inc();
+    metrics.incrFileInfoOps();
     return namesystem.getFileInfo(src, false);
   }
   
-  /** @inheritDoc */
+  @Override
   public long[] getStats() {
     return namesystem.getStats();
   }
@@ -1092,9 +1093,7 @@ public class NameNode implements Namenod
     return results;
   }
     
-  /**
-   * @inheritDoc
-   */
+  @Override
   public boolean setSafeMode(SafeModeAction action) throws IOException {
     return namesystem.setSafeMode(action);
   }
@@ -1106,18 +1105,13 @@ public class NameNode implements Namenod
     return namesystem.isInSafeMode();
   }
 
-  /**
-   * @throws AccessControlException 
-   * @inheritDoc
-   */
+  @Override
   public boolean restoreFailedStorage(String arg) 
       throws AccessControlException {
     return namesystem.restoreFailedStorage(arg);
   }
 
-  /**
-   * @inheritDoc
-   */
+  @Override
   public void saveNamespace() throws IOException {
     namesystem.saveNamespace();
   }
@@ -1207,17 +1201,17 @@ public class NameNode implements Namenod
     namesystem.fsync(src, clientName);
   }
 
-  /** @inheritDoc */
+  @Override
   public void setTimes(String src, long mtime, long atime) 
       throws IOException {
     namesystem.setTimes(src, mtime, atime);
   }
 
-  /** @inheritDoc */
+  @Override
   public void createSymlink(String target, String link, FsPermission dirPerms, 
                             boolean createParent) 
       throws IOException {
-    myMetrics.numcreateSymlinkOps.inc();
+    metrics.incrCreateSymlinkOps();
     /* We enforce the MAX_PATH_LENGTH limit even though a symlink target 
      * URI may refer to a non-HDFS file system. 
      */
@@ -1234,9 +1228,9 @@ public class NameNode implements Namenod
       new PermissionStatus(ugi.getShortUserName(), null, dirPerms), createParent);
   }
 
-  /** @inheritDoc */
+  @Override
   public String getLinkTarget(String path) throws IOException {
-    myMetrics.numgetLinkTargetOps.inc();
+    metrics.incrGetLinkTargetOps();
     /* Resolves the first symlink in the given path, returning a
      * new path consisting of the target of the symlink and any 
      * remaining path components from the original path.
@@ -1645,8 +1639,11 @@ public class NameNode implements Namenod
         return null; // avoid javac warning
       case BACKUP:
       case CHECKPOINT:
-        return new BackupNode(conf, startOpt.toNodeRole());
+        NamenodeRole role = startOpt.toNodeRole();
+        DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
+        return new BackupNode(conf, role);
       default:
+        DefaultMetricsSystem.initialize("NameNode");
         return new NameNode(conf);
     }
   }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Mon May 16 18:47:28 2011
@@ -49,7 +49,8 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.metrics.jvm.JvmMetrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
 import org.apache.hadoop.security.SecurityUtil;
@@ -154,7 +155,9 @@ public class SecondaryNameNode implement
           infoBindAddress);
     }
     // initiate Java VM metrics
-    JvmMetrics.init("SecondaryNameNode", conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY));
+    JvmMetrics.create("SecondaryNameNode",
+        conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
+        DefaultMetricsSystem.instance());
     
     // Create connection to the namenode.
     shouldRun = true;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java Mon May 16 18:47:28 2011
@@ -31,8 +31,8 @@ import org.apache.hadoop.classification.
  * be published as an interface.
  * 
  * <p>
- * Name Node runtime activity statistic  info is report in another MBean
- * @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean
+ * Name Node runtime activity statistics are reported in
+ * @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics
  *
  */
 @InterfaceAudience.Private

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java Mon May 16 18:47:28 2011
@@ -17,128 +17,146 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.metrics;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.metrics.*;
-import org.apache.hadoop.metrics.jvm.JvmMetrics;
-import org.apache.hadoop.metrics.util.MetricsBase;
-import org.apache.hadoop.metrics.util.MetricsIntValue;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import static org.apache.hadoop.metrics2.impl.MsInfo.*;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
 
 /**
- * 
  * This class is for maintaining  the various NameNode activity statistics
  * and publishing them through the metrics interfaces.
- * This also registers the JMX MBean for RPC.
- * <p>
- * This class has a number of metrics variables that are publicly accessible;
- * these variables (objects) have methods to update their values;
- *  for example:
- *  <p> {@link #syncs}.inc()
- *
  */
-@InterfaceAudience.Private
-public class NameNodeMetrics implements Updater {
-    private static Log log = LogFactory.getLog(NameNodeMetrics.class);
-    private final MetricsRecord metricsRecord;
-    public MetricsRegistry registry = new MetricsRegistry();
-    
-    private NameNodeActivityMBean namenodeActivityMBean;
-    
-    public MetricsTimeVaryingInt numCreateFileOps = 
-                    new MetricsTimeVaryingInt("CreateFileOps", registry);
-    public MetricsTimeVaryingInt numFilesCreated =
-                          new MetricsTimeVaryingInt("FilesCreated", registry);
-    public MetricsTimeVaryingInt numFilesAppended =
-                          new MetricsTimeVaryingInt("FilesAppended", registry);
-    public MetricsTimeVaryingInt numGetBlockLocations = 
-                    new MetricsTimeVaryingInt("GetBlockLocations", registry);
-    public MetricsTimeVaryingInt numFilesRenamed =
-                    new MetricsTimeVaryingInt("FilesRenamed", registry);
-    public MetricsTimeVaryingInt numGetListingOps = 
-                    new MetricsTimeVaryingInt("GetListingOps", registry);
-    public MetricsTimeVaryingInt numDeleteFileOps = 
-                          new MetricsTimeVaryingInt("DeleteFileOps", registry);
-    public MetricsTimeVaryingInt numFilesDeleted = new MetricsTimeVaryingInt(
-        "FilesDeleted", registry, 
-        "Number of files and directories deleted by delete or rename operation");
-    public MetricsTimeVaryingInt numFileInfoOps =
-                          new MetricsTimeVaryingInt("FileInfoOps", registry);
-    public MetricsTimeVaryingInt numAddBlockOps = 
-                          new MetricsTimeVaryingInt("AddBlockOps", registry);
-    public final MetricsTimeVaryingInt numGetAdditionalDatanodeOps
-        = new MetricsTimeVaryingInt("GetAdditionalDatanodeOps", registry);
-    public MetricsTimeVaryingInt numcreateSymlinkOps = 
-                          new MetricsTimeVaryingInt("CreateSymlinkOps", registry);
-    public MetricsTimeVaryingInt numgetLinkTargetOps = 
-                          new MetricsTimeVaryingInt("GetLinkTargetOps", registry);
-
-    public MetricsTimeVaryingRate transactions = new MetricsTimeVaryingRate(
-      "Transactions", registry, "Journal Transaction");
-    public MetricsTimeVaryingRate syncs =
-                    new MetricsTimeVaryingRate("Syncs", registry, "Journal Sync");
-    public MetricsTimeVaryingInt transactionsBatchedInSync = new MetricsTimeVaryingInt(
-      "JournalTransactionsBatchedInSync", registry,
-      "Journal Transactions Batched In Sync");
-    public MetricsTimeVaryingRate blockReport =
-                    new MetricsTimeVaryingRate("blockReport", registry, "Block Report");
-    public MetricsIntValue safeModeTime =
-                    new MetricsIntValue("SafemodeTime", registry, "Duration in SafeMode at Startup");
-    public MetricsIntValue fsImageLoadTime = 
-                    new MetricsIntValue("fsImageLoadTime", registry, "Time loading FS Image at Startup");
-    public MetricsIntValue numBlocksCorrupted =
-                    new MetricsIntValue("BlocksCorrupted", registry);
-    public MetricsTimeVaryingInt numFilesInGetListingOps = 
-                    new MetricsTimeVaryingInt("FilesInGetListingOps", registry);
-
-      
-    public NameNodeMetrics(Configuration conf, NamenodeRole nameNodeRole) {
-      String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
-      // Initiate Java VM metrics
-      String processName = nameNodeRole.toString();
-      JvmMetrics.init(processName, sessionId);
-
-      // Now the Mbean for the name node - this also registers the MBean
-      namenodeActivityMBean = new NameNodeActivityMBean(registry);
-      
-      // Create a record for NameNode metrics
-      MetricsContext metricsContext = MetricsUtil.getContext("dfs");
-      metricsRecord = MetricsUtil.createRecord(metricsContext, processName.toLowerCase());
-      metricsRecord.setTag("sessionId", sessionId);
-      metricsContext.registerUpdater(this);
-      log.info("Initializing NameNodeMeterics using context object:" +
-                metricsContext.getClass().getName());
-    }
-    
-
-    
-    public void shutdown() {
-      if (namenodeActivityMBean != null) 
-        namenodeActivityMBean.shutdown();
-    }
-      
-    /**
-     * Since this object is a registered updater, this method will be called
-     * periodically, e.g. every 5 seconds.
-     */
-    public void doUpdates(MetricsContext unused) {
-      synchronized (this) {
-        for (MetricsBase m : registry.getMetricsList()) {
-          m.pushMetric(metricsRecord);
-        }
-      }
-      metricsRecord.update();
-    }
-
-    public void resetAllMinMax() {
-      transactions.resetMinMax();
-      syncs.resetMinMax();
-      blockReport.resetMinMax();
-    }
+@Metrics(name="NameNodeActivity", about="NameNode metrics", context="dfs")
+public class NameNodeMetrics {
+  final MetricsRegistry registry = new MetricsRegistry("namenode");
+
+  @Metric MutableCounterLong createFileOps;
+  @Metric MutableCounterLong filesCreated;
+  @Metric MutableCounterLong filesAppended;
+  @Metric MutableCounterLong getBlockLocations;
+  @Metric MutableCounterLong filesRenamed;
+  @Metric MutableCounterLong getListingOps;
+  @Metric MutableCounterLong deleteFileOps;
+  @Metric("Number of files/dirs deleted by delete or rename operations")
+  MutableCounterLong filesDeleted;
+  @Metric MutableCounterLong fileInfoOps;
+  @Metric MutableCounterLong addBlockOps;
+  @Metric MutableCounterLong getAdditionalDatanodeOps;
+  @Metric MutableCounterLong createSymlinkOps;
+  @Metric MutableCounterLong getLinkTargetOps;
+  @Metric MutableCounterLong filesInGetListingOps;
+
+  @Metric("Journal transactions") MutableRate transactions;
+  @Metric("Journal syncs") MutableRate syncs;
+  @Metric("Journal transactions batched in sync")
+  MutableCounterLong transactionsBatchedInSync;
+  @Metric("Block report") MutableRate blockReport;
+
+  @Metric("Duration in SafeMode at startup") MutableGaugeInt safeModeTime;
+  @Metric("Time loading FS Image at startup") MutableGaugeInt fsImageLoadTime;
+
+  NameNodeMetrics(String processName, String sessionId) {
+    registry.tag(ProcessName, processName).tag(SessionId, sessionId);
+  }
+
+  public static NameNodeMetrics create(Configuration conf, NamenodeRole r) {
+    String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+    String processName = r.toString();
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    JvmMetrics.create(processName, sessionId, ms);
+    return ms.register(new NameNodeMetrics(processName, sessionId));
+  }
+
+  public void shutdown() {
+    DefaultMetricsSystem.shutdown();
+  }
+
+  public void incrGetBlockLocations() {
+    getBlockLocations.incr();
+  }
+
+  public void incrFilesCreated() {
+    filesCreated.incr();
+  }
+
+  public void incrCreateFileOps() {
+    createFileOps.incr();
+  }
+
+  public void incrFilesAppended() {
+    filesAppended.incr();
+  }
+
+  public void incrAddBlockOps() {
+    addBlockOps.incr();
+  }
+  
+  public void incrGetAdditionalDatanodeOps() {
+    getAdditionalDatanodeOps.incr();
+  }
+
+  public void incrFilesRenamed() {
+    filesRenamed.incr();
+  }
+
+  public void incrFilesDeleted(int delta) {
+    filesDeleted.incr(delta);
+  }
+
+  public void incrDeleteFileOps() {
+    deleteFileOps.incr();
+  }
+
+  public void incrGetListingOps() {
+    getListingOps.incr();
+  }
+
+  public void incrFilesInGetListingOps(int delta) {
+    filesInGetListingOps.incr(delta);
+  }
+
+  public void incrFileInfoOps() {
+    fileInfoOps.incr();
+  }
+
+  public void incrCreateSymlinkOps() {
+    createSymlinkOps.incr();
+  }
+
+  public void incrGetLinkTargetOps() {
+    getLinkTargetOps.incr();
+  }
+
+  public void addTransaction(long latency) {
+    transactions.add(latency);
+  }
+
+  public void incrTransactionsBatchedInSync() {
+    transactionsBatchedInSync.incr();
+  }
+
+  public void addSync(long elapsed) {
+    syncs.add(elapsed);
+  }
+
+  public void setFsImageLoadTime(long elapsed) {
+    fsImageLoadTime.set((int) elapsed);
+  }
+
+  public void addBlockReport(long latency) {
+    blockReport.add(latency);
+  }
+
+  public void setSafeModeTime(long elapsed) {
+    safeModeTime.set((int) elapsed);
+  }
 }
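
The rewritten NameNodeMetrics above shows the metrics2 idiom this patch adopts throughout: an annotated plain class becomes a metrics source, with @Metric fields replacing the hand-built MetricsTimeVaryingInt/MetricsIntValue registry and the doUpdates() push loop. A rough stand-alone sketch of the same pattern (DemoMetrics and its fields are hypothetical names, for illustration only):

    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;
    import org.apache.hadoop.metrics2.lib.MutableRate;

    @Metrics(name="DemoActivity", about="Demo metrics", context="dfs")
    public class DemoMetrics {
      // Field names surface as metric names: Requests, RequestTime.
      @Metric("Requests served") MutableCounterLong requests;
      @Metric("Request latency") MutableRate requestTime;

      public static DemoMetrics create() {
        MetricsSystem ms = DefaultMetricsSystem.instance();
        // register() builds the source from the annotations and
        // instantiates the mutable metric fields via reflection,
        // which is why the fields are left unassigned.
        return ms.register(new DemoMetrics());
      }

      public void logRequest(long elapsedMillis) {
        requests.incr();                 // monotonic counter
        requestTime.add(elapsedMillis);  // tracks num ops and average time
      }
    }

This is also why the patch can delete NameNodeActivityMBean outright: registration, JMX publication, and snapshotting are handled by the framework.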

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/JMXGet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/JMXGet.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/JMXGet.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/JMXGet.java Mon May 16 18:47:28 2011
@@ -126,7 +126,7 @@ public class JMXGet {
           continue;
         }
       }
-      err("Info: key = " + key + "; val = " + val);
+      err("Info: key = " + key + "; val = " + val.getClass() + ":" + val);
       break;
     }
 
@@ -193,7 +193,7 @@ public class JMXGet {
     err("\nMBean count = " + mbsc.getMBeanCount());
 
     // Query MBean names for specific domain "hadoop" and service
-    ObjectName query = new ObjectName("hadoop:service=" + service + ",*");
+    ObjectName query = new ObjectName("Hadoop:service=" + service + ",*");
     hadoopObjectNames = new ArrayList<ObjectName>(5);
     err("\nQuery MBeanServer MBeans:");
     Set<ObjectName> names = new TreeSet<ObjectName>(mbsc
@@ -201,7 +201,7 @@ public class JMXGet {
 
     for (ObjectName name : names) {
       hadoopObjectNames.add(name);
-      err("hadoop services: " + name);
+      err("Hadoop service: " + name);
     }
 
   }
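
The domain fix above tracks metrics2's MBean naming: sources now register as Hadoop:service=<service>,name=<source> (capital H), which is also why the MXBean tests below change their ObjectNames. An illustrative in-process query, assuming a NameNode running in the same JVM:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class JmxQuery {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // All metrics2 sources for a service match this pattern, e.g.
        // Hadoop:service=NameNode,name=NameNodeActivity
        ObjectName query = new ObjectName("Hadoop:service=NameNode,*");
        for (ObjectName name : mbs.queryNames(query, null)) {
          System.out.println(name);
        }
      }
    }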

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Mon May 16 18:47:28 2011
@@ -61,6 +61,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StaticMapping;
@@ -83,6 +84,8 @@ public class MiniDFSCluster {
   private static final String NAMESERVICE_ID_PREFIX = "nameserviceId";
   private static final Log LOG = LogFactory.getLog(MiniDFSCluster.class);
 
+  static { DefaultMetricsSystem.setMiniClusterMode(true); }
+
   /**
    * Class to construct instances of MiniDFSClusters with specific options.
    */
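
The new static initializer matters because a MiniDFSCluster starts several daemons in one JVM; mini-cluster mode relaxes DefaultMetricsSystem's unique-name checks so same-named sources and MBeans do not collide. The same arrangement for any multi-daemon test, sketched with a hypothetical base class:

    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

    public abstract class MultiDaemonTestBase {  // hypothetical base class
      static {
        // Relax unique-name checks so several NameNodes/DataNodes can
        // register same-named metrics sources within one JVM.
        DefaultMetricsSystem.setMiniClusterMode(true);
      }
    }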

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java Mon May 16 18:47:28 2011
@@ -26,7 +26,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 /**
  * This test ensures the all types of data node report work correctly.
@@ -77,9 +77,7 @@ public class TestDatanodeReport extends 
                    NUM_OF_DATANODES);
 
       Thread.sleep(5000);
-      FSNamesystemMetrics fsMetrics = 
-                     cluster.getNamesystem().getFSNamesystemMetrics();
-      assertEquals(1,fsMetrics.numExpiredHeartbeats.getCurrentIntervalValue());
+      assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
     }finally {
       cluster.shutdown();
     }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java Mon May 16 18:47:28 2011
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.namenode.BackupNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.test.GenericTestUtils;
 
@@ -52,6 +53,9 @@ public class TestHDFSServerPorts extends
   
   // reset default 0.0.0.0 addresses in order to avoid IPv6 problem
   static final String THIS_HOST = getFullHostName() + ":0";
+  static {
+    DefaultMetricsSystem.setMiniClusterMode(true);
+  }
 
   Configuration config;
   File hdfsDir;

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Mon May 16 18:47:28 2011
@@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 
@@ -909,18 +909,17 @@ public class SimulatedFSDataset  impleme
 
     try {
       bean = new StandardMBean(this,FSDatasetMBean.class);
-      mbeanName = MBeanUtil.registerMBean("DataNode",
-          "FSDatasetState-" + storageId, bean);
+      mbeanName = MBeans.register("DataNode", "FSDatasetState-" +
+                                  storageId, bean);
     } catch (NotCompliantMBeanException e) {
-      e.printStackTrace();
+      DataNode.LOG.warn("Error registering FSDatasetState MBean", e);
     }
  
-    DataNode.LOG.info("Registered FSDatasetStatusMBean");
+    DataNode.LOG.info("Registered FSDatasetState MBean");
   }
 
   public void shutdown() {
-    if (mbeanName != null)
-      MBeanUtil.unregisterMBean(mbeanName);
+    if (mbeanName != null) MBeans.unregister(mbeanName);
   }
 
   public String getStorageInfo() {
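
MBeans.register()/unregister() are the metrics2 replacements for MBeanUtil used above; register() returns the ObjectName needed for later cleanup and follows the Hadoop:service=...,name=... convention. A small sketch under hypothetical names:

    import javax.management.ObjectName;
    import org.apache.hadoop.metrics2.util.MBeans;

    // Hypothetical standard-MBean pair, for illustration only.
    interface DemoStateMBean {
      int getActiveCount();
    }

    public class DemoState implements DemoStateMBean {
      private ObjectName beanName;

      public int getActiveCount() { return 0; }

      public void start() {
        // Registers as Hadoop:service=Demo,name=DemoState
        beanName = MBeans.register("Demo", "DemoState", this);
      }

      public void stop() {
        if (beanName != null) MBeans.unregister(beanName);
      }
    }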

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java Mon May 16 18:47:28 2011
@@ -43,7 +43,8 @@ public class TestDataNodeMXBean {
       DataNode datanode = datanodes.get(0);
 
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
-      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=DataNodeInfo");
+      ObjectName mxbeanName = new ObjectName(
+          "Hadoop:service=DataNode,name=DataNodeInfo");
       // get attribute "ClusterId"
       String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
       Assert.assertEquals(datanode.getClusterId(), clusterId);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java Mon May 16 18:47:28 2011
@@ -24,8 +24,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import static org.apache.hadoop.test.MetricsAsserts.*;
+
 import junit.framework.TestCase;
 
 public class TestDataNodeMetrics extends TestCase {
@@ -42,8 +44,8 @@ public class TestDataNodeMetrics extends
       List<DataNode> datanodes = cluster.getDataNodes();
       assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
-      DataNodeMetrics metrics = datanode.getMetrics();
-      assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
+      MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
+      assertCounter("BytesWritten", LONG_FILE_LEN, rb);
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
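
The test updates above and below all follow the MetricsAsserts pattern: getMetrics() snapshots a source by name, and assertCounter() matches the capitalized @Metric field names ("BytesWritten", "VolumeFailures", and so on). Sketched as a hypothetical helper:

    import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
    import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.metrics2.MetricsRecordBuilder;

    public class DataNodeMetricsCheck {
      // Snapshot the datanode's metrics source and assert one counter.
      static void assertBytesWritten(DataNode dn, long expected) {
        MetricsRecordBuilder rb = getMetrics(dn.getMetrics().name());
        assertCounter("BytesWritten", expected, rb);
      }
    }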

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java Mon May 16 18:47:28 2011
@@ -19,18 +19,17 @@ package org.apache.hadoop.hdfs.server.da
 
 import java.io.File;
 import java.util.ArrayList;
-import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -149,15 +148,12 @@ public class TestDataNodeVolumeFailureRe
     /*
      * The metrics should confirm the volume failures.
      */
-    DataNodeMetrics metrics1 = dns.get(0).getMetrics();
-    DataNodeMetrics metrics2 = dns.get(1).getMetrics();
-    DataNodeMetrics metrics3 = dns.get(2).getMetrics();
-    assertEquals("Vol1 should report 1 failure",
-        1, metrics1.volumeFailures.getCurrentIntervalValue());
-    assertEquals("Vol2 should report 1 failure",
-        1, metrics2.volumeFailures.getCurrentIntervalValue());
-    assertEquals("Vol3 should have no failures",
-        0, metrics3.volumeFailures.getCurrentIntervalValue());
+    assertCounter("VolumeFailures", 1L, 
+        getMetrics(dns.get(0).getMetrics().name()));
+    assertCounter("VolumeFailures", 1L, 
+        getMetrics(dns.get(1).getMetrics().name()));
+    assertCounter("VolumeFailures", 0L, 
+        getMetrics(dns.get(2).getMetrics().name()));
 
     // Ensure we wait a sufficient amount of time
     assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
@@ -175,8 +171,8 @@ public class TestDataNodeVolumeFailureRe
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)3);
     assertTrue("DN3 should still be up", dns.get(2).isDatanodeUp());
-    assertEquals("Vol3 should report 1 failure",
-        1, metrics3.volumeFailures.getCurrentIntervalValue());
+    assertCounter("VolumeFailures", 1L, 
+        getMetrics(dns.get(2).getMetrics().name()));
 
     ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
     ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
@@ -211,9 +207,8 @@ public class TestDataNodeVolumeFailureRe
     DFSTestUtil.waitForDatanodeDeath(dns.get(2));
 
     // And report two failed volumes
-    metrics3 = dns.get(2).getMetrics();
-    assertEquals("DN3 should report 2 vol failures",
-        2, metrics3.volumeFailures.getCurrentIntervalValue());
+    assertCounter("VolumeFailures", 2L, 
+        getMetrics(dns.get(2).getMetrics().name()));
 
     // The NN considers the DN dead
     DFSTestUtil.waitForDatanodeStatus(ns, 2, 1, 2, 

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Mon May 16 18:47:28 2011
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.na
 import junit.framework.TestCase;
 import java.io.*;
 import java.net.URI;
-import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
@@ -38,9 +37,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
- 
-import org.mockito.Mockito;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 /**
  * This class tests the creation and validation of a checkpoint.
@@ -239,16 +236,13 @@ public class TestEditLog extends TestCas
 
       // Now ask to sync edit from A, which was already batched in - thus
       // it should increment the batch count metric
-      NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
-      metrics.transactionsBatchedInSync = Mockito.mock(MetricsTimeVaryingInt.class);
-
       doCallLogSync(threadA, editLog);
       assertEquals("logSync from first thread shouldn't change txid",
         2, editLog.getSyncTxId());
 
       //Should have incremented the batch count exactly once
-      Mockito.verify(metrics.transactionsBatchedInSync,
-                    Mockito.times(1)).inc();
+      assertCounter("TransactionsBatchedInSync", 1L, 
+        getMetrics("NameNodeActivity"));
     } finally {
       threadA.shutdown();
       threadB.shutdown();

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Mon May 16 18:47:28 2011
@@ -45,7 +45,8 @@ public class TestNameNodeMXBean {
       FSNamesystem fsn = cluster.getNameNode().namesystem;
 
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=NameNodeInfo");
+      ObjectName mxbeanName = new ObjectName(
+          "Hadoop:service=NameNode,name=NameNodeInfo");
       // get attribute "ClusterId"
       String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
       Assert.assertEquals(fsn.getClusterId(), clusterId);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java?rev=1103834&r1=1103833&r2=1103834&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java Mon May 16 18:47:28 2011
@@ -28,15 +28,16 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 /**
  * Test case for FilesInGetListingOps metric in Namenode
  */
 public class TestNNMetricFilesInGetListingOps extends TestCase {
   private static final Configuration CONF = new HdfsConfiguration();
+  private static final String NN_METRICS = "NameNodeActivity";
   static {
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
@@ -45,7 +46,6 @@ public class TestNNMetricFilesInGetListi
   }
      
   private MiniDFSCluster cluster;
-  private NameNodeMetrics nnMetrics;
   private DistributedFileSystem fs;
   private Random rand = new Random();
 
@@ -54,7 +54,6 @@ public class TestNNMetricFilesInGetListi
     cluster = new MiniDFSCluster.Builder(CONF).build();
     cluster.waitActive();
     cluster.getNameNode();
-    nnMetrics = NameNode.getNameNodeMetrics();
     fs = (DistributedFileSystem) cluster.getFileSystem();
   }
 
@@ -76,9 +75,9 @@ public class TestNNMetricFilesInGetListi
     createFile("/tmp2/t1", 3200, (short)3);
     createFile("/tmp2/t2", 3200, (short)3);
     cluster.getNameNode().getListing("/tmp1", HdfsFileStatus.EMPTY_NAME, false);
-    assertEquals(2,nnMetrics.numFilesInGetListingOps.getCurrentIntervalValue());
-    cluster.getNameNode().getListing("/tmp2", HdfsFileStatus.EMPTY_NAME, false) ;
-    assertEquals(4,nnMetrics.numFilesInGetListingOps.getCurrentIntervalValue());
+    assertCounter("FilesInGetListingOps", 2L, getMetrics(NN_METRICS));
+    cluster.getNameNode().getListing("/tmp2", HdfsFileStatus.EMPTY_NAME, false);
+    assertCounter("FilesInGetListingOps", 4L, getMetrics(NN_METRICS));
   }
 }
 


