hadoop-hdfs-commits mailing list archives

From omal...@apache.org
Subject svn commit: r1089315 [1/2] - in /hadoop/hdfs/branches/yahoo-merge: ./ conf/ ivy/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/datanode/metrics/ src/java/org/apache/hadoop/hdfs/...
Date Wed, 06 Apr 2011 06:22:33 GMT
Author: omalley
Date: Wed Apr  6 06:22:32 2011
New Revision: 1089315

URL: http://svn.apache.org/viewvc?rev=1089315&view=rev
Log:
commit 62e2e92262251998e31fc90d9329644976d95213
Author: Owen O'Malley <omalley@apache.org>
Date:   Tue Apr 5 14:44:49 2011 -0700

    HDFS-1117 Metrics 2.0 HDFS instrumentation (Luke Lu)
    
    Note, this change depends on HADOOP:6919 and HADOOP:6920 in common.
    
    Conflicts:
    
    	src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    	src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
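
The metrics2 idiom adopted throughout this patch replaces hand-rolled
MetricsTimeVarying* fields and Updater callbacks with annotated fields that
DefaultMetricsSystem discovers by reflection. A minimal sketch of the pattern
follows; the class and metric names here are illustrative only, not part of
the patch (see the DataNodeMetrics.java diff below for the real source):

    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;
    import org.apache.hadoop.metrics2.lib.MutableRate;

    @Metrics(about="Example metrics", context="dfs")
    class ExampleMetrics {
      @Metric MutableCounterLong opsCompleted; // monotonic counter
      @Metric MutableRate exampleOp;           // op count plus average latency

      static ExampleMetrics create() {
        // register(name, description, source) returns the source
        MetricsSystem ms = DefaultMetricsSystem.instance();
        return ms.register("ExampleActivity", null, new ExampleMetrics());
      }

      void incrOpsCompleted() { opsCompleted.incr(); }
      void addExampleOp(long latency) { exampleOp.add(latency); }
    }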

Added:
    hadoop/hdfs/branches/yahoo-merge/conf/hadoop-metrics2.properties
Removed:
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeActivityMBean.java
Modified:
    hadoop/hdfs/branches/yahoo-merge/CHANGES.txt
    hadoop/hdfs/branches/yahoo-merge/ivy.xml
    hadoop/hdfs/branches/yahoo-merge/ivy/libraries.properties
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
    hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/tools/JMXGet.java
    hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java
    hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
    hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
    hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
    hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
    hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
    hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/tools/TestJMXGet.java

Modified: hadoop/hdfs/branches/yahoo-merge/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/CHANGES.txt?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/CHANGES.txt (original)
+++ hadoop/hdfs/branches/yahoo-merge/CHANGES.txt Wed Apr  6 06:22:32 2011
@@ -216,7 +216,8 @@ Trunk (unreleased changes)
     HDFS-1628. Display full path in AccessControlException.  (John George
     via szetszwo)
 
-    HDFS-1707. Federation: Failure in browsing data on new namenodes. (jitendra)
+    HDFS-1707. Federation: Failure in browsing data on new namenodes. 
+    (jitendra)
 
     HDFS-1683. Test Balancer with multiple NameNodes.  (szetszwo)
 
@@ -225,6 +226,8 @@ Trunk (unreleased changes)
     HDFS-1588. Remove hardcoded strings for configuration keys, "dfs.hosts"
     and "dfs.hosts.exlude". (Erik Steffl via suresh)
 
+    HDFS-1117. Metrics 2.0 HDFS instrumentation. (Luke Lu)
+
   OPTIMIZATIONS
 
   BUG FIXES

Added: hadoop/hdfs/branches/yahoo-merge/conf/hadoop-metrics2.properties
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/conf/hadoop-metrics2.properties?rev=1089315&view=auto
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/conf/hadoop-metrics2.properties (added)
+++ hadoop/hdfs/branches/yahoo-merge/conf/hadoop-metrics2.properties Wed Apr  6 06:22:32 2011
@@ -0,0 +1,8 @@
+# syntax: [prefix].[source|sink].[instance].[options]
+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
+
+#namenode.sink.file.filename=namenode-metrics.out
+
+#datanode.sink.file.filename=datanode-metrics.out
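
With FileSink mapped for every prefix by the *.sink.file.class line, sending a
daemon's metrics to a file only requires uncommenting its filename option, for
example:

    namenode.sink.file.filename=namenode-metrics.out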

Modified: hadoop/hdfs/branches/yahoo-merge/ivy.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/ivy.xml?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/ivy.xml (original)
+++ hadoop/hdfs/branches/yahoo-merge/ivy.xml Wed Apr  6 06:22:32 2011
@@ -84,8 +84,6 @@
 
     <dependency org="org.apache.lucene" name="lucene-core" rev="${lucene-core.version}" conf="javadoc->default"/> 
 
-    <dependency org="org.mockito" name="mockito-all" rev="${mockito-all.version}" conf="common->master"/>
-
    </dependencies>
   
 </ivy-module>

Modified: hadoop/hdfs/branches/yahoo-merge/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/ivy/libraries.properties?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/ivy/libraries.properties (original)
+++ hadoop/hdfs/branches/yahoo-merge/ivy/libraries.properties Wed Apr  6 06:22:32 2011
@@ -71,5 +71,3 @@ xerces.version=1.4.4
 
 #This property has to be updated synchronously with aop.xml
 aspectj.version=1.6.5
-
-mockito-all.version=1.8.2

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/DFSUtil.java Wed Apr  6 06:22:32 2011
@@ -533,4 +533,13 @@ public class DFSUtil {
     return new InetSocketAddress(address.substring(0, colon), 
         Integer.parseInt(address.substring(colon + 1)));
   }
+
+  /**
+   * Round bytes to GiB (gibibyte)
+   * @param bytes number of bytes
+   * @return number of GiB
+   */
+  public static int roundBytesToGB(long bytes) {
+    return Math.round((float)bytes/ 1024 / 1024 / 1024);
+  }
 }
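
The helper rounds to the nearest GiB rather than truncating: for example,
roundBytesToGB(1610612736) (1.5 GiB) returns 2 and roundBytesToGB(536870912)
(0.5 GiB) returns 1, since Math.round rounds half up.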

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Wed Apr  6 06:22:32 2011
@@ -438,13 +438,13 @@ class BlockPoolSliceScanner {
         
         if (second) {
           totalScanErrors++;
-          datanode.getMetrics().blockVerificationFailures.inc(); 
+          datanode.getMetrics().incrBlockVerificationFailures();
           handleScanFailure(block);
           return;
         } 
       } finally {
         IOUtils.closeStream(blockSender);
-        datanode.getMetrics().blocksVerified.inc();
+        datanode.getMetrics().incrBlocksVerified();
         totalScans++;
       }
     }

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Wed Apr  6 06:22:32 2011
@@ -585,7 +585,7 @@ class BlockReceiver implements java.io.C
             offsetInBlock, lastChunkChecksum
           );
           
-          datanode.myMetrics.bytesWritten.inc(len);
+          datanode.metrics.incrBytesWritten(len);
         }
       } catch (IOException iex) {
         datanode.checkDiskError(iex);
@@ -648,7 +648,7 @@ class BlockReceiver implements java.io.C
         // Finalize the block. Does this fsync()?
         block.setNumBytes(replicaInfo.getNumBytes());
         datanode.data.finalizeBlock(block);
-        datanode.myMetrics.blocksWritten.inc();
+        datanode.metrics.incrBlocksWritten();
       }
 
     } catch (IOException ioe) {

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Apr  6 06:22:32 2011
@@ -114,6 +114,8 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
@@ -337,7 +339,7 @@ public class DataNode extends Configured
   long heartBeatInterval;
   private DataStorage storage = null;
   private HttpServer infoServer = null;
-  DataNodeMetrics myMetrics;
+  DataNodeMetrics metrics;
   private InetSocketAddress selfAddr;
   
   private static volatile DataNode datanodeObject = null;
@@ -903,7 +905,7 @@ public class DataNode extends Configured
         cmd = bpNamenode.blockReport(bpRegistration, blockPoolId, bReport
             .getBlockListAsLongs());
         long brTime = now() - brStartTime;
-        myMetrics.blockReports.inc(brTime);
+        metrics.addBlockReport(brTime);
         LOG.info("BlockReport of " + bReport.getNumberOfBlocks() +
             " blocks got processed in " + brTime + " msecs");
         //
@@ -1011,7 +1013,7 @@ public class DataNode extends Configured
             //
             lastHeartbeat = startTime;
             DatanodeCommand[] cmds = sendHeartBeat();
-            myMetrics.heartbeats.inc(now() - startTime);
+            metrics.addHeartbeat(now() - startTime);
             if (!processCommand(cmds))
               continue;
           }
@@ -1232,7 +1234,7 @@ public class DataNode extends Configured
       case DatanodeProtocol.DNA_TRANSFER:
         // Send a copy of a block to another datanode
         transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(), bcmd.getTargets());
-        myMetrics.blocksReplicated.inc(bcmd.getBlocks().length);
+        metrics.incrBlocksReplicated(bcmd.getBlocks().length);
         break;
       case DatanodeProtocol.DNA_INVALIDATE:
         //
@@ -1250,7 +1252,7 @@ public class DataNode extends Configured
           checkDiskError();
           throw e;
         }
-        myMetrics.blocksRemoved.inc(toDelete.length);
+        metrics.incrBlocksRemoved(toDelete.length);
         break;
       case DatanodeProtocol.DNA_SHUTDOWN:
         // shut down the data node
@@ -1348,7 +1350,7 @@ public class DataNode extends Configured
     startInfoServer(conf);
     initIpcServer(conf);
 
-    myMetrics = new DataNodeMetrics(conf, getMachineName());
+    metrics = DataNodeMetrics.create(conf, datanodeId.getName());
 
     blockPoolManager = new BlockPoolManager(conf);
   }
@@ -1398,14 +1400,7 @@ public class DataNode extends Configured
   }
   
   private void registerMXBean() {
-    // register MXBean
-    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
-    try {
-      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=DataNodeInfo");
-      mbs.registerMBean(this, mxbeanName);
-    } catch ( javax.management.JMException e ) {
-      LOG.warn("Failed to register NameNode MXBean", e);
-    }
+    MBeans.register("DataNode", "DataNodeInfo", this);
   }
   
   int getPort() {
@@ -1519,7 +1514,7 @@ public class DataNode extends Configured
   }
     
   DataNodeMetrics getMetrics() {
-    return myMetrics;
+    return metrics;
   }
   
   public static void setNewStorageID(DatanodeID dnId) {
@@ -1642,8 +1637,8 @@ public class DataNode extends Configured
     if (data != null) {
       data.shutdown();
     }
-    if (myMetrics != null) {
-      myMetrics.shutdown();
+    if (metrics != null) {
+      metrics.shutdown();
     }
   }
   
@@ -1716,6 +1711,25 @@ public class DataNode extends Configured
     return bpos.getUpgradeManager();
   }
 
+  private void processDistributedUpgradeCommand(UpgradeCommand comm
+                                               ) throws IOException {
+    assert upgradeManager != null : "DataNode.upgradeManager is null.";
+    upgradeManager.processUpgradeCommand(comm);
+  }
+
+  /**
+   * Start distributed upgrade if it should be initiated by the data-node.
+   */
+  private void startDistributedUpgradeIfNeeded() throws IOException {
+    UpgradeManagerDatanode um = DataNode.getDataNode().upgradeManager;
+    assert um != null : "DataNode.upgradeManager is null.";
+    if(!um.getUpgradeState())
+      return;
+    um.setUpgradeState(false, um.getUpgradeVersion());
+    um.startUpgrade();
+    return;
+  }
+
   private void transferBlock( ExtendedBlock block, 
                               DatanodeInfo xferTargets[] 
                               ) throws IOException {
@@ -1941,7 +1955,7 @@ public class DataNode extends Configured
    * @param delHint
    */
   void closeBlock(ExtendedBlock block, String delHint) {
-    myMetrics.blocksWritten.inc();
+    metrics.incrBlocksWritten();
     BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
     if(bpos != null) {
       bpos.notifyNamenodeReceivedBlock(block, delHint);
@@ -2080,7 +2094,7 @@ public class DataNode extends Configured
         conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
                  DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
     ArrayList<File> dirs = getDataDirsFromURIs(dataDirs, localFS, permission);
-
+    DefaultMetricsSystem.initialize("DataNode");
     if (dirs.size() > 0) {
       return new DataNode(conf, dirs, resources);
     }
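
Two details worth noting in the DataNode changes above: MBeans.register (from
org.apache.hadoop.metrics2.util) collapses the manual platform-MBeanServer
boilerplate into a single line, retiring along the way the copy-pasted
"Failed to register NameNode MXBean" warning that the DataNode used to log;
and DefaultMetricsSystem.initialize("DataNode") now runs just before the
DataNode is constructed, so DataNodeMetrics.create() registers against an
initialized metrics system.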

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Wed Apr  6 06:22:32 2011
@@ -47,8 +47,8 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -178,11 +178,11 @@ class DataXceiver extends DataTransferPr
       SUCCESS.write(out); // send op status
       long read = blockSender.sendBlock(out, baseStream, null); // send data
       
-      datanode.myMetrics.bytesRead.inc((int) read);
-      datanode.myMetrics.blocksRead.inc();
+      datanode.metrics.incrBytesRead((int) read);
+      datanode.metrics.incrBlocksRead();
     } catch ( SocketException ignored ) {
       // Its ok for remote side to close the connection anytime.
-      datanode.myMetrics.blocksRead.inc();
+      datanode.metrics.incrBlocksRead();
     } catch ( IOException ioe ) {
       /* What exactly should we do here?
        * Earlier version shutdown() datanode if there is disk error.
@@ -198,9 +198,8 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics
-    updateDuration(datanode.myMetrics.readBlockOp);
-    updateCounter(datanode.myMetrics.readsFromLocalClient,
-                  datanode.myMetrics.readsFromRemoteClient);
+    datanode.metrics.addReadBlockOp(elapsed());
+    datanode.metrics.incrReadsFromClient(isLocal);
   }
 
   /**
@@ -395,9 +394,8 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics
-    updateDuration(datanode.myMetrics.writeBlockOp);
-    updateCounter(datanode.myMetrics.writesFromLocalClient,
-                  datanode.myMetrics.writesFromRemoteClient);
+    datanode.metrics.addWriteBlockOp(elapsed());
+    datanode.metrics.incrWritesFromClient(isLocal);
   }
 
   /**
@@ -461,7 +459,7 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics
-    updateDuration(datanode.myMetrics.blockChecksumOp);
+    datanode.metrics.addBlockChecksumOp(elapsed());
   }
 
   /**
@@ -513,8 +511,8 @@ class DataXceiver extends DataTransferPr
       long read = blockSender.sendBlock(reply, baseStream, 
                                         dataXceiverServer.balanceThrottler);
 
-      datanode.myMetrics.bytesRead.inc((int) read);
-      datanode.myMetrics.blocksRead.inc();
+      datanode.metrics.incrBytesRead((int) read);
+      datanode.metrics.incrBlocksRead();
       
       LOG.info("Copied block " + block + " to " + s.getRemoteSocketAddress());
     } catch (IOException ioe) {
@@ -534,7 +532,7 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics    
-    updateDuration(datanode.myMetrics.copyBlockOp);
+    datanode.metrics.addCopyBlockOp(elapsed());
   }
 
   /**
@@ -646,16 +644,16 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics
-    updateDuration(datanode.myMetrics.replaceBlockOp);
+    datanode.metrics.addReplaceBlockOp(elapsed());
   }
 
-  private void updateDuration(MetricsTimeVaryingRate mtvr) {
-    mtvr.inc(now() - opStartTime);
+  private long elapsed() {
+    return now() - opStartTime;
   }
 
-  private void updateCounter(MetricsTimeVaryingInt localCounter,
-      MetricsTimeVaryingInt remoteCounter) {
-    (isLocal? localCounter: remoteCounter).inc();
+  private void updateCounter(MutableCounterLong localCounter,
+      MutableCounterLong remoteCounter) {
+    (isLocal? localCounter: remoteCounter).incr();
   }
 
   /**

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Wed Apr  6 06:22:32 2011
@@ -59,7 +59,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.StringUtils;
@@ -2030,18 +2030,17 @@ public class FSDataset implements FSCons
     }
     try {
       bean = new StandardMBean(this,FSDatasetMBean.class);
-      mbeanName = MBeanUtil.registerMBean("DataNode", "FSDatasetState-" + storageName, bean);
+      mbeanName = MBeans.register("DataNode", "FSDatasetState-" + storageName, bean);
     } catch (NotCompliantMBeanException e) {
-      e.printStackTrace();
+      DataNode.LOG.warn("Error registering FSDatasetState MBean", e);
     }
- 
-    DataNode.LOG.info("Registered FSDatasetStatusMBean");
+    DataNode.LOG.info("Registered FSDatasetState MBean");
   }
 
   @Override // FSDatasetInterface
   public void shutdown() {
     if (mbeanName != null)
-      MBeanUtil.unregisterMBean(mbeanName);
+      MBeans.unregister(mbeanName);
     
     if (asyncDiskService != null) {
       asyncDiskService.shutdown();

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java Wed Apr  6 06:22:32 2011
@@ -17,23 +17,22 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.metrics;
 
+import java.util.Random;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
-import org.apache.hadoop.metrics.Updater;
-import org.apache.hadoop.metrics.jvm.JvmMetrics;
-import org.apache.hadoop.metrics.util.MetricsBase;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+import static org.apache.hadoop.metrics2.impl.MsInfo.*;
 
 /**
- * 
+ *
  * This class is for maintaining  the various DataNode statistics
  * and publishing them through the metrics interfaces.
  * This also registers the JMX MBean for RPC.
@@ -45,94 +44,119 @@ import org.apache.hadoop.hdfs.DFSConfigK
  *
  */
 @InterfaceAudience.Private
-public class DataNodeMetrics implements Updater {
-  private final MetricsRecord metricsRecord;
-  private DataNodeActivityMBean datanodeActivityMBean;
-  public MetricsRegistry registry = new MetricsRegistry();
-  
-  
-  public MetricsTimeVaryingLong bytesWritten = 
-                      new MetricsTimeVaryingLong("bytes_written", registry);
-  public MetricsTimeVaryingLong bytesRead = 
-                      new MetricsTimeVaryingLong("bytes_read", registry);
-  public MetricsTimeVaryingInt blocksWritten = 
-                      new MetricsTimeVaryingInt("blocks_written", registry);
-  public MetricsTimeVaryingInt blocksRead = 
-                      new MetricsTimeVaryingInt("blocks_read", registry);
-  public MetricsTimeVaryingInt blocksReplicated =
-                      new MetricsTimeVaryingInt("blocks_replicated", registry);
-  public MetricsTimeVaryingInt blocksRemoved =
-                       new MetricsTimeVaryingInt("blocks_removed", registry);
-  public MetricsTimeVaryingInt blocksVerified = 
-                        new MetricsTimeVaryingInt("blocks_verified", registry);
-  public MetricsTimeVaryingInt blockVerificationFailures =
-                       new MetricsTimeVaryingInt("block_verification_failures", registry);
-  
-  public MetricsTimeVaryingInt readsFromLocalClient = 
-                new MetricsTimeVaryingInt("reads_from_local_client", registry);
-  public MetricsTimeVaryingInt readsFromRemoteClient = 
-                new MetricsTimeVaryingInt("reads_from_remote_client", registry);
-  public MetricsTimeVaryingInt writesFromLocalClient = 
-              new MetricsTimeVaryingInt("writes_from_local_client", registry);
-  public MetricsTimeVaryingInt writesFromRemoteClient = 
-              new MetricsTimeVaryingInt("writes_from_remote_client", registry);
-  
-  public MetricsTimeVaryingRate readBlockOp = 
-                new MetricsTimeVaryingRate("readBlockOp", registry);
-  public MetricsTimeVaryingRate writeBlockOp = 
-                new MetricsTimeVaryingRate("writeBlockOp", registry);
-  public MetricsTimeVaryingRate blockChecksumOp = 
-                new MetricsTimeVaryingRate("blockChecksumOp", registry);
-  public MetricsTimeVaryingRate copyBlockOp = 
-                new MetricsTimeVaryingRate("copyBlockOp", registry);
-  public MetricsTimeVaryingRate replaceBlockOp = 
-                new MetricsTimeVaryingRate("replaceBlockOp", registry);
-  public MetricsTimeVaryingRate heartbeats = 
-                    new MetricsTimeVaryingRate("heartBeats", registry);
-  public MetricsTimeVaryingRate blockReports = 
-                    new MetricsTimeVaryingRate("blockReports", registry);
-
-    
-  public DataNodeMetrics(Configuration conf, String datanodeName) {
-    String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY); 
-    // Initiate reporting of Java VM metrics
-    JvmMetrics.init("DataNode", sessionId);
-    
-
-    // Now the MBean for the data node
-    datanodeActivityMBean = new DataNodeActivityMBean(registry, datanodeName);
-    
-    // Create record for DataNode metrics
-    MetricsContext context = MetricsUtil.getContext("dfs");
-    metricsRecord = MetricsUtil.createRecord(context, "datanode");
-    metricsRecord.setTag("sessionId", sessionId);
-    context.registerUpdater(this);
+@Metrics(about="DataNode metrics", context="dfs")
+public class DataNodeMetrics {
+
+  @Metric MutableCounterLong bytesWritten;
+  @Metric MutableCounterLong bytesRead;
+  @Metric MutableCounterLong blocksWritten;
+  @Metric MutableCounterLong blocksRead;
+  @Metric MutableCounterLong blocksReplicated;
+  @Metric MutableCounterLong blocksRemoved;
+  @Metric MutableCounterLong blocksVerified;
+  @Metric MutableCounterLong blockVerificationFailures;
+  @Metric MutableCounterLong readsFromLocalClient;
+  @Metric MutableCounterLong readsFromRemoteClient;
+  @Metric MutableCounterLong writesFromLocalClient;
+  @Metric MutableCounterLong writesFromRemoteClient;
+
+  @Metric MutableRate readBlockOp;
+  @Metric MutableRate writeBlockOp;
+  @Metric MutableRate blockChecksumOp;
+  @Metric MutableRate copyBlockOp;
+  @Metric MutableRate replaceBlockOp;
+  @Metric MutableRate heartbeats;
+  @Metric MutableRate blockReports;
+
+  final MetricsRegistry registry = new MetricsRegistry("datanode");
+  final String name;
+  static final Random rng = new Random();
+
+  public DataNodeMetrics(String name, String sessionId) {
+    this.name = name;
+    registry.tag(SessionId, sessionId);
+  }
+
+  public static DataNodeMetrics create(Configuration conf, String dnName) {
+    String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    JvmMetrics.create("DataNode", sessionId, ms);
+    String name = "DataNodeActivity-"+ (dnName.isEmpty()
+        ? "UndefinedDataNodeName"+ rng.nextInt() : dnName.replace(':', '-'));
+    return ms.register(name, null, new DataNodeMetrics(name, sessionId));
+  }
+
+  public String name() { return name; }
+
+  public void addHeartbeat(long latency) {
+    heartbeats.add(latency);
   }
-  
+
+  public void addBlockReport(long latency) {
+    blockReports.add(latency);
+  }
+
+  public void incrBlocksReplicated(int delta) {
+    blocksReplicated.incr(delta);
+  }
+
+  public void incrBlocksWritten() {
+    blocksWritten.incr();
+  }
+
+  public void incrBlocksRemoved(int delta) {
+    blocksRemoved.incr(delta);
+  }
+
+  public void incrBytesWritten(int delta) {
+    bytesWritten.incr(delta);
+  }
+
+  public void incrBlockVerificationFailures() {
+    blockVerificationFailures.incr();
+  }
+
+  public void incrBlocksVerified() {
+    blocksVerified.incr();
+  }
+
+  public void addReadBlockOp(long latency) {
+    readBlockOp.add(latency);
+  }
+
+  public void addWriteBlockOp(long latency) {
+    writeBlockOp.add(latency);
+  }
+
+  public void addReplaceBlockOp(long latency) {
+    replaceBlockOp.add(latency);
+  }
+
+  public void addCopyBlockOp(long latency) {
+    copyBlockOp.add(latency);
+  }
+
+  public void addBlockChecksumOp(long latency) {
+    blockChecksumOp.add(latency);
+  }
+
+  public void incrBytesRead(int delta) {
+    bytesRead.incr(delta);
+  }
+
+  public void incrBlocksRead() {
+    blocksRead.incr();
+  }
+
   public void shutdown() {
-    if (datanodeActivityMBean != null) 
-      datanodeActivityMBean.shutdown();
+    DefaultMetricsSystem.shutdown();
   }
-    
-  /**
-   * Since this object is a registered updater, this method will be called
-   * periodically, e.g. every 5 seconds.
-   */
-  public void doUpdates(MetricsContext unused) {
-    synchronized (this) {
-      for (MetricsBase m : registry.getMetricsList()) {
-        m.pushMetric(metricsRecord);
-      }
-    }
-    metricsRecord.update();
-  }
-  public void resetAllMinMax() {
-    readBlockOp.resetMinMax();
-    writeBlockOp.resetMinMax();
-    blockChecksumOp.resetMinMax();
-    copyBlockOp.resetMinMax();
-    replaceBlockOp.resetMinMax();
-    heartbeats.resetMinMax();
-    blockReports.resetMinMax();
+
+  public void incrWritesFromClient(boolean local) {
+    (local ? writesFromLocalClient : writesFromRemoteClient).incr();
+  }
+
+  public void incrReadsFromClient(boolean local) {
+    (local ? readsFromLocalClient : readsFromRemoteClient).incr();
   }
 }
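
The rewrite above also changes the calling convention: the public mutable
metric fields are gone, replaced by package-private annotated fields behind
typed incr*/add* wrappers, so call sites move from

    datanode.myMetrics.bytesWritten.inc(len);

to

    datanode.metrics.incrBytesWritten(len);

as seen in the BlockReceiver hunk earlier. Note too that shutdown() now shuts
down the whole DefaultMetricsSystem instead of unregistering a single MBean.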

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed Apr  6 06:22:32 2011
@@ -168,7 +168,7 @@ class FSDirectory implements Closeable {
 
   private void incrDeletedFileCount(int count) {
     if (getFSNamesystem() != null)
-      NameNode.getNameNodeMetrics().numFilesDeleted.inc(count);
+      NameNode.getNameNodeMetrics().incrFilesDeleted(count);
   }
     
   /**
@@ -1471,7 +1471,7 @@ class FSDirectory implements Closeable {
         // Directory creation also count towards FilesCreated
         // to match count of FilesDeleted metric.
         if (getFSNamesystem() != null)
-          NameNode.getNameNodeMetrics().numFilesCreated.inc();
+          NameNode.getNameNodeMetrics().incrFilesCreated();
         fsImage.getEditLog().logMkDir(cur, inodes[i]);
         if(NameNode.stateChangeLog.isDebugEnabled()) {
           NameNode.stateChangeLog.debug(

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Wed Apr  6 06:22:32 2011
@@ -457,7 +457,7 @@ public class FSEditLog {
     numTransactions++;
     totalTimeTransactions += (end-start);
     if (metrics != null) // Metrics is non-null only when used inside name node
-      metrics.transactions.inc((end-start));
+      metrics.addTransaction(end-start);
   }
 
   /**
@@ -534,7 +534,7 @@ public class FSEditLog {
         if (mytxid <= synctxid) {
           numTransactionsBatchedInSync++;
           if (metrics != null) // Metrics is non-null only when used inside name node
-            metrics.transactionsBatchedInSync.inc();
+            metrics.incrTransactionsBatchedInSync();
           return;
         }
      
@@ -585,7 +585,7 @@ public class FSEditLog {
       processIOError(errorStreams, true);
   
       if (metrics != null) // Metrics non-null only when used inside name node
-        metrics.syncs.inc(elapsed);
+        metrics.addSync(elapsed);
     } finally {
       // Prevent RuntimeException from blocking other log edit sync 
       synchronized (this) {

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Apr  6 06:22:32 2011
@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Util;
 import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
-import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
@@ -45,7 +44,6 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.util.*;
-import org.apache.hadoop.metrics.util.MBeanUtil;
 import org.apache.hadoop.net.CachedDNSToSwitchMapping;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetworkTopology;
@@ -85,6 +83,11 @@ import org.apache.hadoop.fs.permission.*
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterInt;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.mortbay.util.ajax.JSON;
 
 import java.io.BufferedWriter;
@@ -102,11 +105,9 @@ import java.net.URI;
 import java.util.*;
 import java.util.Map.Entry;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
-
 import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
-import javax.management.MBeanServer;
 
 /***************************************************
  * FSNamesystem does the actual bookkeeping work for the
@@ -121,8 +122,9 @@ import javax.management.MBeanServer;
  * 5)  LRU cache of updated-heartbeat machines
  ***************************************************/
 @InterfaceAudience.Private
-public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterStats,
-    NameNodeMXBean {
+@Metrics(context="dfs")
+public class FSNamesystem implements FSConstants, FSNamesystemMBean,
+    FSClusterStats, NameNodeMXBean {
   public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
 
   private static final ThreadLocal<StringBuilder> auditBuffer =
@@ -176,7 +178,7 @@ public class FSNamesystem implements FSC
   private String supergroup;
   private PermissionStatus defaultPermission;
   // FSNamesystemMetrics counter variables
-  private FSNamesystemMetrics myFSMetrics;
+  @Metric private MutableCounterInt expiredHeartbeats;
   private long capacityTotal = 0L, capacityUsed = 0L, capacityRemaining = 0L;
   private long blockPoolUsed = 0L;
   private int totalLoad = 0;
@@ -307,7 +309,7 @@ public class FSNamesystem implements FSC
     this.fsLock = new ReentrantReadWriteLock(true); // fair locking
     setConfigurationParameters(conf);
     dtSecretManager = createDelegationTokenSecretManager(conf);
-    this.registerMBean(conf); // register the MBean for the FSNamesystemStutus
+    this.registerMBean(); // register the MBean for the FSNamesystemState
     if(fsImage == null) {
       this.dir = new FSDirectory(this, conf);
       StartupOption startOpt = NameNode.getStartupOption(conf);
@@ -315,7 +317,7 @@ public class FSNamesystem implements FSC
                            getNamespaceEditsDirs(conf), startOpt);
       long timeTakenToLoadFSImage = now() - systemStart;
       LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
-      NameNode.getNameNodeMetrics().fsImageLoadTime.set(
+      NameNode.getNameNodeMetrics().setFsImageLoadTime(
                                 (int) timeTakenToLoadFSImage);
     } else {
       this.dir = new FSDirectory(fsImage, this, conf);
@@ -368,6 +370,7 @@ public class FSNamesystem implements FSC
       dnsToSwitchMapping.resolve(new ArrayList<String>(hostsReader.getHosts()));
     }
     registerMXBean();
+    DefaultMetricsSystem.instance().register(this);
   }
 
   public static Collection<URI> getNamespaceDirs(Configuration conf) {
@@ -3049,7 +3052,7 @@ public class FSNamesystem implements FSC
              it.hasNext();) {
           DatanodeDescriptor nodeInfo = it.next();
           if (isDatanodeDead(nodeInfo)) {
-            myFSMetrics.numExpiredHeartbeats.inc();
+            expiredHeartbeats.incr();
             foundDead = true;
             nodeID = nodeInfo;
             break;
@@ -3106,7 +3109,7 @@ public class FSNamesystem implements FSC
     }
 
     blockManager.processReport(node, newReport);
-    NameNode.getNameNodeMetrics().blockReport.inc((int) (now() - startTime));
+    NameNode.getNameNodeMetrics().addBlockReport((int) (now() - startTime));
     } finally {
       writeUnlock();
     }
@@ -3250,6 +3253,7 @@ public class FSNamesystem implements FSC
     }
   }
 
+  @Metric({"MissingBlocks", "Number of missing blocks"})
   public long getMissingBlocksCount() {
     // not locking
     return blockManager.getMissingBlocksCount();
@@ -3276,6 +3280,11 @@ public class FSNamesystem implements FSC
     }
   }
 
+  @Metric
+  public float getCapacityTotalGB() {
+    return DFSUtil.roundBytesToGB(getCapacityTotal());
+  }
+
   /**
    * Total used space by data nodes
    */
@@ -3285,6 +3294,12 @@ public class FSNamesystem implements FSC
       return capacityUsed;
     }
   }
+
+  @Metric
+  public float getCapacityUsedGB() {
+    return DFSUtil.roundBytesToGB(getCapacityUsed());
+  }
+
   /**
    * Total used space by data nodes as percentage of total capacity
    */
@@ -3313,6 +3328,11 @@ public class FSNamesystem implements FSC
     }
   }
 
+  @Metric
+  public float getCapacityRemainingGB() {
+    return DFSUtil.roundBytesToGB(getCapacityRemaining());
+  }
+
   /**
    * Total remaining space by data nodes as percentage of total capacity
    */
@@ -3325,6 +3345,7 @@ public class FSNamesystem implements FSC
    * Total number of connections.
    */
   @Override // FSNamesystemMBean
+  @Metric
   public int getTotalLoad() {
     synchronized (heartbeats) {
       return this.totalLoad;
@@ -3862,7 +3883,7 @@ public class FSNamesystem implements FSC
       long timeInSafemode = now() - systemStart;
       NameNode.stateChangeLog.info("STATE* Leaving safe mode after " 
                                     + timeInSafemode/1000 + " secs.");
-      NameNode.getNameNodeMetrics().safeModeTime.set((int) timeInSafemode);
+      NameNode.getNameNodeMetrics().setSafeModeTime((int) timeInSafemode);
       
       if (reached >= 0) {
         NameNode.stateChangeLog.info("STATE* Safe mode is OFF."); 
@@ -4136,6 +4157,7 @@ public class FSNamesystem implements FSC
    * Get the total number of blocks in the system. 
    */
   @Override // FSNamesystemMBean
+  @Metric
   public long getBlocksTotal() {
     return blockManager.getTotalBlocks();
   }
@@ -4407,38 +4429,45 @@ public class FSNamesystem implements FSC
   }
 
   @Override // FSNamesystemMBean
+  @Metric
   public long getFilesTotal() {
     return this.dir.totalInodes();
   }
 
   @Override // FSNamesystemMBean
+  @Metric
   public long getPendingReplicationBlocks() {
     return blockManager.pendingReplicationBlocksCount;
   }
 
   @Override // FSNamesystemMBean
+  @Metric
   public long getUnderReplicatedBlocks() {
     return blockManager.underReplicatedBlocksCount;
   }
 
-  /** Returns number of blocks with corrupt replicas */
+  @Metric({"CorruptBlocks", "Number of blocks with corrupt replicas"})
   public long getCorruptReplicaBlocks() {
     return blockManager.corruptReplicaBlocksCount;
   }
 
   @Override // FSNamesystemMBean
+  @Metric
   public long getScheduledReplicationBlocks() {
     return blockManager.scheduledReplicationBlocksCount;
   }
 
+  @Metric
   public long getPendingDeletionBlocks() {
     return blockManager.pendingDeletionBlocksCount;
   }
 
+  @Metric
   public long getExcessBlocks() {
     return blockManager.excessBlocksCount;
   }
-  
+
+  @Metric
   public int getBlockCapacity() {
     return blockManager.getCapacity();
   }
@@ -4453,27 +4482,15 @@ public class FSNamesystem implements FSC
    * Register the FSNamesystem MBean using the name
    *        "hadoop:service=NameNode,name=FSNamesystemState"
    */
-  void registerMBean(Configuration conf) {
-    // We wrap to bypass standard mbean naming convention.
-    // This wraping can be removed in java 6 as it is more flexible in 
-    // package naming for mbeans and their impl.
-    StandardMBean bean;
-    try {
-      myFSMetrics = new FSNamesystemMetrics(this, conf);
-      bean = new StandardMBean(this,FSNamesystemMBean.class);
-      mbeanName = MBeanUtil.registerMBean("NameNode", "FSNamesystemState", bean);
+  void registerMBean() {
+    // We can only implement one MXBean interface, so we keep the old one.
+    try {
+      StandardMBean bean = new StandardMBean(this, FSNamesystemMBean.class);
+      mbeanName = MBeans.register("NameNode", "FSNamesystemState", bean);
     } catch (NotCompliantMBeanException e) {
-      e.printStackTrace();
+      throw new RuntimeException("Bad MBean setup", e);
     }
-
-    LOG.info("Registered FSNamesystemStatusMBean");
-  }
-
-  /**
-   * get FSNamesystemMetrics
-   */
-  public FSNamesystemMetrics getFSNamesystemMetrics() {
-    return myFSMetrics;
+    LOG.info("Registered FSNamesystemState MBean");
   }
 
   /**
@@ -4481,7 +4498,7 @@ public class FSNamesystem implements FSC
    */
   public void shutdown() {
     if (mbeanName != null)
-      MBeanUtil.unregisterMBean(mbeanName);
+      MBeans.unregister(mbeanName);
   }
   
 
@@ -5123,18 +5140,12 @@ public class FSNamesystem implements FSC
                     "fsck", src, null, null);
     }
   }
+
   /**
    * Register NameNodeMXBean
    */
   private void registerMXBean() {
-    // register MXBean
-    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-    try {
-      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=NameNodeInfo");
-      mbs.registerMBean(this, mxbeanName);
-    } catch ( javax.management.JMException e ) {
-      LOG.warn("Failed to register NameNodeMXBean", e);
-    }
+    MBeans.register("NameNode", "NameNodeInfo", this);
   }
 
   /**
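
FSNamesystem itself becomes a metrics source in this patch: the class-level
@Metrics(context="dfs") annotation plus @Metric on individual getters lets the
existing accessors double as gauges once
DefaultMetricsSystem.instance().register(this) runs, with an optional
{name, description} pair overriding the defaults, as in:

    @Metric({"CorruptBlocks", "Number of blocks with corrupt replicas"})
    public long getCorruptReplicaBlocks() {
      return blockManager.corruptReplicaBlocksCount;
    }

This is what allows the separate FSNamesystemMetrics class to be removed.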

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Apr  6 06:22:32 2011
@@ -83,6 +83,7 @@ import org.apache.hadoop.io.EnumSetWrita
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
@@ -199,7 +200,7 @@ public class NameNode implements Namenod
     format(conf, false);
   }
 
-  static NameNodeMetrics myMetrics;
+  static NameNodeMetrics metrics;
 
   /** Return the {@link FSNamesystem} object.
    * @return {@link FSNamesystem} object.
@@ -209,11 +210,11 @@ public class NameNode implements Namenod
   }
 
   static void initMetrics(Configuration conf, NamenodeRole role) {
-    myMetrics = new NameNodeMetrics(conf, role);
+    metrics = NameNodeMetrics.create(conf, role);
   }
 
   public static NameNodeMetrics getNameNodeMetrics() {
-    return myMetrics;
+    return metrics;
   }
   
   public static InetSocketAddress getAddress(String address) {
@@ -629,8 +630,8 @@ public class NameNode implements Namenod
     if(emptier != null) emptier.interrupt();
     if(server != null) server.stop();
     if(serviceRpcServer != null) serviceRpcServer.stop();
-    if (myMetrics != null) {
-      myMetrics.shutdown();
+    if (metrics != null) {
+      metrics.shutdown();
     }
     if (namesystem != null) {
       namesystem.shutdown();
@@ -740,7 +741,7 @@ public class NameNode implements Namenod
                                           long offset, 
                                           long length) 
       throws IOException {
-    myMetrics.numGetBlockLocations.inc();
+    metrics.incrGetBlockLocations();
     return namesystem.getBlockLocations(getClientMachine(), 
                                         src, offset, length);
   }
@@ -779,8 +780,8 @@ public class NameNode implements Namenod
         new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(),
             null, masked),
         clientName, clientMachine, flag.get(), createParent, replication, blockSize);
-    myMetrics.numFilesCreated.inc();
-    myMetrics.numCreateFileOps.inc();
+    metrics.incrFilesCreated();
+    metrics.incrCreateFileOps();
   }
 
   /** {@inheritDoc} */
@@ -792,7 +793,7 @@ public class NameNode implements Namenod
           +src+" for "+clientName+" at "+clientMachine);
     }
     LocatedBlock info = namesystem.appendFile(src, clientName, clientMachine);
-    myMetrics.numFilesAppended.inc();
+    metrics.incrFilesAppended();
     return info;
   }
 
@@ -834,7 +835,7 @@ public class NameNode implements Namenod
     LocatedBlock locatedBlock = 
       namesystem.getAdditionalBlock(src, clientName, previous, excludedNodesSet);
     if (locatedBlock != null)
-      myMetrics.numAddBlockOps.inc();
+      metrics.incrAddBlockOps();
     return locatedBlock;
   }
 
@@ -922,7 +923,7 @@ public class NameNode implements Namenod
     }
     boolean ret = namesystem.renameTo(src, dst);
     if (ret) {
-      myMetrics.numFilesRenamed.inc();
+      metrics.incrFilesRenamed();
     }
     return ret;
   }
@@ -946,7 +947,7 @@ public class NameNode implements Namenod
                             + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
     namesystem.renameTo(src, dst, options);
-    myMetrics.numFilesRenamed.inc();
+    metrics.incrFilesRenamed();
   }
 
   /**
@@ -964,7 +965,7 @@ public class NameNode implements Namenod
     }
     boolean ret = namesystem.delete(src, recursive);
     if (ret) 
-      myMetrics.numDeleteFileOps.inc();
+      metrics.incrDeleteFileOps();
     return ret;
   }
 
@@ -1010,8 +1011,8 @@ public class NameNode implements Namenod
     DirectoryListing files = namesystem.getListing(
         src, startAfter, needLocation);
     if (files != null) {
-      myMetrics.numGetListingOps.inc();
-      myMetrics.numFilesInGetListingOps.inc(files.getPartialListing().length);
+      metrics.incrGetListingOps();
+      metrics.incrFilesInGetListingOps(files.getPartialListing().length);
     }
     return files;
   }
@@ -1023,7 +1024,7 @@ public class NameNode implements Namenod
    *         or null if file not found
    */
   public HdfsFileStatus getFileInfo(String src)  throws IOException {
-    myMetrics.numFileInfoOps.inc();
+    metrics.incrFileInfoOps();
     return namesystem.getFileInfo(src, true);
   }
 
@@ -1035,7 +1036,7 @@ public class NameNode implements Namenod
    *         or null if file not found
    */
   public HdfsFileStatus getFileLinkInfo(String src) throws IOException { 
-    myMetrics.numFileInfoOps.inc();
+    metrics.incrFileInfoOps();
     return namesystem.getFileInfo(src, false);
   }
   
@@ -1176,7 +1177,7 @@ public class NameNode implements Namenod
   public void createSymlink(String target, String link, FsPermission dirPerms, 
                             boolean createParent) 
       throws IOException {
-    myMetrics.numcreateSymlinkOps.inc();
+    metrics.incrCreateSymlinkOps();
     /* We enforce the MAX_PATH_LENGTH limit even though a symlink target 
      * URI may refer to a non-HDFS file system. 
      */
@@ -1195,7 +1196,7 @@ public class NameNode implements Namenod
 
   /** @inheritDoc */
   public String getLinkTarget(String path) throws IOException {
-    myMetrics.numgetLinkTargetOps.inc();
+    metrics.incrGetLinkTargetOps();
     /* Resolves the first symlink in the given path, returning a
      * new path consisting of the target of the symlink and any 
      * remaining path components from the original path.
@@ -1582,8 +1583,11 @@ public class NameNode implements Namenod
         return null; // avoid javac warning
       case BACKUP:
       case CHECKPOINT:
-        return new BackupNode(conf, startOpt.toNodeRole());
+        NamenodeRole role = startOpt.toNodeRole();
+        DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
+        return new BackupNode(conf, role);
       default:
+        DefaultMetricsSystem.initialize("NameNode");
         return new NameNode(conf);
     }
   }
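
The DefaultMetricsSystem.initialize(...) calls added here give the metrics
system its prefix before any NameNode or BackupNode source registers; that
prefix corresponds to the namenode./datanode. keys in the new
hadoop-metrics2.properties, with the backup and checkpoint roles using the
role name with its space stripped.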

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Wed Apr  6 06:22:32 2011
@@ -46,7 +46,8 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.metrics.jvm.JvmMetrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
 import org.apache.hadoop.security.SecurityUtil;
@@ -149,7 +150,9 @@ public class SecondaryNameNode implement
           infoBindAddress);
     }
     // initiate Java VM metrics
-    JvmMetrics.init("SecondaryNameNode", conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY));
+    JvmMetrics.create("SecondaryNameNode",
+        conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
+        DefaultMetricsSystem.instance());
     
     // Create connection to the namenode.
     shouldRun = true;

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java Wed Apr  6 06:22:32 2011
@@ -17,126 +17,143 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.metrics;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.metrics.*;
-import org.apache.hadoop.metrics.jvm.JvmMetrics;
-import org.apache.hadoop.metrics.util.MetricsBase;
-import org.apache.hadoop.metrics.util.MetricsIntValue;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import static org.apache.hadoop.metrics2.impl.MsInfo.*;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
 
 /**
- * 
  * This class is for maintaining  the various NameNode activity statistics
  * and publishing them through the metrics interfaces.
- * This also registers the JMX MBean for RPC.
- * <p>
- * This class has a number of metrics variables that are publicly accessible;
- * these variables (objects) have methods to update their values;
- *  for example:
- *  <p> {@link #syncs}.inc()
- *
  */
 @InterfaceAudience.Private
-public class NameNodeMetrics implements Updater {
-    private static Log log = LogFactory.getLog(NameNodeMetrics.class);
-    private final MetricsRecord metricsRecord;
-    public MetricsRegistry registry = new MetricsRegistry();
-    
-    private NameNodeActivityMBean namenodeActivityMBean;
-    
-    public MetricsTimeVaryingInt numCreateFileOps = 
-                    new MetricsTimeVaryingInt("CreateFileOps", registry);
-    public MetricsTimeVaryingInt numFilesCreated =
-                          new MetricsTimeVaryingInt("FilesCreated", registry);
-    public MetricsTimeVaryingInt numFilesAppended =
-                          new MetricsTimeVaryingInt("FilesAppended", registry);
-    public MetricsTimeVaryingInt numGetBlockLocations = 
-                    new MetricsTimeVaryingInt("GetBlockLocations", registry);
-    public MetricsTimeVaryingInt numFilesRenamed =
-                    new MetricsTimeVaryingInt("FilesRenamed", registry);
-    public MetricsTimeVaryingInt numGetListingOps = 
-                    new MetricsTimeVaryingInt("GetListingOps", registry);
-    public MetricsTimeVaryingInt numDeleteFileOps = 
-                          new MetricsTimeVaryingInt("DeleteFileOps", registry);
-    public MetricsTimeVaryingInt numFilesDeleted = new MetricsTimeVaryingInt(
-        "FilesDeleted", registry, 
-        "Number of files and directories deleted by delete or rename operation");
-    public MetricsTimeVaryingInt numFileInfoOps =
-                          new MetricsTimeVaryingInt("FileInfoOps", registry);
-    public MetricsTimeVaryingInt numAddBlockOps = 
-                          new MetricsTimeVaryingInt("AddBlockOps", registry);
-    public MetricsTimeVaryingInt numcreateSymlinkOps = 
-                          new MetricsTimeVaryingInt("CreateSymlinkOps", registry);
-    public MetricsTimeVaryingInt numgetLinkTargetOps = 
-                          new MetricsTimeVaryingInt("GetLinkTargetOps", registry);
-
-    public MetricsTimeVaryingRate transactions = new MetricsTimeVaryingRate(
-      "Transactions", registry, "Journal Transaction");
-    public MetricsTimeVaryingRate syncs =
-                    new MetricsTimeVaryingRate("Syncs", registry, "Journal Sync");
-    public MetricsTimeVaryingInt transactionsBatchedInSync = new MetricsTimeVaryingInt(
-      "JournalTransactionsBatchedInSync", registry,
-      "Journal Transactions Batched In Sync");
-    public MetricsTimeVaryingRate blockReport =
-                    new MetricsTimeVaryingRate("blockReport", registry, "Block Report");
-    public MetricsIntValue safeModeTime =
-                    new MetricsIntValue("SafemodeTime", registry, "Duration in SafeMode at Startup");
-    public MetricsIntValue fsImageLoadTime = 
-                    new MetricsIntValue("fsImageLoadTime", registry, "Time loading FS Image at Startup");
-    public MetricsIntValue numBlocksCorrupted =
-                    new MetricsIntValue("BlocksCorrupted", registry);
-    public MetricsTimeVaryingInt numFilesInGetListingOps = 
-                    new MetricsTimeVaryingInt("FilesInGetListingOps", registry);
-
-      
-    public NameNodeMetrics(Configuration conf, NamenodeRole nameNodeRole) {
-      String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
-      // Initiate Java VM metrics
-      String processName = nameNodeRole.toString();
-      JvmMetrics.init(processName, sessionId);
-
-      // Now the Mbean for the name node - this also registers the MBean
-      namenodeActivityMBean = new NameNodeActivityMBean(registry);
-      
-      // Create a record for NameNode metrics
-      MetricsContext metricsContext = MetricsUtil.getContext("dfs");
-      metricsRecord = MetricsUtil.createRecord(metricsContext, processName.toLowerCase());
-      metricsRecord.setTag("sessionId", sessionId);
-      metricsContext.registerUpdater(this);
-      log.info("Initializing NameNodeMeterics using context object:" +
-                metricsContext.getClass().getName());
-    }
-    
-
-    
-    public void shutdown() {
-      if (namenodeActivityMBean != null) 
-        namenodeActivityMBean.shutdown();
-    }
-      
-    /**
-     * Since this object is a registered updater, this method will be called
-     * periodically, e.g. every 5 seconds.
-     */
-    public void doUpdates(MetricsContext unused) {
-      synchronized (this) {
-        for (MetricsBase m : registry.getMetricsList()) {
-          m.pushMetric(metricsRecord);
-        }
-      }
-      metricsRecord.update();
-    }
-
-    public void resetAllMinMax() {
-      transactions.resetMinMax();
-      syncs.resetMinMax();
-      blockReport.resetMinMax();
-    }
+@Metrics(name="NameNodeActivity", about="NameNode metrics", context="dfs")
+public class NameNodeMetrics {
+  final MetricsRegistry registry = new MetricsRegistry("namenode");
+
+  @Metric MutableCounterLong createFileOps;
+  @Metric MutableCounterLong filesCreated;
+  @Metric MutableCounterLong filesAppended;
+  @Metric MutableCounterLong getBlockLocations;
+  @Metric MutableCounterLong filesRenamed;
+  @Metric MutableCounterLong getListingOps;
+  @Metric MutableCounterLong deleteFileOps;
+  @Metric("Number of files/dirs deleted by delete or rename operations")
+  MutableCounterLong filesDeleted;
+  @Metric MutableCounterLong fileInfoOps;
+  @Metric MutableCounterLong addBlockOps;
+  @Metric MutableCounterLong createSymlinkOps;
+  @Metric MutableCounterLong getLinkTargetOps;
+  @Metric MutableCounterLong filesInGetListingOps;
+
+  @Metric("Journal transactions") MutableRate transactions;
+  @Metric("Journal syncs") MutableRate syncs;
+  @Metric("Journal transactions batched in sync")
+  MutableCounterLong transactionsBatchedInSync;
+  @Metric("Block report") MutableRate blockReport;
+
+  @Metric("Duration in SafeMode at startup") MutableGaugeInt safeModeTime;
+  @Metric("Time loading FS Image at startup") MutableGaugeInt fsImageLoadTime;
+
+  NameNodeMetrics(String processName, String sessionId) {
+    registry.tag(ProcessName, processName).tag(SessionId, sessionId);
+  }
+
+  public static NameNodeMetrics create(Configuration conf, NamenodeRole r) {
+    String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+    String processName = r.toString();
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    JvmMetrics.create(processName, sessionId, ms);
+    return ms.register(new NameNodeMetrics(processName, sessionId));
+  }
+
+  public void shutdown() {
+    DefaultMetricsSystem.shutdown();
+  }
+
+  public void incrGetBlockLocations() {
+    getBlockLocations.incr();
+  }
+
+  public void incrFilesCreated() {
+    filesCreated.incr();
+  }
+
+  public void incrCreateFileOps() {
+    createFileOps.incr();
+  }
+
+  public void incrFilesAppended() {
+    filesAppended.incr();
+  }
+
+  public void incrAddBlockOps() {
+    addBlockOps.incr();
+  }
+
+  public void incrFilesRenamed() {
+    filesRenamed.incr();
+  }
+
+  public void incrFilesDeleted(int delta) {
+    filesDeleted.incr(delta);
+  }
+
+  public void incrDeleteFileOps() {
+    deleteFileOps.incr();
+  }
+
+  public void incrGetListingOps() {
+    getListingOps.incr();
+  }
+
+  public void incrFilesInGetListingOps(int delta) {
+    filesInGetListingOps.incr(delta);
+  }
+
+  public void incrFileInfoOps() {
+    fileInfoOps.incr();
+  }
+
+  public void incrCreateSymlinkOps() {
+    createSymlinkOps.incr();
+  }
+
+  public void incrGetLinkTargetOps() {
+    getLinkTargetOps.incr();
+  }
+
+  public void addTransaction(long latency) {
+    transactions.add(latency);
+  }
+
+  public void incrTransactionsBatchedInSync() {
+    transactionsBatchedInSync.incr();
+  }
+
+  public void addSync(long elapsed) {
+    syncs.add(elapsed);
+  }
+
+  public void setFsImageLoadTime(long elapsed) {
+    fsImageLoadTime.set((int) elapsed);
+  }
+
+  public void addBlockReport(long latency) {
+    blockReport.add(latency);
+  }
+
+  public void setSafeModeTime(long elapsed) {
+    safeModeTime.set((int) elapsed);
+  }
 }
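
With the annotated fields now package-private, call sites use the incr*/add*/set* methods instead of mutating public metric objects. A sketch of two hypothetical helpers exercising the API above (only methods that appear in this diff are called):

    import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;

    public class NameNodeMetricsUsage {
      // One create RPC: bump the op counter once, FilesCreated per path component.
      static void recordCreate(NameNodeMetrics metrics, int componentsCreated) {
        metrics.incrCreateFileOps();
        for (int i = 0; i < componentsCreated; i++) {
          metrics.incrFilesCreated();
        }
      }

      // Journal activity: both calls feed MutableRate sources, which publish
      // an op count plus average time rather than a bare counter.
      static void recordJournal(NameNodeMetrics metrics, long txTime, long syncTime) {
        metrics.addTransaction(txTime);
        metrics.addSync(syncTime);
      }
    }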

Modified: hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/tools/JMXGet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/tools/JMXGet.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/tools/JMXGet.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/java/org/apache/hadoop/hdfs/tools/JMXGet.java Wed Apr  6 06:22:32 2011
@@ -126,7 +126,7 @@ public class JMXGet {
           continue;
         }
       }
-      err("Info: key = " + key + "; val = " + val);
+      err("Info: key = " + key + "; val = "+ val.getClass() +":"+ val);
       break;
     }
 
@@ -193,7 +193,7 @@ public class JMXGet {
     err("\nMBean count = " + mbsc.getMBeanCount());
 
     // Query MBean names for specific domain "hadoop" and service
-    ObjectName query = new ObjectName("hadoop:service=" + service + ",*");
+    ObjectName query = new ObjectName("Hadoop:service=" + service + ",*");
     hadoopObjectNames = new ArrayList<ObjectName>(5);
     err("\nQuery MBeanServer MBeans:");
     Set<ObjectName> names = new TreeSet<ObjectName>(mbsc
@@ -201,7 +201,7 @@ public class JMXGet {
 
     for (ObjectName name : names) {
       hadoopObjectNames.add(name);
-      err("hadoop services: " + name);
+      err("Hadoop service: " + name);
     }
 
   }
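
Note the domain change: metrics2 registers MBeans under the capitalized "Hadoop" domain with service=...,name=... key properties. A self-contained remote query sketch using only the standard JMX API (host, port, and the JmxQuerySketch class name are illustrative):

    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class JmxQuerySketch {
      public static void main(String[] args) throws Exception {
        // Assumes a JVM started with com.sun.management.jmxremote.port=8004.
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://localhost:8004/jmxrmi");
        JMXConnector jmxc = JMXConnectorFactory.connect(url);
        try {
          MBeanServerConnection mbsc = jmxc.getMBeanServerConnection();
          // Match every MBean the NameNode publishes in the new domain.
          ObjectName query = new ObjectName("Hadoop:service=NameNode,*");
          for (ObjectName name : mbsc.queryNames(query, null)) {
            System.out.println(name);
          }
        } finally {
          jmxc.close();
        }
      }
    }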

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Apr  6 06:22:32 2011
@@ -61,6 +61,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StaticMapping;
@@ -82,6 +83,8 @@ public class MiniDFSCluster {
   private static final String NAMESERVICE_ID_PREFIX = "nameserviceId";
   private static final Log LOG = LogFactory.getLog(MiniDFSCluster.class);
 
+  { DefaultMetricsSystem.setMiniClusterMode(true); }
+
   /**
    * Class to construct instances of MiniDFSClusters with specific options.
    */
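
The mini-cluster switch matters because several daemons share one JVM here: without it, the second DataNode would try to register metrics sources and MBeans under names the first already owns, and registration would fail; in mini-cluster mode duplicate source names are tolerated instead. Any harness that starts multiple daemons in-process needs the same switch before the first daemon comes up; a sketch (the MultiDaemonTestBase class name is illustrative):

    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

    public abstract class MultiDaemonTestBase {
      static {
        // Must run before any NameNode/DataNode registers its metrics sources.
        DefaultMetricsSystem.setMiniClusterMode(true);
      }
    }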

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java Wed Apr  6 06:22:32 2011
@@ -26,7 +26,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 /**
  * This test ensures that all types of data node reports work correctly.
@@ -77,9 +77,7 @@ public class TestDatanodeReport extends 
                    NUM_OF_DATANODES);
 
       Thread.sleep(5000);
-      FSNamesystemMetrics fsMetrics = 
-                     cluster.getNamesystem().getFSNamesystemMetrics();
-      assertEquals(1,fsMetrics.numExpiredHeartbeats.getCurrentIntervalValue());
+      assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
     }finally {
       cluster.shutdown();
     }

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Wed Apr  6 06:22:32 2011
@@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 
@@ -884,18 +884,17 @@ public class SimulatedFSDataset  impleme
 
     try {
       bean = new StandardMBean(this,FSDatasetMBean.class);
-      mbeanName = MBeanUtil.registerMBean("DataNode",
-          "FSDatasetState-" + storageId, bean);
+      mbeanName = MBeans.register("DataNode", "FSDatasetState-"+
+                                  storageId, bean);
     } catch (NotCompliantMBeanException e) {
-      e.printStackTrace();
+      DataNode.LOG.warn("Error registering FSDatasetState MBean", e);
     }
  
-    DataNode.LOG.info("Registered FSDatasetStatusMBean");
+    DataNode.LOG.info("Registered FSDatasetState MBean");
   }
 
   public void shutdown() {
-    if (mbeanName != null)
-      MBeanUtil.unregisterMBean(mbeanName);
+    if (mbeanName != null) MBeans.unregister(mbeanName);
   }
 
   public String getStorageInfo() {
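
MBeans.register composes the "Hadoop:service=<service>,name=<name>" ObjectName, registers the bean, and returns that name for later unregistration. A minimal sketch with a hypothetical standard MBean (Demo/DemoMBean are illustrative; JMX requires exactly that naming pairing):

    import javax.management.ObjectName;
    import org.apache.hadoop.metrics2.util.MBeans;

    public class MBeanLifecycleSketch {
      public interface DemoMBean {
        int getValue();
      }

      public static class Demo implements DemoMBean {
        public int getValue() { return 42; }
      }

      public static void main(String[] args) {
        // Registered under "Hadoop:service=DataNode,name=Demo".
        ObjectName name = MBeans.register("DataNode", "Demo", new Demo());
        try {
          // The bean is visible to JMX clients here.
        } finally {
          if (name != null) MBeans.unregister(name);
        }
      }
    }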

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java Wed Apr  6 06:22:32 2011
@@ -43,7 +43,8 @@ public class TestDataNodeMXBean {
       DataNode datanode = datanodes.get(0);
 
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
-      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=DataNodeInfo");
+      ObjectName mxbeanName = new ObjectName(
+          "Hadoop:service=DataNode,name=DataNodeInfo");
       // get attribute "ClusterId"
       String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
       Assert.assertEquals(datanode.getClusterId(), clusterId);
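
For in-process checks no connector is needed; the platform MBeanServer serves the same names. The pattern above in isolation (MXBeanReadSketch is illustrative; the attribute and ObjectName are copied from the test):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class MXBeanReadSketch {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = new ObjectName(
            "Hadoop:service=DataNode,name=DataNodeInfo");
        // Only resolves inside a JVM that actually hosts a DataNode.
        String clusterId = (String) mbs.getAttribute(name, "ClusterId");
        System.out.println("ClusterId = " + clusterId);
      }
    }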

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java Wed Apr  6 06:22:32 2011
@@ -26,6 +26,9 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import static org.apache.hadoop.test.MetricsAsserts.*;
+
 import junit.framework.TestCase;
 
 public class TestDataNodeMetrics extends TestCase {
@@ -42,8 +45,8 @@ public class TestDataNodeMetrics extends
       List<DataNode> datanodes = cluster.getDataNodes();
       assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
-      DataNodeMetrics metrics = datanode.getMetrics();
-      assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
+      MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
+      assertCounter("BytesWritten", LONG_FILE_LEN, rb);
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
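
getMetrics(name) in MetricsAsserts snapshots the named source into a MetricsRecordBuilder, which assertCounter/assertGauge then verify by metric name. The pattern in isolation (MetricsAssertsUsage and its helper are illustrative):

    import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
    import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

    import org.apache.hadoop.metrics2.MetricsRecordBuilder;

    public class MetricsAssertsUsage {
      // Verify a counter on any registered source, addressed by source name.
      static void checkBytesWritten(String sourceName, long expectedBytes) {
        MetricsRecordBuilder rb = getMetrics(sourceName);
        assertCounter("BytesWritten", expectedBytes, rb);
      }
    }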

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Wed Apr  6 06:22:32 2011
@@ -45,7 +45,8 @@ public class TestNameNodeMXBean {
       FSNamesystem fsn = cluster.getNameNode().namesystem;
 
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=NameNodeInfo");
+      ObjectName mxbeanName = new ObjectName(
+          "Hadoop:service=NameNode,name=NameNodeInfo");
       // get attribute "ClusterId"
       String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
       Assert.assertEquals(fsn.getClusterId(), clusterId);

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java Wed Apr  6 06:22:32 2011
@@ -28,15 +28,16 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 /**
  * Test case for FilesInGetListingOps metric in Namenode
  */
 public class TestNNMetricFilesInGetListingOps extends TestCase {
   private static final Configuration CONF = new HdfsConfiguration();
+  private static final String NN_METRICS = "NameNodeActivity";
   static {
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
@@ -45,7 +46,6 @@ public class TestNNMetricFilesInGetListi
   }
      
   private MiniDFSCluster cluster;
-  private NameNodeMetrics nnMetrics;
   private DistributedFileSystem fs;
   private Random rand = new Random();
 
@@ -54,7 +54,6 @@ public class TestNNMetricFilesInGetListi
     cluster = new MiniDFSCluster.Builder(CONF).build();
     cluster.waitActive();
     cluster.getNameNode();
-    nnMetrics = NameNode.getNameNodeMetrics();
     fs = (DistributedFileSystem) cluster.getFileSystem();
   }
 
@@ -76,9 +75,9 @@ public class TestNNMetricFilesInGetListi
     createFile("/tmp2/t1", 3200, (short)3);
     createFile("/tmp2/t2", 3200, (short)3);
     cluster.getNameNode().getListing("/tmp1", HdfsFileStatus.EMPTY_NAME, false);
-    assertEquals(2,nnMetrics.numFilesInGetListingOps.getCurrentIntervalValue());
-    cluster.getNameNode().getListing("/tmp2", HdfsFileStatus.EMPTY_NAME, false) ;
-    assertEquals(4,nnMetrics.numFilesInGetListingOps.getCurrentIntervalValue());
+    assertCounter("FilesInGetListingOps", 2L, getMetrics(NN_METRICS));
+    cluster.getNameNode().getListing("/tmp2", HdfsFileStatus.EMPTY_NAME, false);
+    assertCounter("FilesInGetListingOps", 4L, getMetrics(NN_METRICS));
   }
 }
 

Modified: hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1089315&r1=1089314&r2=1089315&view=diff
==============================================================================
--- hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/hdfs/branches/yahoo-merge/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Wed Apr  6 06:22:32 2011
@@ -33,10 +33,11 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 /**
  * Test for metrics published by the Namenode
@@ -46,6 +47,8 @@ public class TestNameNodeMetrics extends
   private static final int DFS_REPLICATION_INTERVAL = 1;
   private static final Path TEST_ROOT_DIR_PATH = 
     new Path(System.getProperty("test.build.data", "build/test/data"));
+  private static final String NN_METRICS = "NameNodeActivity";
+  private static final String NS_METRICS = "FSNamesystem";
   
   // Number of datanodes in the cluster
   private static final int DATANODE_COUNT = 3; 
@@ -59,11 +62,9 @@ public class TestNameNodeMetrics extends
   }
   
   private MiniDFSCluster cluster;
-  private FSNamesystemMetrics metrics;
   private DistributedFileSystem fs;
   private Random rand = new Random();
   private FSNamesystem namesystem;
-  private NameNodeMetrics nnMetrics;
 
   private static Path getTestPath(String fileName) {
     return new Path(TEST_ROOT_DIR_PATH, fileName);
@@ -75,8 +76,6 @@ public class TestNameNodeMetrics extends
     cluster.waitActive();
     namesystem = cluster.getNamesystem();
     fs = (DistributedFileSystem) cluster.getFileSystem();
-    metrics = namesystem.getFSNamesystemMetrics();
-    nnMetrics = NameNode.getNameNodeMetrics();
   }
   
   @Override
@@ -93,8 +92,6 @@ public class TestNameNodeMetrics extends
     // Wait for metrics update (corresponds to dfs.replication.interval
     // for some block related metrics to get updated)
     Thread.sleep(1000);
-    metrics.doUpdates(null);
-    nnMetrics.doUpdates(null);
   }
 
   private void readFile(FileSystem fileSys,Path name) throws IOException {
@@ -110,15 +107,16 @@ public class TestNameNodeMetrics extends
     // Add files with 100 blocks
     final Path file = getTestPath("testFileAdd");
     createFile(file, 3200, (short)3);
-    final int blockCount = 32;
+    final long blockCount = 32;
     int blockCapacity = namesystem.getBlockCapacity();
     updateMetrics();
-    assertEquals(blockCapacity, metrics.blockCapacity.get());
-    
+    assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));
+
+    MetricsRecordBuilder rb = getMetrics(NN_METRICS);
     // File create operation count is 1
     // Number of files created equals the depth of the <code>file</code> path
-    assertEquals(1, nnMetrics.numCreateFileOps.getPreviousIntervalValue());
-    assertEquals(file.depth(), nnMetrics.numFilesCreated.getPreviousIntervalValue());
+    assertCounter("CreateFileOps", 1L, rb);
+    assertCounter("FilesCreated", (long)file.depth(), rb);
 
     // Blocks are stored in a hashmap. Compute its capacity, which
     // doubles every time the number of entries reaches the threshold.
@@ -127,10 +125,11 @@ public class TestNameNodeMetrics extends
       blockCapacity <<= 1;
     }
     updateMetrics();
-    int filesTotal = file.depth() + 1; // Add 1 for root
-    assertEquals(filesTotal, metrics.filesTotal.get());
-    assertEquals(blockCount, metrics.blocksTotal.get());
-    assertEquals(blockCapacity, metrics.blockCapacity.get());
+    long filesTotal = file.depth() + 1; // Add 1 for root
+    rb = getMetrics(NS_METRICS);
+    assertGauge("FilesTotal", filesTotal, rb);
+    assertGauge("BlocksTotal", blockCount, rb);
+    assertGauge("BlockCapacity", blockCapacity, rb);
     fs.delete(file, true);
     filesTotal--; // reduce the filecount for deleted file
     
@@ -138,13 +137,15 @@ public class TestNameNodeMetrics extends
     // the blocks pending deletion are sent for deletion to the datanodes.
     Thread.sleep(DFS_REPLICATION_INTERVAL * (DATANODE_COUNT + 1) * 1000);
     updateMetrics();
-    assertEquals(filesTotal, metrics.filesTotal.get());
-    assertEquals(0, metrics.blocksTotal.get());
-    assertEquals(0, metrics.pendingDeletionBlocks.get());
-    
+    rb = getMetrics(NS_METRICS);
+    assertGauge("FilesTotal", filesTotal, rb);
+    assertGauge("BlocksTotal", 0L, rb);
+    assertGauge("PendingDeletionBlocks", 0L, rb);
+
+    rb = getMetrics(NN_METRICS);
     // Both the delete file op count and the files deleted count must be 1
-    assertEquals(1, nnMetrics.numDeleteFileOps.getPreviousIntervalValue());
-    assertEquals(1, nnMetrics.numFilesDeleted.getPreviousIntervalValue());
+    assertCounter("DeleteFileOps", 1L, rb);
+    assertCounter("FilesDeleted", 1L, rb);
   }
   
   /** Corrupt a block and ensure metrics reflects it */
@@ -158,14 +159,16 @@ public class TestNameNodeMetrics extends
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
     namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
     updateMetrics();
-    assertEquals(1, metrics.corruptBlocks.get());
-    assertEquals(1, metrics.pendingReplicationBlocks.get());
-    assertEquals(1, metrics.scheduledReplicationBlocks.get());
+    MetricsRecordBuilder rb = getMetrics(NS_METRICS);
+    assertGauge("CorruptBlocks", 1L, rb);
+    assertGauge("PendingReplicationBlocks", 1L, rb);
+    assertGauge("ScheduledReplicationBlocks", 1L, rb);
     fs.delete(file, true);
     updateMetrics();
-    assertEquals(0, metrics.corruptBlocks.get());
-    assertEquals(0, metrics.pendingReplicationBlocks.get());
-    assertEquals(0, metrics.scheduledReplicationBlocks.get());
+    rb = getMetrics(NS_METRICS);
+    assertGauge("CorruptBlocks", 0L, rb);
+    assertGauge("PendingReplicationBlocks", 0L, rb);
+    assertGauge("ScheduledReplicationBlocks", 0L, rb);
   }
   
   /** Create excess blocks by reducing the replication factor for
@@ -174,10 +177,11 @@ public class TestNameNodeMetrics extends
   public void testExcessBlocks() throws Exception {
     Path file = getTestPath("testExcessBlocks");
     createFile(file, 100, (short)2);
-    int totalBlocks = 1;
+    long totalBlocks = 1;
     namesystem.setReplication(file.toString(), (short)1);
     updateMetrics();
-    assertEquals(totalBlocks, metrics.excessBlocks.get());
+    MetricsRecordBuilder rb = getMetrics(NS_METRICS);
+    assertGauge("ExcessBlocks", totalBlocks, rb);
     fs.delete(file, true);
   }
   
@@ -192,11 +196,12 @@ public class TestNameNodeMetrics extends
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
     namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
     updateMetrics();
-    assertEquals(1, metrics.underReplicatedBlocks.get());
-    assertEquals(1, metrics.missingBlocks.get());
+    MetricsRecordBuilder rb = getMetrics(NS_METRICS);
+    assertGauge("UnderReplicatedBlocks", 1L, rb);
+    assertGauge("MissingBlocks", 1L, rb);
     fs.delete(file, true);
     updateMetrics();
-    assertEquals(0, metrics.underReplicatedBlocks.get());
+    assertGauge("UnderReplicatedBlocks", 0L, getMetrics(NS_METRICS));
   }
   
   public void testRenameMetrics() throws Exception {
@@ -206,8 +211,9 @@ public class TestNameNodeMetrics extends
     createFile(target, 100, (short)1);
     fs.rename(src, target, Rename.OVERWRITE);
     updateMetrics();
-    assertEquals(1, nnMetrics.numFilesRenamed.getPreviousIntervalValue());
-    assertEquals(1, nnMetrics.numFilesDeleted.getPreviousIntervalValue());
+    MetricsRecordBuilder rb = getMetrics(NN_METRICS);
+    assertCounter("FilesRenamed", 1L, rb);
+    assertCounter("FilesDeleted", 1L, rb);
   }
   
   /**
@@ -226,13 +232,8 @@ public class TestNameNodeMetrics extends
     Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "file1.dat");
 
     // When the cluster starts for the first time there are no file (read, create, open)
-    // operations so metric numGetBlockLocations should be 0.
-    // Verify that numGetBlockLocations for current interval 
-    // and previous interval are 0
-    assertEquals("numGetBlockLocations for previous interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getPreviousIntervalValue());
-    assertEquals("numGetBlockLocations for current interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getCurrentIntervalValue());
+    // operations so metric GetBlockLocations should be 0.
+    assertCounter("GetBlockLocations", 0L, getMetrics(NN_METRICS));
 
     //Perform create file operation
     createFile(file1_Path,100,(short)2);
@@ -240,36 +241,23 @@ public class TestNameNodeMetrics extends
   
     // Creating a file does not change the GetBlockLocations metric,
     // so expect GetBlockLocations to remain 0
-    assertEquals("numGetBlockLocations for previous interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getPreviousIntervalValue());
-    // Verify numGetBlockLocations for current interval is 0
-    assertEquals("numGetBlockLocations for current interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getCurrentIntervalValue());
+    assertCounter("GetBlockLocations", 0L, getMetrics(NN_METRICS));
   
-    // Open and read file operation increments numGetBlockLocations
+    // Open and read file operation increments GetBlockLocations
     // Perform read file operation on earlier created file
     readFile(fs, file1_Path);
     updateMetrics();
     // Verify the read file operation has incremented GetBlockLocations by 1
-    assertEquals("numGetBlockLocations for previous interval is incorrect",
-    1,nnMetrics.numGetBlockLocations.getPreviousIntervalValue());
-    // Verify numGetBlockLocations for current interval is 0
-    assertEquals("numGetBlockLocations for current interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getCurrentIntervalValue());
+    assertCounter("GetBlockLocations", 1L, getMetrics(NN_METRICS));
 
     // Opening and reading the file twice more increments GetBlockLocations by 2
     readFile(fs, file1_Path);
     readFile(fs, file1_Path);
     updateMetrics();
-    assertEquals("numGetBlockLocations for previous interval is incorrect",
-    2,nnMetrics.numGetBlockLocations.getPreviousIntervalValue());
-    // Verify numGetBlockLocations for current interval is 0
-    assertEquals("numGetBlockLocations for current interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getCurrentIntervalValue());
+    assertCounter("GetBlockLocations", 3L, getMetrics(NN_METRICS));
   
     // Verify total load metrics, total load = Data Node started.
     updateMetrics();
-    assertEquals("Metrics TotalLoad is incorrect"
-    ,DATANODE_COUNT,metrics.totalLoad.get());
+    assertGauge("TotalLoad" ,DATANODE_COUNT, getMetrics(NS_METRICS));
   }
 }


