hadoop-common-commits mailing list archives

From omal...@apache.org
Subject svn commit: r1077597 [5/6] - in /hadoop/common/branches/branch-0.20-security-patches: ./ conf/ ivy/ src/core/org/apache/hadoop/ipc/ src/core/org/apache/hadoop/ipc/metrics/ src/core/org/apache/hadoop/log/ src/core/org/apache/hadoop/metrics/ src/core/org...
Date Fri, 04 Mar 2011 04:34:00 GMT
Added: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobTrackerMetricsSource.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobTrackerMetricsSource.java?rev=1077597&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobTrackerMetricsSource.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JobTrackerMetricsSource.java Fri Mar  4 04:33:55 2011
@@ -0,0 +1,346 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import org.apache.hadoop.metrics2.MetricsBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.lib.MetricMutableCounterInt;
+import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MetricMutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.source.JvmMetricsSource;
+
+/**
+ * Instrumentation for JobTracker metrics v2
+ */
+@SuppressWarnings("deprecation")
+class JobTrackerMetricsSource extends JobTrackerInstrumentation
+                          implements MetricsSource {
+
+  final MetricsRegistry registry = new MetricsRegistry("jobtracker");
+  final MetricMutableGaugeInt mapSlots =
+      registry.newGauge("map_slots", "", 0);
+  final MetricMutableGaugeInt redSlots =
+      registry.newGauge("reduce_slots", "", 0);
+  final MetricMutableGaugeInt blMapSlots =
+      registry.newGauge("blacklisted_maps", "", 0);
+  final MetricMutableGaugeInt blRedSlots =
+      registry.newGauge("blacklisted_reduces", "", 0);
+  final MetricMutableCounterInt mapsLaunched =
+      registry.newCounter("maps_launched", "", 0);
+  final MetricMutableCounterInt mapsCompleted =
+      registry.newCounter("maps_completed", "", 0);
+  final MetricMutableCounterInt mapsFailed =
+      registry.newCounter("maps_failed", "", 0);
+  final MetricMutableCounterInt redsLaunched =
+      registry.newCounter("reduces_launched", "", 0);
+  final MetricMutableCounterInt redsCompleted =
+      registry.newCounter("reduces_completed", "", 0);
+  final MetricMutableCounterInt redsFailed =
+      registry.newCounter("reduces_failed", "", 0);
+  final MetricMutableCounterInt jobsSubmitted =
+      registry.newCounter("jobs_submitted", "", 0);
+  final MetricMutableCounterInt jobsCompleted =
+      registry.newCounter("jobs_completed", "", 0);
+  final MetricMutableGaugeInt waitingMaps =
+      registry.newGauge("waiting_maps", "", 0);
+  final MetricMutableGaugeInt waitingReds =
+      registry.newGauge("waiting_reduces", "", 0);
+  final MetricMutableGaugeInt reservedMapSlots =
+      registry.newGauge("reserved_map_slots", "", 0);
+  final MetricMutableGaugeInt reservedRedSlots =
+      registry.newGauge("reserved_reduce_slots", "", 0);
+  final MetricMutableGaugeInt occupiedMapSlots =
+      registry.newGauge("occupied_map_slots", "", 0);
+  final MetricMutableGaugeInt occupiedRedSlots =
+      registry.newGauge("occupied_reduce_slots", "", 0);
+  final MetricMutableCounterInt jobsFailed =
+      registry.newCounter("jobs_failed", "", 0);
+  final MetricMutableCounterInt jobsKilled =
+      registry.newCounter("jobs_killed", "", 0);
+  final MetricMutableGaugeInt jobsPreparing =
+      registry.newGauge("jobs_preparing", "", 0);
+  final MetricMutableGaugeInt jobsRunning =
+      registry.newGauge("jobs_running", "", 0);
+  final MetricMutableGaugeInt runningMaps =
+      registry.newGauge("running_maps", "", 0);
+  final MetricMutableGaugeInt runningReds =
+      registry.newGauge("running_reduces", "", 0);
+  final MetricMutableCounterInt mapsKilled =
+      registry.newCounter("maps_killed", "", 0);
+  final MetricMutableCounterInt redsKilled =
+      registry.newCounter("reduces_killed", "", 0);
+  final MetricMutableGaugeInt numTrackers =
+      registry.newGauge("trackers", "", 0);
+  final MetricMutableGaugeInt blacklistedTrackers =
+      registry.newGauge("trackers_blacklisted", "", 0);
+  final MetricMutableGaugeInt graylistedTrackers =
+      registry.newGauge("trackers_graylisted", "", 0);
+  final MetricMutableGaugeInt decTrackers =
+      registry.newGauge("trackers_decommissioned", "", 0);
+  final MetricMutableCounterLong numHeartbeats =
+      registry.newCounter("heartbeats", "", 0L);
+
+  final String sessionId;
+
+  public JobTrackerMetricsSource(JobTracker jt, JobConf conf) {
+    super(jt, conf);
+    sessionId = conf.getSessionId();
+    registry.setContext("mapred").tag("sessionId", "", sessionId);
+    JvmMetricsSource.create("JobTracker", sessionId);
+  }
+
+  public void getMetrics(MetricsBuilder builder, boolean all) {
+    registry.snapshot(builder.addRecord(registry.name()), all);
+  }
+
+  @Override
+  public void launchMap(TaskAttemptID taskAttemptID) {
+    mapsLaunched.incr();
+    decWaitingMaps(taskAttemptID.getJobID(), 1);
+  }
+
+  @Override
+  public void completeMap(TaskAttemptID taskAttemptID) {
+    mapsCompleted.incr();
+  }
+
+  @Override
+  public void failedMap(TaskAttemptID taskAttemptID) {
+    mapsFailed.incr();
+    addWaitingMaps(taskAttemptID.getJobID(), 1);
+  }
+
+  @Override
+  public void launchReduce(TaskAttemptID taskAttemptID) {
+    redsLaunched.incr();
+    decWaitingReduces(taskAttemptID.getJobID(), 1);
+  }
+
+  @Override
+  public void completeReduce(TaskAttemptID taskAttemptID) {
+    redsCompleted.incr();
+  }
+
+  @Override
+  public void failedReduce(TaskAttemptID taskAttemptID) {
+    redsFailed.incr();
+    addWaitingReduces(taskAttemptID.getJobID(), 1);
+  }
+
+  @Override
+  public void submitJob(JobConf conf, JobID id) {
+    jobsSubmitted.incr();
+  }
+
+  @Override
+  public void completeJob(JobConf conf, JobID id) {
+    jobsCompleted.incr();
+  }
+
+  @Override
+  public void addWaitingMaps(JobID id, int task) {
+    waitingMaps.incr(task);
+  }
+
+  @Override
+  public void decWaitingMaps(JobID id, int task) {
+    waitingMaps.decr(task);
+  }
+
+  @Override
+  public void addWaitingReduces(JobID id, int task) {
+    waitingReds.incr(task);
+  }
+
+  @Override
+  public void decWaitingReduces(JobID id, int task){
+    waitingReds.decr(task);
+  }
+
+  @Override
+  public void setMapSlots(int slots) {
+    mapSlots.set(slots);
+  }
+
+  @Override
+  public void setReduceSlots(int slots) {
+    redSlots.set(slots);
+  }
+
+  @Override
+  public void addBlackListedMapSlots(int slots){
+    blMapSlots.incr(slots);
+  }
+
+  @Override
+  public void decBlackListedMapSlots(int slots){
+    blMapSlots.decr(slots);
+  }
+
+  @Override
+  public void addBlackListedReduceSlots(int slots){
+    blRedSlots.incr(slots);
+  }
+
+  @Override
+  public void decBlackListedReduceSlots(int slots){
+    blRedSlots.decr(slots);
+  }
+
+  @Override
+  public void addReservedMapSlots(int slots) {
+    reservedMapSlots.incr(slots);
+  }
+
+  @Override
+  public void decReservedMapSlots(int slots) {
+    reservedMapSlots.decr(slots);
+  }
+
+  @Override
+  public void addReservedReduceSlots(int slots) {
+    reservedRedSlots.incr(slots);
+  }
+
+  @Override
+  public void decReservedReduceSlots(int slots) {
+    reservedRedSlots.decr(slots);
+  }
+
+  @Override
+  public void addOccupiedMapSlots(int slots) {
+    occupiedMapSlots.incr(slots);
+  }
+
+  @Override
+  public void decOccupiedMapSlots(int slots) {
+    occupiedMapSlots.decr(slots);
+  }
+
+  @Override
+  public void addOccupiedReduceSlots(int slots) {
+    occupiedRedSlots.incr(slots);
+  }
+
+  @Override
+  public void decOccupiedReduceSlots(int slots) {
+    occupiedRedSlots.decr(slots);
+  }
+
+  @Override
+  public void failedJob(JobConf conf, JobID id) {
+    jobsFailed.incr();
+  }
+
+  @Override
+  public void killedJob(JobConf conf, JobID id) {
+    jobsKilled.incr();
+  }
+
+  @Override
+  public void addPrepJob(JobConf conf, JobID id) {
+    jobsPreparing.incr();
+  }
+
+  @Override
+  public void decPrepJob(JobConf conf, JobID id) {
+    jobsPreparing.decr();
+  }
+
+  @Override
+  public void addRunningJob(JobConf conf, JobID id) {
+    jobsRunning.incr();
+  }
+
+  @Override
+  public void decRunningJob(JobConf conf, JobID id) {
+    jobsRunning.decr();
+  }
+
+  @Override
+  public void addRunningMaps(int task) {
+    runningMaps.incr(task);
+  }
+
+  @Override
+  public void decRunningMaps(int task) {
+    runningMaps.decr(task);
+  }
+
+  @Override
+  public void addRunningReduces(int task) {
+    runningReds.incr(task);
+  }
+
+  @Override
+  public void decRunningReduces(int task) {
+    runningReds.decr(task);
+  }
+
+  @Override
+  public void killedMap(TaskAttemptID taskAttemptID) {
+    mapsKilled.incr();
+  }
+
+  @Override
+  public void killedReduce(TaskAttemptID taskAttemptID) {
+    redsKilled.incr();
+  }
+
+  @Override
+  public void addTrackers(int trackers) {
+    numTrackers.incr(trackers);
+  }
+
+  @Override
+  public void decTrackers(int trackers) {
+    numTrackers.decr(trackers);
+  }
+
+  @Override
+  public void addBlackListedTrackers(int trackers) {
+    blacklistedTrackers.incr(trackers);
+  }
+
+  @Override
+  public void decBlackListedTrackers(int trackers) {
+    blacklistedTrackers.decr(trackers);
+  }
+
+  @Override
+  public void addGrayListedTrackers(int trackers) {
+    graylistedTrackers.incr(trackers);
+  }
+
+  @Override
+  public void decGrayListedTrackers(int trackers) {
+    graylistedTrackers.decr(trackers);
+  }
+
+  @Override
+  public void setDecommissionedTrackers(int trackers) {
+    decTrackers.set(trackers);
+  }
+
+  @Override
+  public void heartbeat() {
+    numHeartbeats.incr();
+  }
+}

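[Editorial note] The class above sets the pattern for the rest of this commit: mutable metrics are created in a MetricsRegistry, the instrumentation methods just bump them, and getMetrics() snapshots the registry when the metrics system polls. A minimal sketch of that pull model, using only the metrics2 lib classes this commit already imports; the ExampleSource class and its metric names are illustrative, not part of the commit:

import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterInt;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;

// Illustrative source, not part of this commit.
class ExampleSource implements MetricsSource {
  final MetricsRegistry registry = new MetricsRegistry("example");
  final MetricMutableCounterInt requests =
      registry.newCounter("requests", "requests served", 0);

  ExampleSource(String sessionId) {
    // Same context/tag idiom as JobTrackerMetricsSource above.
    registry.setContext("mapred").tag("sessionId", "", sessionId);
  }

  void markRequest() {
    requests.incr(); // instrumentation points just bump mutable metrics
  }

  public void getMetrics(MetricsBuilder builder, boolean all) {
    // Pull model: the metrics system polls; we snapshot the registry.
    registry.snapshot(builder.addRecord(registry.name()), all);
  }

  static ExampleSource create(String sessionId) {
    // register(...) returns the source, as ShuffleServerInstrumentation
    // .create below relies on.
    return DefaultMetricsSystem.INSTANCE.register(
        "ExampleMetrics", "Example source", new ExampleSource(sessionId));
  }
}
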
Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/LocalJobRunner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/LocalJobRunner.java?rev=1077597&r1=1077596&r2=1077597&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/LocalJobRunner.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/LocalJobRunner.java Fri Mar  4 04:33:55 2011
@@ -401,7 +401,7 @@ class LocalJobRunner implements JobSubmi
   public LocalJobRunner(JobConf conf) throws IOException {
     this.fs = FileSystem.getLocal(conf);
     this.conf = conf;
-    myMetrics = new JobTrackerMetricsInst(null, new JobConf(conf));
+    myMetrics = JobTrackerInstrumentation.create(null, new JobConf(conf));
   }
 
   // JobSubmissionProtocol methods

Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/ReduceTask.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/ReduceTask.java?rev=1077597&r1=1077596&r2=1077597&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/ReduceTask.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/ReduceTask.java Fri Mar  4 04:33:55 2011
@@ -24,9 +24,6 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.lang.Math;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
 import java.net.URI;
 import java.net.URL;
 import java.net.URLClassLoader;
@@ -78,16 +75,20 @@ import org.apache.hadoop.mapred.Merger.S
 import org.apache.hadoop.mapred.SortedRanges.SkipRangeIterator;
 import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
-import org.apache.hadoop.metrics.Updater;
+import org.apache.hadoop.metrics2.MetricsBuilder;
 import org.apache.hadoop.util.Progress;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 
 import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
+import org.apache.hadoop.metrics2.MetricsException;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricMutableCounterInt;
+import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 
 /** A Reduce task. */
 class ReduceTask extends Task {
@@ -690,7 +691,7 @@ class ReduceTask extends Task {
     /**
      * The object for metrics reporting.
      */
-    private ShuffleClientMetrics shuffleClientMetrics = null;
+    private ShuffleClientInstrumentation shuffleClientMetrics;
     
     /**
      * the minimum interval between tasktracker polls
@@ -803,64 +804,64 @@ class ReduceTask extends Task {
      */
     private final Map<String, List<MapOutputLocation>> mapLocations = 
       new ConcurrentHashMap<String, List<MapOutputLocation>>();
-    
-    /**
-     * This class contains the methods that should be used for metrics-reporting
-     * the specific metrics for shuffle. This class actually reports the
-     * metrics for the shuffle client (the ReduceTask), and hence the name
-     * ShuffleClientMetrics.
-     */
-    class ShuffleClientMetrics implements Updater {
-      private MetricsRecord shuffleMetrics = null;
-      private int numFailedFetches = 0;
-      private int numSuccessFetches = 0;
-      private long numBytes = 0;
-      private int numThreadsBusy = 0;
-      ShuffleClientMetrics(JobConf conf) {
-        MetricsContext metricsContext = MetricsUtil.getContext("mapred");
-        this.shuffleMetrics = 
-          MetricsUtil.createRecord(metricsContext, "shuffleInput");
-        this.shuffleMetrics.setTag("user", conf.getUser());
-        this.shuffleMetrics.setTag("jobName", conf.getJobName());
-        this.shuffleMetrics.setTag("jobId", ReduceTask.this.getJobID().toString());
-        this.shuffleMetrics.setTag("taskId", getTaskID().toString());
-        this.shuffleMetrics.setTag("sessionId", conf.getSessionId());
-        metricsContext.registerUpdater(this);
-      }
-      public synchronized void inputBytes(long numBytes) {
-        this.numBytes += numBytes;
-      }
-      public synchronized void failedFetch() {
-        ++numFailedFetches;
-      }
-      public synchronized void successFetch() {
-        ++numSuccessFetches;
-      }
-      public synchronized void threadBusy() {
-        ++numThreadsBusy;
-      }
-      public synchronized void threadFree() {
-        --numThreadsBusy;
-      }
-      public void doUpdates(MetricsContext unused) {
-        synchronized (this) {
-          shuffleMetrics.incrMetric("shuffle_input_bytes", numBytes);
-          shuffleMetrics.incrMetric("shuffle_failed_fetches", 
-                                    numFailedFetches);
-          shuffleMetrics.incrMetric("shuffle_success_fetches", 
-                                    numSuccessFetches);
-          if (numCopiers != 0) {
-            shuffleMetrics.setMetric("shuffle_fetchers_busy_percent",
-                100*((float)numThreadsBusy/numCopiers));
-          } else {
-            shuffleMetrics.setMetric("shuffle_fetchers_busy_percent", 0);
-          }
-          numBytes = 0;
-          numSuccessFetches = 0;
-          numFailedFetches = 0;
-        }
-        shuffleMetrics.update();
+
+    class ShuffleClientInstrumentation implements MetricsSource {
+      final MetricsRegistry registry = new MetricsRegistry("shuffleInput");
+      final MetricMutableCounterLong inputBytes =
+          registry.newCounter("shuffle_input_bytes", "", 0L);
+      final MetricMutableCounterInt failedFetches =
+          registry.newCounter("shuffle_failed_fetches", "", 0);
+      final MetricMutableCounterInt successFetches =
+          registry.newCounter("shuffle_success_fetches", "", 0);
+      private volatile int threadsBusy = 0;
+
+      @SuppressWarnings("deprecation")
+      ShuffleClientInstrumentation(JobConf conf) {
+        registry.tag("user", "User name", conf.getUser())
+                .tag("jobName", "Job name", conf.getJobName())
+                .tag("jobId", "Job ID", ReduceTask.this.getJobID().toString())
+                .tag("taskId", "Task ID", getTaskID().toString())
+                .tag("sessionId", "Session ID", conf.getSessionId());
+      }
+
+      //@Override
+      void inputBytes(long numBytes) {
+        inputBytes.incr(numBytes);
       }
+
+      //@Override
+      void failedFetch() {
+        failedFetches.incr();
+      }
+
+      //@Override
+      void successFetch() {
+        successFetches.incr();
+      }
+
+      //@Override
+      synchronized void threadBusy() {
+        ++threadsBusy;
+      }
+
+      //@Override
+      synchronized void threadFree() {
+        --threadsBusy;
+      }
+
+      @Override
+      public void getMetrics(MetricsBuilder builder, boolean all) {
+        MetricsRecordBuilder rb = builder.addRecord(registry.name());
+        rb.addGauge("shuffle_fetchers_busy_percent", "", numCopiers == 0 ? 0
+            : 100. * threadsBusy / numCopiers);
+        registry.snapshot(rb, all);
+      }
+
+    }
+
+    private ShuffleClientInstrumentation createShuffleClientInstrumentation() {
+      return DefaultMetricsSystem.INSTANCE.register("ShuffleClientMetrics",
+          "Shuffle input metrics", new ShuffleClientInstrumentation(conf));
     }
 
     /** Represents the result of an attempt to copy a map output */
@@ -1809,7 +1810,7 @@ class ReduceTask extends Task {
       
       configureClasspath(conf);
       this.reporter = reporter;
-      this.shuffleClientMetrics = new ShuffleClientMetrics(conf);
+      this.shuffleClientMetrics = createShuffleClientInstrumentation();
       this.umbilical = umbilical;      
       this.reduceTask = ReduceTask.this;
 

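[Editorial note] Note the shape change in the shuffle-client metrics: the old Updater.doUpdates() accumulated deltas under a lock and reset them every interval, while the new source lets counters grow monotonically and computes the busy ratio only when polled. A minimal sketch of that computed-gauge idiom, using only calls that appear in this commit; the BusySource class is illustrative:

import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;

// Illustrative source, not part of this commit.
class BusySource implements MetricsSource {
  final MetricsRegistry registry = new MetricsRegistry("busyExample");
  private final int workers;
  private volatile int busy = 0;

  BusySource(int workers) { this.workers = workers; }

  synchronized void enter() { ++busy; }
  synchronized void leave() { --busy; }

  public void getMetrics(MetricsBuilder builder, boolean all) {
    MetricsRecordBuilder rb = builder.addRecord(registry.name());
    // Derived value computed on demand, with the same divide-by-zero
    // guard the shuffle sources use; no interval reset needed.
    rb.addGauge("busy_percent", "", workers == 0 ? 0
        : 100. * busy / workers);
    registry.snapshot(rb, all);
  }
}
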
Added: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/ShuffleServerInstrumentation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/ShuffleServerInstrumentation.java?rev=1077597&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/ShuffleServerInstrumentation.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/ShuffleServerInstrumentation.java Fri Mar  4 04:33:55 2011
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import org.apache.hadoop.metrics2.MetricsBuilder;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricMutableCounterInt;
+import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+
+class ShuffleServerInstrumentation implements MetricsSource {
+  final int ttWorkerThreads;
+  final MetricsRegistry registry = new MetricsRegistry("shuffleOutput");
+  private volatile int serverHandlerBusy = 0;
+  final MetricMutableCounterLong outputBytes =
+      registry.newCounter("shuffle_output_bytes", "", 0L);
+  final MetricMutableCounterInt failedOutputs =
+      registry.newCounter("shuffle_failed_outputs", "", 0);
+  final MetricMutableCounterInt successOutputs =
+      registry.newCounter("shuffle_success_outputs", "", 0);
+
+  ShuffleServerInstrumentation(TaskTracker tt) {
+    ttWorkerThreads = tt.workerThreads;
+    registry.setContext("mapred")
+        .tag("sessionId", "session id", tt.getJobConf().getSessionId());
+  }
+
+  //@Override
+  synchronized void serverHandlerBusy() {
+    ++serverHandlerBusy;
+  }
+
+  //@Override
+  synchronized void serverHandlerFree() {
+    --serverHandlerBusy;
+  }
+
+  //@Override
+  void outputBytes(long bytes) {
+    outputBytes.incr(bytes);
+  }
+
+  //@Override
+  void failedOutput() {
+    failedOutputs.incr();
+  }
+
+  //@Override
+  void successOutput() {
+    successOutputs.incr();
+  }
+
+  @Override
+  public void getMetrics(MetricsBuilder builder, boolean all) {
+    MetricsRecordBuilder rb = builder.addRecord(registry.name());
+    rb.addGauge("shuffle_handler_busy_percent", "", ttWorkerThreads == 0 ? 0
+        : 100. * serverHandlerBusy / ttWorkerThreads);
+    registry.snapshot(rb, all);
+  }
+
+  static ShuffleServerInstrumentation create(TaskTracker tt) {
+    return create(tt, DefaultMetricsSystem.INSTANCE);
+  }
+
+  static ShuffleServerInstrumentation create(TaskTracker tt, MetricsSystem ms) {
+    return ms.register("ShuffleServerMetrics", "Shuffle output metrics",
+                      new ShuffleServerInstrumentation(tt));
+  }
+  
+}

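[Editorial note] The server-side instrumentation exposes busy/free brackets rather than a gauge setter, so callers are expected to balance them on every path. A hedged sketch of the intended call pattern (the real caller is TaskTracker.MapOutputServlet; serveOutput below is illustrative and notionally lives in org.apache.hadoop.mapred, since the instrumentation methods are package-private):

// Illustrative call pattern: keep the busy count balanced on all paths.
void serveOutput(ShuffleServerInstrumentation shuffleMetrics,
                 java.io.OutputStream out, byte[] chunk)
    throws java.io.IOException {
  shuffleMetrics.serverHandlerBusy();
  try {
    out.write(chunk);                        // stand-in for streaming output
    shuffleMetrics.outputBytes(chunk.length);
    shuffleMetrics.successOutput();
  } catch (java.io.IOException ioe) {
    shuffleMetrics.failedOutput();
    throw ioe;
  } finally {
    shuffleMetrics.serverHandlerFree();      // runs even on error paths
  }
}
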
Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTracker.java?rev=1077597&r1=1077596&r2=1077597&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTracker.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTracker.java Fri Mar  4 04:33:55 2011
@@ -87,11 +87,6 @@ import org.apache.hadoop.mapreduce.TaskT
 import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.metrics.MetricsException;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
-import org.apache.hadoop.metrics.Updater;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
@@ -110,6 +105,8 @@ import org.apache.hadoop.util.VersionInf
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.mapreduce.security.TokenCache;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.security.Credentials;
 
 /*******************************************************
@@ -118,8 +115,9 @@ import org.apache.hadoop.security.Creden
  * for Task assignments and reporting results.
  *
  *******************************************************/
-public class TaskTracker 
-             implements MRConstants, TaskUmbilicalProtocol, Runnable {
+public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
+    Runnable, TaskTrackerMXBean {
+  
   /**
    * @deprecated
    */
@@ -133,6 +131,9 @@ public class TaskTracker 
   static final String MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY =
      "mapred.tasktracker.pmem.reserved";
 
+  static final String CONF_VERSION_KEY = "mapreduce.tasktracker.conf.version";
+  static final String CONF_VERSION_DEFAULT = "default";
+
   static final long WAIT_FOR_DONE = 3 * 1000;
   private int httpPort;
 
@@ -267,7 +268,7 @@ public class TaskTracker 
   private IntWritable finishedCount = new IntWritable(0);
   
   private MapEventsFetcherThread mapEventsFetcher;
-  int workerThreads;
+  final int workerThreads;
   CleanupQueue directoryCleanupThread;
   private volatile JvmManager jvmManager;
   
@@ -311,64 +312,8 @@ public class TaskTracker 
   private List<TaskAttemptID> commitResponses = 
             Collections.synchronizedList(new ArrayList<TaskAttemptID>());
 
-  private ShuffleServerMetrics shuffleServerMetrics;
-  /** This class contains the methods that should be used for metrics-reporting
-   * the specific metrics for shuffle. The TaskTracker is actually a server for
-   * the shuffle and hence the name ShuffleServerMetrics.
-   */
-  private class ShuffleServerMetrics implements Updater {
-    private MetricsRecord shuffleMetricsRecord = null;
-    private int serverHandlerBusy = 0;
-    private long outputBytes = 0;
-    private int failedOutputs = 0;
-    private int successOutputs = 0;
-    ShuffleServerMetrics(JobConf conf) {
-      MetricsContext context = MetricsUtil.getContext("mapred");
-      shuffleMetricsRecord = 
-                           MetricsUtil.createRecord(context, "shuffleOutput");
-      this.shuffleMetricsRecord.setTag("sessionId", conf.getSessionId());
-      context.registerUpdater(this);
-    }
-    synchronized void serverHandlerBusy() {
-      ++serverHandlerBusy;
-    }
-    synchronized void serverHandlerFree() {
-      --serverHandlerBusy;
-    }
-    synchronized void outputBytes(long bytes) {
-      outputBytes += bytes;
-    }
-    synchronized void failedOutput() {
-      ++failedOutputs;
-    }
-    synchronized void successOutput() {
-      ++successOutputs;
-    }
-    public void doUpdates(MetricsContext unused) {
-      synchronized (this) {
-        if (workerThreads != 0) {
-          shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent", 
-              100*((float)serverHandlerBusy/workerThreads));
-        } else {
-          shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent", 0);
-        }
-        shuffleMetricsRecord.incrMetric("shuffle_output_bytes", 
-                                        outputBytes);
-        shuffleMetricsRecord.incrMetric("shuffle_failed_outputs", 
-                                        failedOutputs);
-        shuffleMetricsRecord.incrMetric("shuffle_success_outputs", 
-                                        successOutputs);
-        outputBytes = 0;
-        failedOutputs = 0;
-        successOutputs = 0;
-      }
-      shuffleMetricsRecord.update();
-    }
-  }
+  private ShuffleServerInstrumentation shuffleServerMetrics;
 
-  
-  
-    
   private TaskTrackerInstrumentation myInstrumentation = null;
 
   public TaskTrackerInstrumentation getTaskTrackerInstrumentation() {
@@ -576,10 +521,6 @@ public class TaskTracker 
                             protocol);
     }
   }
-  
-  int getHttpPort() {
-    return httpPort;
-  }
 
   public static final String TT_USER_NAME = "mapreduce.tasktracker.kerberos.principal";
   public static final String TT_KEYTAB_FILE =
@@ -623,19 +564,9 @@ public class TaskTracker 
     this.minSpaceKill = this.fConf.getLong("mapred.local.dir.minspacekill", 0L);
     //tweak the probe sample size (make it a function of numCopiers)
     probe_sample_size = this.fConf.getInt("mapred.tasktracker.events.batchsize", 500);
-    
-    Class<? extends TaskTrackerInstrumentation> metricsInst = getInstrumentationClass(fConf);
-    try {
-      java.lang.reflect.Constructor<? extends TaskTrackerInstrumentation> c =
-        metricsInst.getConstructor(new Class[] {TaskTracker.class} );
-      this.myInstrumentation = c.newInstance(this);
-    } catch(Exception e) {
-      //Reflection can throw lots of exceptions -- handle them all by 
-      //falling back on the default.
-      LOG.error("failed to initialize taskTracker metrics", e);
-      this.myInstrumentation = new TaskTrackerMetricsInst(this);
-    }
-    
+
+    createInstrumentation();
+
     // bind address
     String address = 
       NetUtils.getServerAddress(fConf,
@@ -731,6 +662,27 @@ public class TaskTracker 
       fConf.getBoolean(TT_OUTOFBAND_HEARBEAT, false);
   }
 
+  private void createInstrumentation() {
+    Class<? extends TaskTrackerInstrumentation> metricsInst =
+        getInstrumentationClass(fConf);
+    LOG.debug("instrumentation class="+ metricsInst);
+    if (metricsInst == null) {
+      myInstrumentation = TaskTrackerInstrumentation.create(this);
+      return;
+    }
+    try {
+      java.lang.reflect.Constructor<? extends TaskTrackerInstrumentation> c =
+        metricsInst.getConstructor(new Class[] {TaskTracker.class} );
+      this.myInstrumentation = c.newInstance(this);
+    } catch(Exception e) {
+      //Reflection can throw lots of exceptions -- handle them all by
+      //falling back on the default.
+      LOG.error("failed to initialize taskTracker metrics", e);
+      this.myInstrumentation = TaskTrackerInstrumentation.create(this);
+    }
+
+  }
+
   UserGroupInformation getMROwner() {
     return aclsManager.getMROwner();
   }
@@ -742,13 +694,13 @@ public class TaskTracker 
     return fConf.getBoolean(JobConf.MR_ACLS_ENABLED, false);
   }
 
-  public static Class<? extends TaskTrackerInstrumentation> getInstrumentationClass(
+  static Class<? extends TaskTrackerInstrumentation> getInstrumentationClass(
     Configuration conf) {
-    return conf.getClass("mapred.tasktracker.instrumentation",
-        TaskTrackerMetricsInst.class, TaskTrackerInstrumentation.class);
+    return conf.getClass("mapred.tasktracker.instrumentation", null,
+                         TaskTrackerInstrumentation.class);
   }
 
-  public static void setInstrumentationClass(
+  static void setInstrumentationClass(
     Configuration conf, Class<? extends TaskTrackerInstrumentation> t) {
     conf.setClass("mapred.tasktracker.instrumentation",
         t, TaskTrackerInstrumentation.class);
@@ -1259,6 +1211,7 @@ public class TaskTracker 
    */
   TaskTracker() {
     server = null;
+    workerThreads = 0;
   }
 
   void setConf(JobConf conf) {
@@ -1274,6 +1227,7 @@ public class TaskTracker 
                   "mapred.tasktracker.map.tasks.maximum", 2);
     maxReduceSlots = conf.getInt(
                   "mapred.tasktracker.reduce.tasks.maximum", 2);
+    UserGroupInformation.setConfiguration(originalConf);
     aclsManager = new ACLsManager(conf, new JobACLsManager(conf), null);
     this.jobTrackAddr = JobTracker.getAddress(conf);
     String infoAddr = 
@@ -1287,12 +1241,17 @@ public class TaskTracker 
     this.server = new HttpServer("task", httpBindAddress, httpPort,
         httpPort == 0, conf, aclsManager.getAdminsAcl());
     workerThreads = conf.getInt("tasktracker.http.threads", 40);
-    this.shuffleServerMetrics = new ShuffleServerMetrics(conf);
     server.setThreads(1, workerThreads);
     // let the jsp pages get to the task tracker, config, and other relevant
     // objects
     FileSystem local = FileSystem.getLocal(conf);
     this.localDirAllocator = new LocalDirAllocator("mapred.local.dir");
+    // create user log manager
+    setUserLogManager(new UserLogManager(conf));
+    SecurityUtil.login(originalConf, TT_KEYTAB_FILE, TT_USER_NAME);
+
+    initialize();
+    this.shuffleServerMetrics = ShuffleServerInstrumentation.create(this);
     server.setAttribute("task.tracker", this);
     server.setAttribute("local.file.system", local);
     server.setAttribute("conf", conf);
@@ -1304,13 +1263,6 @@ public class TaskTracker 
     server.start();
     this.httpPort = server.getPort();
     checkJettyPort(httpPort);
-    // create user log manager
-    setUserLogManager(new UserLogManager(conf));
-
-    UserGroupInformation.setConfiguration(originalConf);
-    SecurityUtil.login(originalConf, TT_KEYTAB_FILE, TT_USER_NAME);
-
-    initialize();
   }
 
   private void checkJettyPort(int port) throws IOException { 
@@ -1644,11 +1596,7 @@ public class TaskTracker 
           } else {
             reduceTotal--;
           }
-          try {
-            myInstrumentation.completeTask(taskStatus.getTaskID());
-          } catch (MetricsException me) {
-            LOG.warn("Caught: " + StringUtils.stringifyException(me));
-          }
+          myInstrumentation.completeTask(taskStatus.getTaskID());
           runningTasks.remove(taskStatus.getTaskID());
         }
       }
@@ -3430,7 +3378,10 @@ public class TaskTracker 
       // enable the server to track time spent waiting on locks
       ReflectionUtils.setContentionTracing
         (conf.getBoolean("tasktracker.contention.tracking", false));
-      new TaskTracker(conf).run();
+      DefaultMetricsSystem.initialize("TaskTracker");
+      TaskTracker tt = new TaskTracker(conf);
+      MBeans.register("TaskTracker", "TaskTrackerInfo", tt);
+      tt.run();
     } catch (Throwable e) {
       LOG.error("Can not start task tracker because "+
                 StringUtils.stringifyException(e));
@@ -3469,8 +3420,8 @@ public class TaskTracker 
       FSDataInputStream mapOutputIn = null;
  
       long totalRead = 0;
-      ShuffleServerMetrics shuffleMetrics =
-        (ShuffleServerMetrics) context.getAttribute("shuffleServerMetrics");
+      ShuffleServerInstrumentation shuffleMetrics =
+        (ShuffleServerInstrumentation) context.getAttribute("shuffleServerMetrics");
       TaskTracker tracker = 
         (TaskTracker) context.getAttribute("task.tracker");
 
@@ -3914,4 +3865,70 @@ public class TaskTracker 
     ACLsManager getACLsManager() {
       return aclsManager;
     }
+
+  // Begin MXBean implementation
+  @Override
+  public String getHostname() {
+    return localHostname;
+  }
+
+  @Override
+  public String getVersion() {
+    return VersionInfo.getVersion() +", r"+ VersionInfo.getRevision();
+  }
+
+  @Override
+  public String getConfigVersion() {
+    return originalConf.get(CONF_VERSION_KEY, CONF_VERSION_DEFAULT);
+  }
+
+  @Override
+  public String getJobTrackerUrl() {
+    return originalConf.get("mapred.job.tracker");
+  }
+
+  @Override
+  public int getRpcPort() {
+    return taskReportAddress.getPort();
+  }
+
+  @Override
+  public int getHttpPort() {
+    return httpPort;
+  }
+
+  @Override
+  public boolean isHealthy() {
+    boolean healthy = true;
+    TaskTrackerHealthStatus hs = new TaskTrackerHealthStatus();
+    if (healthChecker != null) {
+      healthChecker.setHealthStatus(hs);
+      healthy = hs.isNodeHealthy();
+    }    
+    return healthy;
+  }
+
+  @Override
+  public String getTasksInfoJson() {
+    return getTasksInfo().toJson();
+  }
+
+  InfoMap getTasksInfo() {
+    InfoMap map = new InfoMap();
+    int failed = 0;
+    int commitPending = 0;
+    for (TaskStatus st : getNonRunningTasks()) {
+      if (st.getRunState() == TaskStatus.State.FAILED ||
+          st.getRunState() == TaskStatus.State.FAILED_UNCLEAN) {
+        ++failed;
+      } else if (st.getRunState() == TaskStatus.State.COMMIT_PENDING) {
+        ++commitPending;
+      }
+    }
+    map.put("running", runningTasks.size());
+    map.put("failed", failed);
+    map.put("commit_pending", commitPending);
+    return map;
+  }
+  // End MXBean implementation
 }

Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java?rev=1077597&r1=1077596&r2=1077597&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java Fri Mar  4 04:33:55 2011
@@ -19,6 +19,8 @@
 package org.apache.hadoop.mapred;
 
 import java.io.File;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 
 /**
  * TaskTrackerInstrumentation defines a number of instrumentation points
@@ -61,5 +63,14 @@ class TaskTrackerInstrumentation  {
    * @param t
    */
   public void reportTaskEnd(TaskAttemptID t) {}
-   
+
+  static TaskTrackerInstrumentation create(TaskTracker tt) {
+    return create(tt, DefaultMetricsSystem.INSTANCE);
+  }
+
+  static TaskTrackerInstrumentation create(TaskTracker tt, MetricsSystem ms) {
+    return ms.register("TaskTrackerMetrics", "TaskTracker metrics",
+                       new TaskTrackerMetricsSource(tt));
+  }
+
 }

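[Editorial note] The conf-based hook survives this change: getInstrumentationClass() now returns null by default (so the metrics2 source is used), but a custom subclass can still be wired in through mapred.tasktracker.instrumentation and is instantiated reflectively by createInstrumentation(). A hedged sketch; MyTaskTrackerInstrumentation is illustrative, not part of the commit:

// Illustrative subclass, notionally in org.apache.hadoop.mapred since the
// base class is package-private.
class MyTaskTrackerInstrumentation extends TaskTrackerInstrumentation {
  public MyTaskTrackerInstrumentation(TaskTracker t) {
    super(t);  // createInstrumentation() looks up this (TaskTracker) ctor
  }

  @Override
  public void completeTask(TaskAttemptID t) {
    // custom accounting for completed tasks goes here
  }
}

// Wiring it up, equivalent to setting mapred.tasktracker.instrumentation:
//   JobConf conf = new JobConf();
//   TaskTracker.setInstrumentationClass(conf,
//       MyTaskTrackerInstrumentation.class);
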
Added: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTrackerMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTrackerMXBean.java?rev=1077597&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTrackerMXBean.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTrackerMXBean.java Fri Mar  4 04:33:55 2011
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+/**
+ * MXBean interface for TaskTracker
+ */
+public interface TaskTrackerMXBean {
+
+  /**
+   * @return the hostname of the tasktracker
+   */
+  String getHostname();
+
+  /**
+   * @return the version of the code base
+   */
+  String getVersion();
+
+  /**
+   * @return the config version (from the configuration properties)
+   */
+  String getConfigVersion();
+
+  /**
+   * @return the URL of the jobtracker
+   */
+  String getJobTrackerUrl();
+
+  /**
+   * @return the RPC port of the tasktracker
+   */
+  int getRpcPort();
+
+  /**
+   * @return the HTTP port of the tasktracker
+   */
+  int getHttpPort();
+
+  /**
+   * @return the health status of the tasktracker
+   */
+  boolean isHealthy();
+
+  /**
+   * @return JSON-formatted info about the tasks of the tasktracker
+   */
+  String getTasksInfoJson();
+
+}

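[Editorial note] TaskTracker registers itself for this interface via MBeans.register("TaskTracker", "TaskTrackerInfo", tt) in main() above, so the attributes become visible to any JMX client. A sketch of an in-process read; the ObjectName assumes Hadoop's "Hadoop:service=...,name=..." convention for MBeans.register, which should be verified before relying on it:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Illustrative JMX read, not part of this commit.
public class TaskTrackerJmxPeek {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Assumed name; MBeans.register("TaskTracker", "TaskTrackerInfo", tt)
    // is expected to map to this, per Hadoop's naming convention.
    ObjectName name =
        new ObjectName("Hadoop:service=TaskTracker,name=TaskTrackerInfo");
    System.out.println("hostname  = " + mbs.getAttribute(name, "Hostname"));
    System.out.println("http port = " + mbs.getAttribute(name, "HttpPort"));
    System.out.println("healthy   = " + mbs.getAttribute(name, "Healthy"));
    System.out.println("tasks     = " + mbs.getAttribute(name, "TasksInfoJson"));
  }
}
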
Added: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTrackerMetricsSource.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTrackerMetricsSource.java?rev=1077597&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTrackerMetricsSource.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTrackerMetricsSource.java Fri Mar  4 04:33:55 2011
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import org.apache.hadoop.metrics2.MetricsBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.lib.MetricMutableCounterInt;
+import org.apache.hadoop.metrics2.lib.MetricMutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.source.JvmMetricsSource;
+
+/**
+ * Instrumentation for metrics v2
+ */
+@SuppressWarnings("deprecation")
+public class TaskTrackerMetricsSource extends TaskTrackerInstrumentation
+                                  implements MetricsSource {
+
+  final MetricsRegistry registry = new MetricsRegistry("tasktracker");
+  final MetricMutableGaugeInt mapsRunning =
+      registry.newGauge("maps_running", "", 0);
+  final MetricMutableGaugeInt redsRunning =
+      registry.newGauge("reduces_running", "", 0);
+  final MetricMutableGaugeInt mapSlots =
+      registry.newGauge("mapTaskSlots", "", 0);
+  final MetricMutableGaugeInt redSlots =
+      registry.newGauge("reduceTaskSlots", "", 0);
+  final MetricMutableCounterInt completedTasks =
+      registry.newCounter("tasks_completed", "", 0);
+  final MetricMutableCounterInt timedoutTasks =
+      registry.newCounter("tasks_failed_timeout", "", 0);
+  final MetricMutableCounterInt pingFailedTasks =
+      registry.newCounter("tasks_failed_ping", "", 0);
+
+  public TaskTrackerMetricsSource(TaskTracker tt) {
+    super(tt);
+    String sessionId = tt.getJobConf().getSessionId();
+    JvmMetricsSource.create("TaskTracker", sessionId);
+    registry.setContext("mapred").tag("sessionId", "", sessionId);
+  }
+
+  public void getMetrics(MetricsBuilder builder, boolean all) {
+    mapsRunning.set(tt.mapTotal);
+    redsRunning.set(tt.reduceTotal);
+    mapSlots.set(tt.getMaxCurrentMapTasks());
+    redSlots.set(tt.getMaxCurrentReduceTasks());
+    registry.snapshot(builder.addRecord(registry.name()), all);
+  }
+
+  @Override
+  public void completeTask(TaskAttemptID t) {
+    completedTasks.incr();
+  }
+
+  @Override
+  public void timedoutTask(TaskAttemptID t) {
+    timedoutTasks.incr();
+  }
+
+  @Override
+  public void taskFailedPing(TaskAttemptID t) {
+    pingFailedTasks.incr();
+  }
+
+}

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1077597&r1=1077596&r2=1077597&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Fri Mar  4 04:33:55 2011
@@ -33,7 +33,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
-import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 
@@ -638,7 +638,7 @@ public class SimulatedFSDataset  impleme
 
     try {
       bean = new StandardMBean(this,FSDatasetMBean.class);
-      mbeanName = MBeanUtil.registerMBean("DataNode",
+      mbeanName = MBeans.register("DataNode",
           "FSDatasetState-" + storageId, bean);
     } catch (NotCompliantMBeanException e) {
       e.printStackTrace();
@@ -649,7 +649,7 @@ public class SimulatedFSDataset  impleme
 
   public void shutdown() {
     if (mbeanName != null)
-      MBeanUtil.unregisterMBean(mbeanName);
+      MBeans.unregister(mbeanName);
   }
 
   public String getStorageInfo() {

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java?rev=1077597&r1=1077596&r2=1077597&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java Fri Mar  4 04:33:55 2011
@@ -23,9 +23,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.conf.Configuration;
 import junit.framework.TestCase;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 public class TestDataNodeMetrics extends TestCase {
   
@@ -41,8 +41,7 @@ public class TestDataNodeMetrics extends
       List<DataNode> datanodes = cluster.getDataNodes();
       assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
-      DataNodeMetrics metrics = datanode.getMetrics();
-      assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
+      assertCounter("bytes_written", LONG_FILE_LEN, datanode.getMetrics());
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }

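[Editorial note] The test changes below switch from reading metric fields to org.apache.hadoop.test.MetricsAsserts: getMetrics(source) snapshots any MetricsSource into a MetricsRecordBuilder, and assertCounter/assertGauge check named values on it. A sketch of the idiom against the illustrative ExampleSource from earlier (assuming MetricsAsserts accepts any MetricsSource, as these tests suggest):

import static org.apache.hadoop.test.MetricsAsserts.*;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import junit.framework.TestCase;

// Illustrative test, not part of this commit.
public class TestExampleSource extends TestCase {
  public void testRequests() {
    ExampleSource source = new ExampleSource("test-session");
    source.markRequest();
    source.markRequest();
    // Snapshot once, then assert named values against the record builder.
    MetricsRecordBuilder rb = getMetrics(source);
    assertCounter("requests", 2, rb);
  }
}
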
Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java?rev=1077597&r1=1077596&r2=1077597&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java Fri Mar  4 04:33:55 2011
@@ -35,7 +35,6 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -83,7 +82,6 @@ public class TestSaveNamespace extends T
 
   private void saveNamespaceWithInjectedFault(Fault fault) throws IOException {
     Configuration conf = getConf();
-    NameNode.myMetrics = new NameNodeMetrics(conf, null);
     NameNode.format(conf);
     NameNode nn = new NameNode(conf);
     FSNamesystem fsn = nn.getNamesystem();
@@ -159,7 +157,6 @@ public class TestSaveNamespace extends T
   // @Test
   public void testSaveWhileEditsRolled() throws Exception {
     Configuration conf = getConf();
-    NameNode.myMetrics = new NameNodeMetrics(conf, null);
     NameNode.format(conf);
     NameNode nn = new NameNode(conf);
     FSNamesystem fsn = nn.getNamesystem();

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java?rev=1077597&r1=1077596&r2=1077597&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java Fri Mar  4 04:33:55 2011
@@ -30,6 +30,8 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
+import static org.apache.hadoop.test.MetricsAsserts.*;
+
 /**
  * Test case for FilesInGetListingOps metric in Namenode
  */
@@ -43,7 +45,7 @@ public class TestNNMetricFilesInGetListi
   }
      
   private MiniDFSCluster cluster;
-  private NameNodeMetrics nnMetrics;
+  private NameNodeInstrumentation nnMetrics;
   private DistributedFileSystem fs;
   private Random rand = new Random();
 
@@ -52,7 +54,7 @@ public class TestNNMetricFilesInGetListi
     cluster = new MiniDFSCluster(CONF, 1, true, null);
     cluster.waitActive();
     cluster.getNameNode();
-	nnMetrics = NameNode.getNameNodeMetrics();
+    nnMetrics = NameNode.getNameNodeMetrics();
     fs = (DistributedFileSystem) cluster.getFileSystem();
   }
 
@@ -74,15 +76,15 @@ public class TestNNMetricFilesInGetListi
     createFile("/tmp2/t1", 3200, (short)3);
     createFile("/tmp2/t2", 3200, (short)3);
     cluster.getNameNode().getListing("/tmp1", HdfsFileStatus.EMPTY_NAME) ;
-    assertEquals(2,nnMetrics.numFilesInGetListingOps.getCurrentIntervalValue());
+    assertCounter("FilesInGetListingOps", 2, nnMetrics);
     cluster.getNameNode().getListing("/tmp2", HdfsFileStatus.EMPTY_NAME) ;
-    assertEquals(4,nnMetrics.numFilesInGetListingOps.getCurrentIntervalValue());
+    assertCounter("FilesInGetListingOps", 4, nnMetrics);
     // test non-existent path
     cluster.getNameNode().getListing("/tmp", HdfsFileStatus.EMPTY_NAME) ;
-    assertEquals(4,nnMetrics.numFilesInGetListingOps.getCurrentIntervalValue());
+    assertCounter("FilesInGetListingOps", 4, nnMetrics);
     // test listing a file
     cluster.getNameNode().getListing("/tmp1/t1", HdfsFileStatus.EMPTY_NAME) ;
-    assertEquals(5,nnMetrics.numFilesInGetListingOps.getCurrentIntervalValue());
+    assertCounter("FilesInGetListingOps", 5, nnMetrics);
   }
 }
 

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1077597&r1=1077596&r2=1077597&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Fri Mar  4 04:33:55 2011
@@ -33,6 +33,11 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+
+import static org.apache.hadoop.test.MetricsAsserts.*;
+
 /**
  * Test for metrics published by the Namenode
  */
@@ -52,11 +57,11 @@ public class TestNameNodeMetrics extends
   }
   
   private MiniDFSCluster cluster;
-  private FSNamesystemMetrics metrics;
+  private MetricsSource fsnMetrics;
   private DistributedFileSystem fs;
   private Random rand = new Random();
   private FSNamesystem namesystem;
-  private NameNodeMetrics nnMetrics;
+  private NameNodeInstrumentation nnMetrics;
 
   private static Path getTestPath(String fileName) {
     return new Path(TEST_ROOT_DIR_PATH, fileName);
@@ -68,8 +73,8 @@ public class TestNameNodeMetrics extends
     cluster.waitActive();
     namesystem = cluster.getNameNode().getNamesystem();
     fs = (DistributedFileSystem) cluster.getFileSystem();
-    metrics = namesystem.getFSNamesystemMetrics();
     nnMetrics = NameNode.getNameNodeMetrics();
+    fsnMetrics = nnMetrics.fsNamesystemMetrics();
   }
   
   @Override
@@ -86,8 +91,6 @@ public class TestNameNodeMetrics extends
     // Wait for metrics update (corresponds to dfs.replication.interval
     // for some block related metrics to get updated)
     Thread.sleep(1000);
-    metrics.doUpdates(null);
-    nnMetrics.doUpdates(null);
   }
 
   private void readFile(FileSystem fileSys,Path name) throws IOException {
@@ -103,15 +106,16 @@ public class TestNameNodeMetrics extends
     // Add files with 100 blocks
     final Path file = getTestPath("testFileAdd");
     createFile(file, 3200, (short)3);
-    final int blockCount = 32;
+    final long blockCount = 32;
     int blockCapacity = namesystem.getBlockCapacity();
     updateMetrics();
-    assertEquals(blockCapacity, metrics.blockCapacity.get());
+    assertGauge("BlockCapacity", blockCapacity, fsnMetrics);
     
     // File create operations is 1
     // Number of files created is depth of <code>file</code> path
-    assertEquals(1, nnMetrics.numCreateFileOps.getPreviousIntervalValue());
-    assertEquals(file.depth(), nnMetrics.numFilesCreated.getPreviousIntervalValue());
+    MetricsRecordBuilder rb = getMetrics(nnMetrics);
+    assertCounter("CreateFileOps", 1, rb);
+    assertCounter("FilesCreated", file.depth(), rb);
 
     // Blocks are stored in a hashmap. Compute its capacity, which
     // doubles every time the number of entries reaches the threshold.
@@ -120,10 +124,11 @@ public class TestNameNodeMetrics extends
       blockCapacity <<= 1;
     }
     updateMetrics();
-    int filesTotal = file.depth() + 1; // Add 1 for root
-    assertEquals(filesTotal, metrics.filesTotal.get());
-    assertEquals(blockCount, metrics.blocksTotal.get());
-    assertEquals(blockCapacity, metrics.blockCapacity.get());
+    long filesTotal = file.depth() + 1; // Add 1 for root
+    rb = getMetrics(fsnMetrics);
+    assertGauge("FilesTotal", filesTotal, rb);
+    assertGauge("BlocksTotal", blockCount, rb);
+    assertGauge("BlockCapacity", blockCapacity, rb);
     fs.delete(file, true);
     filesTotal--; // reduce the filecount for deleted file
     
@@ -131,12 +136,14 @@ public class TestNameNodeMetrics extends
     // the blocks pending deletion are sent for deletion to the datanodes.
     Thread.sleep(DFS_REPLICATION_INTERVAL * (DATANODE_COUNT + 1) * 1000);
     updateMetrics();
-    assertEquals(filesTotal, metrics.filesTotal.get());
-    assertEquals(0, metrics.pendingDeletionBlocks.get());
+    rb = getMetrics(fsnMetrics);
+    assertGauge("FilesTotal", filesTotal, rb);
+    assertGauge("PendingDeletionBlocks", 0L, rb);
     
     // Delete file operations and the number of files deleted must each be 1
-    assertEquals(1, nnMetrics.numDeleteFileOps.getPreviousIntervalValue());
-    assertEquals(1, nnMetrics.numFilesDeleted.getPreviousIntervalValue());
+    rb = getMetrics(nnMetrics);
+    assertCounter("DeleteFileOps", 1, rb);
+    assertCounter("FilesDeleted", 1, rb);
   }
   
   /** Corrupt a block and ensure metrics reflects it */
@@ -149,14 +156,16 @@ public class TestNameNodeMetrics extends
     LocatedBlock block = namesystem.getBlockLocations(file.toString(), 0, 1).get(0);
     namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
     updateMetrics();
-    assertEquals(1, metrics.corruptBlocks.get());
-    assertEquals(1, metrics.pendingReplicationBlocks.get());
-    assertEquals(1, metrics.scheduledReplicationBlocks.get());
+    MetricsRecordBuilder rb = getMetrics(fsnMetrics);
+    assertGauge("CorruptBlocks", 1L, rb);
+    assertGauge("PendingReplicationBlocks", 1L, rb);
+    assertGauge("ScheduledReplicationBlocks", 1L, rb);
     fs.delete(file, true);
     updateMetrics();
-    assertEquals(0, metrics.corruptBlocks.get());
-    assertEquals(0, metrics.pendingReplicationBlocks.get());
-    assertEquals(0, metrics.scheduledReplicationBlocks.get());
+    rb = getMetrics(fsnMetrics);
+    assertGauge("CorruptBlocks", 0L, rb);
+    assertGauge("PendingReplicationBlocks", 0L, rb);
+    assertGauge("ScheduledReplicationBlocks", 0L, rb);
   }
   
   /** Create excess blocks by reducing the replication factor for
@@ -165,10 +174,10 @@ public class TestNameNodeMetrics extends
   public void testExcessBlocks() throws Exception {
     Path file = getTestPath("testExcessBlocks");
     createFile(file, 100, (short)2);
-    int totalBlocks = 1;
+    long totalBlocks = 1;
     namesystem.setReplication(file.toString(), (short)1);
     updateMetrics();
-    assertEquals(totalBlocks, metrics.excessBlocks.get());
+    assertGauge("ExcessBlocks", totalBlocks, fsnMetrics);
     fs.delete(file, true);
   }
   
@@ -182,11 +191,12 @@ public class TestNameNodeMetrics extends
     LocatedBlock block = namesystem.getBlockLocations(file.toString(), 0, 1).get(0);
     namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
     updateMetrics();
-    assertEquals(1, metrics.underReplicatedBlocks.get());
-    assertEquals(1, metrics.missingBlocks.get());
+    MetricsRecordBuilder rb = getMetrics(fsnMetrics);
+    assertGauge("UnderReplicatedBlocks", 1L, rb);
+    assertGauge("MissingBlocks", 1L, rb);
     fs.delete(file, true);
     updateMetrics();
-    assertEquals(0, metrics.underReplicatedBlocks.get());
+    assertGauge("UnderReplicatedBlocks", 0L, fsnMetrics);
   }
   
   /**
@@ -206,49 +216,31 @@ public class TestNameNodeMetrics extends
 
     // When the cluster starts for the first time there are no file
     // (read, create, open) operations, so the metric numGetBlockLocations
     // should be 0.
-    // Verify that numGetBlockLocations for current interval 
-    // and previous interval are 0
-    assertEquals("numGetBlockLocations for previous interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getPreviousIntervalValue());
-    assertEquals("numGetBlockLocations for current interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getCurrentIntervalValue());
+    assertCounter("GetBlockLocations", 0, nnMetrics);
 
     //Perform create file operation
     createFile(file1_Path,100,(short)2);
     updateMetrics();
   
     //Create file does not change numGetBlockLocations metric
-    //expect numGetBlockLocations = 0 for previous and current interval 
-    assertEquals("numGetBlockLocations for previous interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getPreviousIntervalValue());
-    // Verify numGetBlockLocations for current interval is 0
-    assertEquals("numGetBlockLocations for current interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getCurrentIntervalValue());
+    assertCounter("GetBlockLocations", 0, nnMetrics);
   
     // Open and read file operation increments numGetBlockLocations
     // Perform read file operation on earlier created file
     readFile(fs, file1_Path);
     updateMetrics();
     // Verify read file operation has incremented numGetBlockLocations by 1
-    assertEquals("numGetBlockLocations for previous interval is incorrect",
-    1,nnMetrics.numGetBlockLocations.getPreviousIntervalValue());
-    // Verify numGetBlockLocations for current interval is 0
-    assertEquals("numGetBlockLocations for current interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getCurrentIntervalValue());
+    assertCounter("GetBlockLocations", 1, nnMetrics);
 
     // Opening and reading the file twice increments numGetBlockLocations by 2
     readFile(fs, file1_Path);
     readFile(fs, file1_Path);
     updateMetrics();
-    assertEquals("numGetBlockLocations for previous interval is incorrect",
-    2,nnMetrics.numGetBlockLocations.getPreviousIntervalValue());
-    // Verify numGetBlockLocations for current interval is 0
-    assertEquals("numGetBlockLocations for current interval is incorrect",
-    0,nnMetrics.numGetBlockLocations.getCurrentIntervalValue());
-  
+    assertCounter("GetBlockLocations", 3, nnMetrics);
+
     // Verify the total load metric; total load equals the number of datanodes started.
     updateMetrics();
-    assertEquals("Metrics TotalLoad is incorrect"
-    ,DATANODE_COUNT,metrics.totalLoad.get());
+    assertGauge("TotalLoad", DATANODE_COUNT, fsnMetrics);
   }
+
 }
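
The TestNameNodeMetrics changes above replace direct field reads such as
metrics.filesTotal.get() with the org.apache.hadoop.test.MetricsAsserts
helpers. A minimal sketch of the pattern, assuming `source` is any
MetricsSource (e.g. the FSNamesystem source used above); the method name
and the asserted values are hypothetical:

    import static org.apache.hadoop.test.MetricsAsserts.*;
    import org.apache.hadoop.metrics2.MetricsRecordBuilder;
    import org.apache.hadoop.metrics2.MetricsSource;

    void checkMetrics(MetricsSource source) {
      // getMetrics() drives the source into a mock MetricsRecordBuilder;
      // the assert* helpers then verify what was recorded, by metric name.
      MetricsRecordBuilder rb = getMetrics(source);
      assertGauge("FilesTotal", 1L, rb);     // gauge: point-in-time value
      assertCounter("FilesCreated", 1, rb);  // counter: cumulative count
    }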

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/ipc/TestRPC.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/ipc/TestRPC.java?rev=1077597&r1=1077596&r2=1077597&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/ipc/TestRPC.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/ipc/TestRPC.java Fri Mar  4 04:33:55 2011
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ipc;
 
+import org.apache.hadoop.metrics2.MetricsSource;
 import java.io.IOException;
 import java.net.ConnectException;
 import java.net.InetSocketAddress;
@@ -33,16 +34,15 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.UTF8;
 import org.apache.hadoop.io.Writable;
-
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.spi.NullContext;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
+import org.apache.hadoop.ipc.metrics.RpcInstrumentation;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.security.AccessControlException;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 
 /** Unit tests for RPC. */
 public class TestRPC extends TestCase {
@@ -251,24 +251,22 @@ public class TestRPC extends TestCase {
     stringResult = proxy.echo((String)null);
     assertEquals(stringResult, null);
     
-    // Check rpcMetrics 
-    server.rpcMetrics.doUpdates(new NullContext());
-    
+    // Check rpcMetrics
+    RpcInstrumentation rpcMetrics = server.rpcMetrics;
+    MetricsRecordBuilder rb = getMetrics(rpcMetrics);
+
     // The count of 4 includes the getProtocolVersion() call
-    assertEquals(4, server.rpcMetrics.rpcProcessingTime.getPreviousIntervalNumOps());
-    assertTrue(server.rpcMetrics.sentBytes.getPreviousIntervalValue() > 0);
-    assertTrue(server.rpcMetrics.receivedBytes.getPreviousIntervalValue() > 0);
-    
-    // Number of calls to echo method should be 2
-    server.rpcDetailedMetrics.doUpdates(new NullContext());
-    MetricsTimeVaryingRate metrics = 
-      (MetricsTimeVaryingRate)server.rpcDetailedMetrics.registry.get("echo");
-    assertEquals(2, metrics.getPreviousIntervalNumOps());
-    
+    assertCounter("RpcProcessingTime_num_ops", 4L, rb);
+    assertCounterGt("SentBytes", 0L, rb);
+    assertCounterGt("ReceivedBytes", 0L, rb);
+
+    MetricsSource detailed = rpcMetrics.detailed();
+    rb = getMetrics(detailed);
+    assertCounter("getProtocolVersion_num_ops", 1L, rb);
     // Number of calls to ping method should be 1
-    metrics = 
-      (MetricsTimeVaryingRate)server.rpcDetailedMetrics.registry.get("ping");
-    assertEquals(1, metrics.getPreviousIntervalNumOps());
+    assertCounter("ping_num_ops", 1L, rb);
+    // Number of calls to echo method should be 2
+    assertCounter("echo_num_ops", 2L, rb);
     
     String[] stringResults = proxy.echo(new String[]{"foo","bar"});
     assertTrue(Arrays.equals(stringResults, new String[]{"foo","bar"}));
@@ -367,6 +365,7 @@ public class TestRPC extends TestCase {
     server.start();
 
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
+    RpcInstrumentation rpcMetrics = server.getRpcMetrics();
     
     try {
       proxy = (TestProtocol)RPC.getProxy(
@@ -387,23 +386,16 @@ public class TestRPC extends TestCase {
       if (proxy != null) {
         RPC.stopProxy(proxy);
       }
+      MetricsRecordBuilder rb = getMetrics(rpcMetrics);
       if (expectFailure) {
-        assertEquals("Wrong number of authorizationFailures ", 1,  
-            server.getRpcMetrics().authorizationFailures
-            .getCurrentIntervalValue());
+        assertCounter("rpcAuthorizationFailures", 1, rb);
       } else {
-        assertEquals("Wrong number of authorizationSuccesses ", 1, 
-            server.getRpcMetrics().authorizationSuccesses
-            .getCurrentIntervalValue());
+        assertCounter("rpcAuthorizationSuccesses", 1, rb);
       }
       // Since we don't have authentication turned on, we should see
       // 0 authentication successes and 0 authentication failures
-      assertEquals("Wrong number of authenticationFailures ", 0, 
-          server.getRpcMetrics().authenticationFailures
-          .getCurrentIntervalValue());
-      assertEquals("Wrong number of authenticationSuccesses ", 0, 
-          server.getRpcMetrics().authenticationSuccesses
-          .getCurrentIntervalValue());
+      assertCounter("rpcAuthenticationFailures", 0, rb);
+      assertCounter("rpcAuthenticationSuccesses", 0, rb);
     }
   }
   

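Note the shape of the TestRPC change: the old framework required an explicit
server.rpcMetrics.doUpdates(...) flush and exposed per-interval values via
getPreviousIntervalValue(), while metrics2 counters are cumulative, so a test
just snapshots on demand. A minimal sketch reusing the names from the diff
above (`server` is assumed to expose RpcInstrumentation via getRpcMetrics(),
as shown there):

    RpcInstrumentation rpcMetrics = server.getRpcMetrics();
    MetricsRecordBuilder rb = getMetrics(rpcMetrics);

    // Cumulative counters only grow, so assertions compare running totals
    // rather than per-interval deltas.
    assertCounterGt("SentBytes", 0L, rb);
    assertCounterGt("ReceivedBytes", 0L, rb);
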
Added: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/filter/TestPatternFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/filter/TestPatternFilter.java?rev=1077597&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/filter/TestPatternFilter.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/filter/TestPatternFilter.java Fri Mar  4 04:33:55 2011
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.filter;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.impl.ConfigBuilder;
+
+public class TestPatternFilter {
+
+  /**
+   * Filters should default to accept
+   */
+  @Test public void emptyConfigShouldAccept() {
+    SubsetConfiguration empty = new ConfigBuilder().subset("");
+    shouldAccept(empty, "anything");
+    shouldAccept(empty, Arrays.asList(new MetricsTag("key", "desc", "value")));
+  }
+
+  /**
+   * Filters should handle white-listing correctly
+   */
+  @Test public void includeOnlyShouldOnlyIncludeMatched() {
+    SubsetConfiguration wl = new ConfigBuilder()
+        .add("p.include", "foo")
+        .add("p.include.tags", "foo:f").subset("p");
+    shouldAccept(wl, "foo");
+    shouldAccept(wl, Arrays.asList(new MetricsTag("bar", "", ""),
+                                   new MetricsTag("foo", "", "f")));
+    shouldReject(wl, "bar");
+    shouldReject(wl, Arrays.asList(new MetricsTag("bar", "", "")));
+    shouldReject(wl, Arrays.asList(new MetricsTag("foo", "", "boo")));
+  }
+
+  /**
+   * Filters should handle black-listing correctly
+   */
+  @Test public void excludeOnlyShouldOnlyExcludeMatched() {
+    SubsetConfiguration bl = new ConfigBuilder()
+        .add("p.exclude", "foo")
+        .add("p.exclude.tags", "foo:f").subset("p");
+    shouldAccept(bl, "bar");
+    shouldAccept(bl, Arrays.asList(new MetricsTag("bar", "", "")));
+    shouldReject(bl, "foo");
+    shouldReject(bl, Arrays.asList(new MetricsTag("bar", "", ""),
+                                   new MetricsTag("foo", "", "f")));
+  }
+
+  /**
+   * Filters should accept unmatched items when both include and
+   * exclude patterns are present.
+   */
+  @Test public void shouldAcceptUnmatchedWhenBothAreConfigured() {
+    SubsetConfiguration c = new ConfigBuilder()
+        .add("p.include", "foo")
+        .add("p.include.tags", "foo:f")
+        .add("p.exclude", "bar")
+        .add("p.exclude.tags", "bar:b").subset("p");
+    shouldAccept(c, "foo");
+    shouldAccept(c, Arrays.asList(new MetricsTag("foo", "", "f")));
+    shouldReject(c, "bar");
+    shouldReject(c, Arrays.asList(new MetricsTag("bar", "", "b")));
+    shouldAccept(c, "foobar");
+    shouldAccept(c, Arrays.asList(new MetricsTag("foobar", "", "")));
+  }
+
+  /**
+   * Include patterns should take precedence over exclude patterns
+   */
+  @Test public void includeShouldOverrideExclude() {
+    SubsetConfiguration c = new ConfigBuilder()
+        .add("p.include", "foo")
+        .add("p.include.tags", "foo:f")
+        .add("p.exclude", "foo")
+        .add("p.exclude.tags", "foo:f").subset("p");
+    shouldAccept(c, "foo");
+    shouldAccept(c, Arrays.asList(new MetricsTag("foo", "", "f")));
+  }
+
+  static void shouldAccept(SubsetConfiguration conf, String s) {
+    assertTrue("accepts "+ s, newGlobFilter(conf).accepts(s));
+    assertTrue("accepts "+ s, newRegexFilter(conf).accepts(s));
+  }
+
+  static void shouldAccept(SubsetConfiguration conf, List<MetricsTag> tags) {
+    assertTrue("accepts "+ tags, newGlobFilter(conf).accepts(tags));
+    assertTrue("accepts "+ tags, newRegexFilter(conf).accepts(tags));
+  }
+
+  static void shouldReject(SubsetConfiguration conf, String s) {
+    assertTrue("rejects "+ s, !newGlobFilter(conf).accepts(s));
+    assertTrue("rejects "+ s, !newRegexFilter(conf).accepts(s));
+  }
+
+  static void shouldReject(SubsetConfiguration conf, List<MetricsTag> tags) {
+    assertTrue("rejects "+ tags, !newGlobFilter(conf).accepts(tags));
+    assertTrue("rejects "+ tags, !newRegexFilter(conf).accepts(tags));
+  }
+
+  /**
+   * Create a new glob filter with a config object
+   * @param conf  the config object
+   * @return the filter
+   */
+  public static GlobFilter newGlobFilter(SubsetConfiguration conf) {
+    GlobFilter f = new GlobFilter();
+    f.init(conf);
+    return f;
+  }
+
+  /**
+   * Create a new regex filter with a config object
+   * @param conf  the config object
+   * @return the filter
+   */
+  public static RegexFilter newRegexFilter(SubsetConfiguration conf) {
+    RegexFilter f = new RegexFilter();
+    f.init(conf);
+    return f;
+  }
+}
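
The tests above pin down the filter semantics: names matching an include
pattern are accepted, names matching an exclude pattern are rejected,
unmatched names are accepted, and include takes precedence over exclude.
A minimal sketch using the test's own public helpers (the glob patterns
and names are hypothetical):

    SubsetConfiguration conf = new ConfigBuilder()
        .add("p.include", "jvm*")
        .add("p.exclude", "rpc*")
        .subset("p");
    GlobFilter filter = TestPatternFilter.newGlobFilter(conf);
    filter.accepts("jvmMetrics");   // true:  matches the include pattern
    filter.accepts("rpcActivity");  // false: matches the exclude pattern
    filter.accepts("mapred");       // true:  unmatched names are accepted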

Added: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/ConfigBuilder.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/ConfigBuilder.java?rev=1077597&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/ConfigBuilder.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/ConfigBuilder.java Fri Mar  4 04:33:55 2011
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import org.apache.commons.configuration.PropertiesConfiguration;
+import org.apache.commons.configuration.SubsetConfiguration;
+
+/**
+ * Helper class for building configs, mostly used in tests
+ */
+public class ConfigBuilder {
+
+  /** The built config */
+  public final PropertiesConfiguration config;
+
+  /**
+   * Default constructor
+   */
+  public ConfigBuilder() {
+    config = new PropertiesConfiguration();
+  }
+
+  /**
+   * Add a property to the config
+   * @param key of the property
+   * @param value of the property
+   * @return self
+   */
+  public ConfigBuilder add(String key, Object value) {
+    config.addProperty(key, value);
+    return this;
+  }
+
+  /**
+   * Save the config to a file
+   * @param filename the file to save to
+   * @return self
+   * @throws RuntimeException if the config cannot be saved
+   */
+  public ConfigBuilder save(String filename) {
+    try {
+      config.save(filename);
+    }
+    catch (Exception e) {
+      throw new RuntimeException("Error saving config", e);
+    }
+    return this;
+  }
+
+  /**
+   * Return a subset configuration (so that getParent() can be used).
+   * @param prefix  of the subset
+   * @return the subset config
+   */
+  public SubsetConfiguration subset(String prefix) {
+    return new SubsetConfiguration(config, prefix, ".");
+  }
+}
+
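
A quick usage sketch of ConfigBuilder: build up properties fluently,
optionally persist them, and take a prefixed view whose keys have the
prefix stripped (the key, value, and file name below are hypothetical):

    SubsetConfiguration conf = new ConfigBuilder()
        .add("p1.foo", "bar")
        .save("build/classes/example.properties")  // hypothetical path
        .subset("p1");
    String v = conf.getString("foo");  // "bar": the "p1." prefix is stripped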

Added: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/ConfigUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/ConfigUtil.java?rev=1077597&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/ConfigUtil.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/ConfigUtil.java Fri Mar  4 04:33:55 2011
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import java.io.PrintStream;
+import java.util.Iterator;
+
+import static org.junit.Assert.*;
+import org.apache.commons.configuration.Configuration;
+import org.apache.commons.configuration.PropertiesConfiguration;
+
+/**
+ * Helpers for config tests and debugging
+ */
+class ConfigUtil {
+
+  static void dump(Configuration c) {
+    dump(null, c, System.out);
+  }
+
+  static void dump(String header, Configuration c) {
+    dump(header, c, System.out);
+  }
+
+  static void dump(String header, Configuration c, PrintStream out) {
+    PropertiesConfiguration p = new PropertiesConfiguration();
+    p.copy(c);
+    if (header != null) {
+      out.println(header);
+    }
+    try { p.save(out); }
+    catch (Exception e) {
+      throw new RuntimeException("Error saving config", e);
+    }
+  }
+
+  static void assertEq(Configuration expected, Configuration actual) {
+    // Check that the actual config contains all the properties of the expected one
+    for (Iterator<?> it = expected.getKeys(); it.hasNext();) {
+      String key = (String) it.next();
+      assertTrue("actual should contain "+ key, actual.containsKey(key));
+      assertEquals("value of "+ key, expected.getProperty(key),
+                                     actual.getProperty(key));
+    }
+    // Check that the actual config has no extra properties
+    for (Iterator<?> it = actual.getKeys(); it.hasNext();) {
+      String key = (String) it.next();
+      assertTrue("expected should contain "+ key, expected.containsKey(key));
+    }
+  }
+
+}

Added: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/TestMetricsBuilderImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/TestMetricsBuilderImpl.java?rev=1077597&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/TestMetricsBuilderImpl.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/TestMetricsBuilderImpl.java Fri Mar  4 04:33:55 2011
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import static org.apache.hadoop.metrics2.filter.TestPatternFilter.*;
+
+public class TestMetricsBuilderImpl {
+
+  @Test public void recordBuilderShouldNoOpIfFiltered() {
+    SubsetConfiguration fc = new ConfigBuilder()
+        .add("p.exclude", "foo").subset("p");
+    MetricsBuilderImpl mb = new MetricsBuilderImpl();
+    mb.setRecordFilter(newGlobFilter(fc));
+    MetricsRecordBuilderImpl rb = mb.addRecord("foo");
+    rb.tag("foo", "", "value").addGauge("g0", "", 1);
+    assertEquals("no tags", 0, rb.tags().size());
+    assertEquals("no metrics", 0, rb.metrics().size());
+    assertNull("null record", rb.getRecord());
+    assertEquals("no records", 0, mb.getRecords().size());
+  }
+
+  @Test public void testPerMetricFiltering() {
+    SubsetConfiguration fc = new ConfigBuilder()
+        .add("p.exclude", "foo").subset("p");
+    MetricsBuilderImpl mb = new MetricsBuilderImpl();
+    mb.setMetricFilter(newGlobFilter(fc));
+    MetricsRecordBuilderImpl rb = mb.addRecord("foo");
+    rb.tag("foo", "", "").addCounter("c0", "", 0).addGauge("foo", "", 1);
+    assertEquals("1 tag", 1, rb.tags().size());
+    assertEquals("1 metric", 1, rb.metrics().size());
+    assertEquals("expect foo tag", "foo", rb.tags().get(0).name());
+    assertEquals("expect c0", "c0", rb.metrics().get(0).name());
+  }
+}

Added: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java?rev=1077597&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java Fri Mar  4 04:33:55 2011
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.Log;
+import java.util.Map;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.apache.commons.configuration.Configuration;
+
+import static org.apache.hadoop.metrics2.impl.ConfigUtil.*;
+
+/**
+ * Test metrics configuration
+ */
+public class TestMetricsConfig {
+
+  static final Log LOG = LogFactory.getLog(TestMetricsConfig.class);
+
+  /**
+   * Common use cases
+   * @throws Exception
+   */
+  @Test public void testCommon() throws Exception {
+    String filename = getTestFilename("test-metrics2");
+    new ConfigBuilder()
+        .add("*.foo", "default foo")
+        .add("p1.*.bar", "p1 default bar")
+        .add("p1.t1.*.bar", "p1.t1 default bar")
+        .add("p1.t1.i1.name", "p1.t1.i1.name")
+        .add("p1.t1.42.bar", "p1.t1.42.bar")
+        .add("p1.t2.i1.foo", "p1.t2.i1.foo")
+        .add("p2.*.foo", "p2 default foo")
+        .save(filename);
+
+    MetricsConfig mc = MetricsConfig.create("p1", filename);
+    dump("mc:", mc);
+
+    Configuration expected = new ConfigBuilder()
+        .add("*.bar", "p1 default bar")
+        .add("t1.*.bar", "p1.t1 default bar")
+        .add("t1.i1.name", "p1.t1.i1.name")
+        .add("t1.42.bar", "p1.t1.42.bar")
+        .add("t2.i1.foo", "p1.t2.i1.foo")
+        .config;
+
+    assertEq(expected, mc);
+
+    testInstances(mc);
+  }
+
+  private void testInstances(MetricsConfig c) throws Exception {
+    Map<String, MetricsConfig> map = c.getInstanceConfigs("t1");
+    Map<String, MetricsConfig> map2 = c.getInstanceConfigs("t2");
+
+    assertEquals("number of t1 instances", 2, map.size());
+    assertEquals("number of t2 instances", 1, map2.size());
+    assertTrue("contains t1 instance i1", map.containsKey("i1"));
+    assertTrue("contains t1 instance 42", map.containsKey("42"));
+    assertTrue("contains t2 instance i1", map2.containsKey("i1"));
+
+    MetricsConfig t1i1 = map.get("i1");
+    MetricsConfig t1i42 = map.get("42");
+    MetricsConfig t2i1 = map2.get("i1");
+    dump("--- t1 instance i1:", t1i1);
+    dump("--- t1 instance 42:", t1i42);
+    dump("--- t2 instance i1:", t2i1);
+
+    Configuration t1expected1 = new ConfigBuilder()
+        .add("name", "p1.t1.i1.name").config;
+    Configuration t1expected42 = new ConfigBuilder()
+         .add("bar", "p1.t1.42.bar").config;
+    Configuration t2expected1 = new ConfigBuilder()
+        .add("foo", "p1.t2.i1.foo").config;
+
+    assertEq(t1expected1, t1i1);
+    assertEq(t1expected42, t1i42);
+    assertEq(t2expected1, t2i1);
+
+    LOG.debug("asserting foo == default foo");
+    // Check default lookups
+    assertEquals("value of foo in t1 instance i1", "default foo",
+                 t1i1.getString("foo"));
+    assertEquals("value of bar in t1 instance i1", "p1.t1 default bar",
+                 t1i1.getString("bar"));
+    assertEquals("value of foo in t1 instance 42", "default foo",
+                 t1i42.getString("foo"));
+    assertEquals("value of foo in t2 instance i1", "p1.t2.i1.foo",
+                 t2i1.getString("foo"));
+    assertEquals("value of bar in t2 instance i1", "p1 default bar",
+                 t2i1.getString("bar"));
+  }
+
+  /**
+   * Should throw if the config files are missing
+   */
+  @Test public void testMissingFiles() {
+    try {
+      MetricsConfig.create("JobTracker");
+    }
+    catch (MetricsConfigException e) {
+      assertTrue("expected the 'cannot locate configuration' exception",
+                 e.getMessage().startsWith("Cannot locate configuration"));
+      return;
+    }
+    fail("should've thrown");
+  }
+
+  /**
+   * Test the config file load order
+   * @throws Exception
+   */
+  @Test public void testLoadFirst() throws Exception {
+    String filename = getTestFilename("hadoop-metrics2-p1");
+    new ConfigBuilder().add("p1.foo", "p1foo").save(filename);
+
+    MetricsConfig mc = MetricsConfig.create("p1");
+    MetricsConfig mc2 = MetricsConfig.create("p1", "na1", "na2", filename);
+    Configuration expected = new ConfigBuilder().add("foo", "p1foo").config;
+
+    assertEq(expected, mc);
+    assertEq(expected, mc2);
+  }
+
+  /**
+   * Return a test filename in the class path
+   * @param basename the base name of the properties file
+   * @return the filename
+   */
+  public static String getTestFilename(String basename) {
+    return "build/classes/"+ basename +".properties";
+  }
+
+}
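
testCommon() above also exercises the hierarchical lookup: an instance
config falls back to its type default (p1.t1.*.bar), then to the prefix
default (p1.*.bar), then to the global default (*.foo). A hypothetical
properties fragment with the same shape:

    # global default, visible under every prefix
    *.foo=default foo
    # default for everything under prefix p1
    p1.*.bar=p1 default bar
    # most specific key wins for instance i1 of type t1
    p1.t1.i1.name=p1.t1.i1.name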


