hbase-commits mailing list archives

From: ndimi...@apache.org
Subject: git commit: HBASE-12335 IntegrationTestRegionReplicaPerf is flaky
Date: Sat, 01 Nov 2014 00:18:26 GMT
Repository: hbase
Updated Branches:
  refs/heads/branch-1 4649646fc -> 4a803e79e


HBASE-12335 IntegrationTestRegionReplicaPerf is flaky


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4a803e79
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4a803e79
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4a803e79

Branch: refs/heads/branch-1
Commit: 4a803e79e1c742ff877a3abc8cbe2939870707cf
Parents: 4649646
Author: Nick Dimiduk <ndimiduk@apache.org>
Authored: Fri Oct 31 17:17:05 2014 -0700
Committer: Nick Dimiduk <ndimiduk@apache.org>
Committed: Fri Oct 31 17:17:05 2014 -0700

----------------------------------------------------------------------
 .../hbase/IntegrationTestRegionReplicaPerf.java | 125 +++++++++++++------
 .../hadoop/hbase/util/AbstractHBaseTool.java    |   2 +-
 .../hadoop/hbase/util/YammerHistogramUtils.java |  80 ++++++++++++
 .../hadoop/hbase/PerformanceEvaluation.java     | 114 +++++++++--------
 4 files changed, 223 insertions(+), 98 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/4a803e79/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java
index 4fba36c..6dad1d1 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java
@@ -19,33 +19,31 @@
 package org.apache.hadoop.hbase;
 
 import com.google.common.base.Objects;
+import com.yammer.metrics.core.Histogram;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.chaos.actions.MoveRandomRegionOfTableAction;
-import org.apache.hadoop.hbase.chaos.actions.RestartRsHoldingTableAction;
+import org.apache.hadoop.hbase.chaos.actions.RestartRandomRsExceptMetaAction;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
 import org.apache.hadoop.hbase.chaos.policies.PeriodicRandomActionPolicy;
 import org.apache.hadoop.hbase.chaos.policies.Policy;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
+import org.apache.hadoop.hbase.util.YammerHistogramUtils;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.experimental.categories.Category;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.LinkedList;
-import java.util.Queue;
-import java.util.Set;
+import java.util.*;
 import java.util.concurrent.Callable;
 
 import static java.lang.String.format;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -72,6 +70,24 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
   private static final String NUM_RS_KEY = "numRs";
   private static final String NUM_RS_DEFAULT = "" + 3;
 
+  /** Extract a descriptive statistic from a {@link com.yammer.metrics.core.Histogram}. */
+  private enum Stat {
+    STDEV {
+      @Override
+      double apply(Histogram hist) {
+        return hist.stdDev();
+      }
+    },
+    FOUR_9S {
+      @Override
+      double apply(Histogram hist) {
+        return hist.getSnapshot().getValue(0.9999);
+      }
+    };
+
+    abstract double apply(Histogram hist);
+  }
+
   private TableName tableName;
   private long sleepTime;
   private int replicaCount;
@@ -96,17 +112,21 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
     public TimingResult call() throws Exception {
       PerformanceEvaluation.TestOptions opts = PerformanceEvaluation.parseOpts(argv);
       PerformanceEvaluation.checkTable(admin, opts);
+      PerformanceEvaluation.RunResult results[] = null;
       long numRows = opts.totalRows;
-      long elapsedTime;
+      long elapsedTime = 0;
       if (opts.nomapred) {
-        elapsedTime = PerformanceEvaluation.doLocalClients(opts, admin.getConfiguration());
+        results = PerformanceEvaluation.doLocalClients(opts, admin.getConfiguration());
+        for (PerformanceEvaluation.RunResult r : results) {
+          elapsedTime = Math.max(elapsedTime, r.duration);
+        }
       } else {
         Job job = PerformanceEvaluation.doMapReduce(opts, admin.getConfiguration());
         Counters counters = job.getCounters();
         numRows = counters.findCounter(PerformanceEvaluation.Counter.ROWS).getValue();
         elapsedTime = counters.findCounter(PerformanceEvaluation.Counter.ELAPSED_TIME).getValue();
       }
-      return new TimingResult(numRows, elapsedTime);
+      return new TimingResult(numRows, elapsedTime, results);
     }
   }
 
@@ -114,12 +134,14 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
    * Record the results from a single {@link PerformanceEvaluation} job run.
    */
   static class TimingResult {
-    public long numRows;
-    public long elapsedTime;
+    public final long numRows;
+    public final long elapsedTime;
+    public final PerformanceEvaluation.RunResult results[];
 
-    public TimingResult(long numRows, long elapsedTime) {
+    public TimingResult(long numRows, long elapsedTime, PerformanceEvaluation.RunResult results[]) {
       this.numRows = numRows;
       this.elapsedTime = elapsedTime;
+      this.results = results;
     }
 
     @Override
@@ -161,7 +183,7 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
   @Override
   public void setUpMonkey() throws Exception {
     Policy p = new PeriodicRandomActionPolicy(sleepTime,
-      new RestartRsHoldingTableAction(sleepTime, tableName.getNameAsString()),
+      new RestartRandomRsExceptMetaAction(sleepTime),
       new MoveRandomRegionOfTableAction(tableName));
     this.monkey = new PolicyBasedChaosMonkey(util, p);
     // don't start monkey right away
@@ -178,7 +200,7 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
     addOptWithArg(PRIMARY_TIMEOUT_KEY, "Overrides hbase.client.primaryCallTimeout. Default: "
       + PRIMARY_TIMEOUT_DEFAULT + " (10ms)");
     addOptWithArg(NUM_RS_KEY, "Specify the number of RegionServers to use. Default: "
-      + NUM_RS_DEFAULT);
+        + NUM_RS_DEFAULT);
   }
 
   @Override
@@ -214,6 +236,22 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
     return null;
   }
 
+  /** Compute the mean of the given {@code stat} across the timing results. */
+  private static double calcMean(String desc, Stat stat, List<TimingResult> results) {
+    double sum = 0;
+    int count = 0;
+
+    for (TimingResult tr : results) {
+      for (PerformanceEvaluation.RunResult r : tr.results) {
+        assertNotNull("One of the run results is missing detailed run data.", r.hist);
+        sum += stat.apply(r.hist);
+        count += 1;
+        LOG.debug(desc + "{" + YammerHistogramUtils.getHistogramReport(r.hist) + "}");
+      }
+    }
+    return sum / count;
+  }
+
   public void test() throws Exception {
     int maxIters = 3;
     String replicas = "--replicas=" + replicaCount;
@@ -225,8 +263,8 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
       format("--nomapred --table=%s --latency --sampleRate=0.1 randomRead 4", tableName);
     String replicaReadOpts = format("%s %s", replicas, readOpts);
 
-    ArrayList<TimingResult> resultsWithoutReplica = new ArrayList<TimingResult>(maxIters);
-    ArrayList<TimingResult> resultsWithReplica = new ArrayList<TimingResult>(maxIters);
+    ArrayList<TimingResult> resultsWithoutReplicas = new ArrayList<TimingResult>(maxIters);
+    ArrayList<TimingResult> resultsWithReplicas = new ArrayList<TimingResult>(maxIters);
 
     // create/populate the table, replicas disabled
     LOG.debug("Populating table.");
@@ -234,14 +272,14 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
 
     // one last sanity check, then send in the clowns!
     assertEquals("Table must be created with DisabledRegionSplitPolicy. Broken test.",
-      DisabledRegionSplitPolicy.class.getName(),
-      util.getHBaseAdmin().getTableDescriptor(tableName).getRegionSplitPolicyClassName());
+        DisabledRegionSplitPolicy.class.getName(),
+        util.getHBaseAdmin().getTableDescriptor(tableName).getRegionSplitPolicyClassName());
     startMonkey();
 
     // collect a baseline without region replicas.
     for (int i = 0; i < maxIters; i++) {
       LOG.debug("Launching non-replica job " + (i + 1) + "/" + maxIters);
-      resultsWithoutReplica.add(new PerfEvalCallable(util.getHBaseAdmin(), readOpts).call());
+      resultsWithoutReplicas.add(new PerfEvalCallable(util.getHBaseAdmin(), readOpts).call());
       // TODO: sleep to let cluster stabilize, though monkey continues. is it necessary?
       Thread.sleep(5000l);
     }
@@ -256,32 +294,41 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
     // run test with region replicas.
     for (int i = 0; i < maxIters; i++) {
       LOG.debug("Launching replica job " + (i + 1) + "/" + maxIters);
-      resultsWithReplica.add(new PerfEvalCallable(util.getHBaseAdmin(), replicaReadOpts).call());
+      resultsWithReplicas.add(new PerfEvalCallable(util.getHBaseAdmin(), replicaReadOpts).call());
       // TODO: sleep to let cluster stabilize, though monkey continues. is it necessary?
       Thread.sleep(5000l);
     }
 
-    DescriptiveStatistics withoutReplicaStats = new DescriptiveStatistics();
-    for (TimingResult tr : resultsWithoutReplica) {
-      withoutReplicaStats.addValue(tr.elapsedTime);
-    }
-    DescriptiveStatistics withReplicaStats = new DescriptiveStatistics();
-    for (TimingResult tr : resultsWithReplica) {
-      withReplicaStats.addValue(tr.elapsedTime);
-    }
-
-    LOG.info(Objects.toStringHelper("testName")
-      .add("withoutReplicas", resultsWithoutReplica)
-      .add("withReplicas", resultsWithReplica)
-      .add("withoutReplicasMean", withoutReplicaStats.getMean())
-      .add("withReplicasMean", withReplicaStats.getMean())
+    // compare the average of the stdev and 99.99pct across runs to determine if region replicas
+    // are having an overall improvement on response variance experienced by clients.
+    double withoutReplicasStdevMean =
+        calcMean("withoutReplicas", Stat.STDEV, resultsWithoutReplicas);
+    double withoutReplicas9999Mean =
+        calcMean("withoutReplicas", Stat.FOUR_9S, resultsWithoutReplicas);
+    double withReplicasStdevMean =
+        calcMean("withReplicas", Stat.STDEV, resultsWithReplicas);
+    double withReplicas9999Mean =
+        calcMean("withReplicas", Stat.FOUR_9S, resultsWithReplicas);
+
+    LOG.info(Objects.toStringHelper(this)
+      .add("withoutReplicas", resultsWithoutReplicas)
+      .add("withReplicas", resultsWithReplicas)
+      .add("withoutReplicasStdevMean", withoutReplicasStdevMean)
+      .add("withoutReplicas99.99Mean", withoutReplicas9999Mean)
+      .add("withReplicasStdevMean", withReplicasStdevMean)
+      .add("withReplicas99.99Mean", withReplicas9999Mean)
       .toString());
 
     assertTrue(
-      "Running with region replicas under chaos should be as fast or faster than without. "
-      + "withReplicas.mean: " + withReplicaStats.getMean() + "ms "
-      + "withoutReplicas.mean: " + withoutReplicaStats.getMean() + "ms.",
-      withReplicaStats.getMean() <= withoutReplicaStats.getMean());
+      "Running with region replicas under chaos should have less request variance than without. "
+      + "withReplicas.stdev.mean: " + withReplicasStdevMean + "ms "
+      + "withoutReplicas.stdev.mean: " + withoutReplicasStdevMean + "ms.",
+      withReplicasStdevMean <= withoutReplicasStdevMean);
+    assertTrue(
+        "Running with region replicas under chaos should improve 99.99pct latency. "
+            + "withReplicas.99.99.mean: " + withReplicas9999Mean + "ms "
+            + "withoutReplicas.99.99.mean: " + withoutReplicas9999Mean + "ms.",
+        withReplicas9999Mean <= withoutReplicas9999Mean);
   }
 
   public static void main(String[] args) throws Exception {
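
For readers tracing the new assertion logic above, here is a minimal standalone sketch (not part of this commit) of the pattern the test now relies on: pull stdDev and the 99.99th percentile out of a Yammer 2.2.0 Histogram via a Stat-style enum, then average each statistic across runs as calcMean does. The StatSketch class name and the latency values are invented for illustration; YammerHistogramUtils and UniformSample are the ones used by this commit.

  import com.yammer.metrics.core.Histogram;
  import com.yammer.metrics.stats.UniformSample;
  import org.apache.hadoop.hbase.util.YammerHistogramUtils;

  public class StatSketch {
    enum Stat {
      STDEV   { @Override double apply(Histogram hist) { return hist.stdDev(); } },
      FOUR_9S { @Override double apply(Histogram hist) { return hist.getSnapshot().getValue(0.9999); } };
      abstract double apply(Histogram hist);
    }

    public static void main(String[] args) {
      // Two fake "runs" worth of latency samples (values are made up).
      Histogram[] runs = new Histogram[] {
          YammerHistogramUtils.newHistogram(new UniformSample(1024)),
          YammerHistogramUtils.newHistogram(new UniformSample(1024))
      };
      for (Histogram h : runs) {
        for (int i = 0; i < 10000; i++) {
          h.update(i % 250); // pretend per-request latencies in ms
        }
      }
      // Mirror calcMean: average one statistic over all runs.
      double stdevSum = 0, four9sSum = 0;
      for (Histogram h : runs) {
        stdevSum += Stat.STDEV.apply(h);
        four9sSum += Stat.FOUR_9S.apply(h);
      }
      System.out.println("mean stdev   = " + stdevSum / runs.length);
      System.out.println("mean 99.99th = " + four9sSum / runs.length);
    }
  }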

http://git-wip-us.apache.org/repos/asf/hbase/blob/4a803e79/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
index 96311d7..1323cdf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
@@ -94,7 +94,7 @@ public abstract class AbstractHBaseTool implements Tool {
       cmd = parseArgs(args);
       cmdLineArgs = args;
     } catch (ParseException e) {
-      LOG.error("Error when parsing command-line arguemnts", e);
+      LOG.error("Error when parsing command-line arguments", e);
       printUsage();
       return EXIT_FAILURE;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4a803e79/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
new file mode 100644
index 0000000..120f170
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
@@ -0,0 +1,80 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import com.yammer.metrics.core.Histogram;
+import com.yammer.metrics.stats.Sample;
+import com.yammer.metrics.stats.Snapshot;
+
+import java.lang.reflect.Constructor;
+import java.text.DecimalFormat;
+
+/** Utility functions for working with Yammer Metrics. */
+public final class YammerHistogramUtils {
+
+  // not for public consumption
+  private YammerHistogramUtils() {}
+
+  /**
+   * Used for formatting doubles so only two places appear after the decimal point.
+   */
+  private static DecimalFormat DOUBLE_FORMAT = new DecimalFormat("#0.00");
+
+  /**
+   * Create a new {@link com.yammer.metrics.core.Histogram} instance. These constructors
are
+   * not public in 2.2.0, so we use reflection to find them.
+   */
+  public static Histogram newHistogram(Sample sample) {
+    try {
+      Constructor<?> ctor =
+          Histogram.class.getDeclaredConstructor(Sample.class);
+      ctor.setAccessible(true);
+      return (Histogram) ctor.newInstance(sample);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /** @return an abbreviated summary of {@code hist}. */
+  public static String getShortHistogramReport(final Histogram hist) {
+    Snapshot sn = hist.getSnapshot();
+    return "mean=" + DOUBLE_FORMAT.format(hist.mean()) +
+        ", min=" + DOUBLE_FORMAT.format(hist.min()) +
+        ", max=" + DOUBLE_FORMAT.format(hist.max()) +
+        ", stdDev=" + DOUBLE_FORMAT.format(hist.stdDev()) +
+        ", 95th=" + DOUBLE_FORMAT.format(sn.get95thPercentile()) +
+        ", 99th=" + DOUBLE_FORMAT.format(sn.get99thPercentile());
+  }
+
+  /** @return a summary of {@code hist}. */
+  public static String getHistogramReport(final Histogram hist) {
+    Snapshot sn = hist.getSnapshot();
+    return ", mean=" + DOUBLE_FORMAT.format(hist.mean()) +
+        ", min=" + DOUBLE_FORMAT.format(hist.min()) +
+        ", max=" + DOUBLE_FORMAT.format(hist.max()) +
+        ", stdDev=" + DOUBLE_FORMAT.format(hist.stdDev()) +
+        ", 50th=" + DOUBLE_FORMAT.format(sn.getMedian()) +
+        ", 75th=" + DOUBLE_FORMAT.format(sn.get75thPercentile()) +
+        ", 95th=" + DOUBLE_FORMAT.format(sn.get95thPercentile()) +
+        ", 99th=" + DOUBLE_FORMAT.format(sn.get99thPercentile()) +
+        ", 99.9th=" + DOUBLE_FORMAT.format(sn.get999thPercentile()) +
+        ", 99.99th=" + DOUBLE_FORMAT.format(sn.getValue(0.9999)) +
+        ", 99.999th=" + DOUBLE_FORMAT.format(sn.getValue(0.99999));
+  }
+}
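
A quick usage sketch for the new utility (assumes Yammer Metrics 2.2.0 on the classpath; the HistogramReportDemo class and the sample values are invented):

  import com.yammer.metrics.core.Histogram;
  import com.yammer.metrics.stats.UniformSample;
  import org.apache.hadoop.hbase.util.YammerHistogramUtils;

  public class HistogramReportDemo {
    public static void main(String[] args) {
      // newHistogram() hides the reflection needed for the non-public 2.2.0 constructor.
      Histogram latency = YammerHistogramUtils.newHistogram(new UniformSample(1024 * 500));
      for (int i = 0; i < 100000; i++) {
        latency.update(i % 120); // invented latency samples
      }
      // Abbreviated and full reports, as consumed elsewhere in this commit.
      System.out.println(YammerHistogramUtils.getShortHistogramReport(latency));
      System.out.println(YammerHistogramUtils.getHistogramReport(latency));
    }
  }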

http://git-wip-us.apache.org/repos/asf/hbase/blob/4a803e79/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 28324d9..86c6110 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -77,10 +77,7 @@ import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Hash;
-import org.apache.hadoop.hbase.util.MurmurHash;
-import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.*;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
@@ -181,6 +178,25 @@ public class PerformanceEvaluation extends Configured implements Tool {
     ROWS
   }
 
+  protected static class RunResult implements Comparable<RunResult> {
+    public RunResult(long duration, Histogram hist) {
+      this.duration = duration;
+      this.hist = hist;
+    }
+
+    public final long duration;
+    public final Histogram hist;
+
+    @Override
+    public String toString() {
+      return Long.toString(duration);
+    }
+
+    @Override public int compareTo(RunResult o) {
+      return Long.compare(this.duration, o.duration);
+    }
+  }
+
   /**
    * Constructor
    * @param conf Configuration object
@@ -260,12 +276,12 @@ public class PerformanceEvaluation extends Configured implements Tool {
       final Connection con = ConnectionFactory.createConnection(conf);
 
       // Evaluation task
-      long elapsedTime = runOneClient(this.cmd, conf, con, opts, status);
+      RunResult result = PerformanceEvaluation.runOneClient(this.cmd, conf, con, opts, status);
       // Collect how much time the thing took. Report as map output and
       // to the ELAPSED_TIME counter.
-      context.getCounter(Counter.ELAPSED_TIME).increment(elapsedTime);
+      context.getCounter(Counter.ELAPSED_TIME).increment(result.duration);
       context.getCounter(Counter.ROWS).increment(opts.perClientRunRows);
-      context.write(new LongWritable(opts.startRow), new LongWritable(elapsedTime));
+      context.write(new LongWritable(opts.startRow), new LongWritable(result.duration));
       context.progress();
     }
   }
@@ -370,34 +386,32 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
   /*
    * Run all clients in this vm each to its own thread.
-   * @param cmd Command to run.
-   * @throws IOException
    */
-  static long doLocalClients(final TestOptions opts, final Configuration conf)
+  static RunResult[] doLocalClients(final TestOptions opts, final Configuration conf)
       throws IOException, InterruptedException {
     final Class<? extends Test> cmd = determineCommandClass(opts.cmdName);
     assert cmd != null;
-    Future<Long>[] threads = new Future[opts.numClientThreads];
-    long[] timings = new long[opts.numClientThreads];
+    Future<RunResult>[] threads = new Future[opts.numClientThreads];
+    RunResult[] results = new RunResult[opts.numClientThreads];
     ExecutorService pool = Executors.newFixedThreadPool(opts.numClientThreads,
       new ThreadFactoryBuilder().setNameFormat("TestClient-%s").build());
     final Connection con = ConnectionFactory.createConnection(conf);
     for (int i = 0; i < threads.length; i++) {
       final int index = i;
-      threads[i] = pool.submit(new Callable<Long>() {
+      threads[i] = pool.submit(new Callable<RunResult>() {
         @Override
-        public Long call() throws Exception {
+        public RunResult call() throws Exception {
           TestOptions threadOpts = new TestOptions(opts);
           if (threadOpts.startRow == 0) threadOpts.startRow = index * threadOpts.perClientRunRows;
-          long elapsedTime = runOneClient(cmd, conf, con, threadOpts, new Status() {
+          RunResult run = runOneClient(cmd, conf, con, threadOpts, new Status() {
             @Override
             public void setStatus(final String msg) throws IOException {
               LOG.info(msg);
             }
           });
-          LOG.info("Finished in " + elapsedTime +
+          LOG.info("Finished " + Thread.currentThread().getName() + " in " + run.duration +
             "ms over " + threadOpts.perClientRunRows + " rows");
-          return elapsedTime;
+          return run;
         }
       });
     }
@@ -405,27 +419,27 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
     for (int i = 0; i < threads.length; i++) {
       try {
-        timings[i] = threads[i].get();
+        results[i] = threads[i].get();
       } catch (ExecutionException e) {
         throw new IOException(e.getCause());
       }
     }
     final String test = cmd.getSimpleName();
     LOG.info("[" + test + "] Summary of timings (ms): "
-             + Arrays.toString(timings));
-    Arrays.sort(timings);
+             + Arrays.toString(results));
+    Arrays.sort(results);
     long total = 0;
-    for (long timing : timings) {
-      total += timing;
+    for (RunResult result : results) {
+      total += result.duration;
     }
     LOG.info("[" + test + "]"
-      + "\tMin: " + timings[0] + "ms"
-      + "\tMax: " + timings[timings.length - 1] + "ms"
-      + "\tAvg: " + (total / timings.length) + "ms");
+      + "\tMin: " + results[0] + "ms"
+      + "\tMax: " + results[results.length - 1] + "ms"
+      + "\tAvg: " + (total / results.length) + "ms");
 
     con.close();
 
-    return total;
+    return results;
   }
 
   /*
@@ -962,23 +976,21 @@ public class PerformanceEvaluation extends Configured implements Tool {
       return opts.period;
     }
 
+    /**
+     * Populated by testTakedown. Only implemented by RandomReadTest at the moment.
+     */
+    public Histogram getLatency() {
+      return latency;
+    }
+
     void testSetup() throws IOException {
       if (!opts.oneCon) {
         this.connection = ConnectionFactory.createConnection(conf);
       }
       this.table = new HTable(TableName.valueOf(opts.tableName), connection);
       this.table.setAutoFlushTo(opts.autoFlush);
-
-      try {
-        Constructor<?> ctor =
-            Histogram.class.getDeclaredConstructor(com.yammer.metrics.stats.Sample.class);
-        ctor.setAccessible(true);
-        latency = (Histogram) ctor.newInstance(new UniformSample(1024 * 500));
-        valueSize = (Histogram) ctor.newInstance(new UniformSample(1024 * 500));
-      } catch (Exception e) {
-        throw new RuntimeException(e);
-      }
-
+      latency = YammerHistogramUtils.newHistogram(new UniformSample(1024 * 500));
+      valueSize = YammerHistogramUtils.newHistogram(new UniformSample(1024 * 500));
     }
 
     void testTakedown() throws IOException {
@@ -1054,6 +1066,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
       status.setStatus(testName + " Avg      = " + h.mean());
       status.setStatus(testName + " StdDev   = " + h.stdDev());
       status.setStatus(testName + " 50th     = " + sn.getMedian());
+      status.setStatus(testName + " 75th     = " + sn.get75thPercentile());
       status.setStatus(testName + " 95th     = " + sn.get95thPercentile());
       status.setStatus(testName + " 99th     = " + sn.get99thPercentile());
       status.setStatus(testName + " 99.9th   = " + sn.get999thPercentile());
@@ -1063,32 +1076,17 @@ public class PerformanceEvaluation extends Configured implements Tool {
     }
 
     /**
-     * Used formating doubles so only two places after decimal point.
-     */
-    private static DecimalFormat DOUBLE_FORMAT = new DecimalFormat("#0.00");
-
-    /**
      * @return Subset of the histograms' calculation.
      */
-    private String getShortLatencyReport() {
-      return getShortHistogramReport(this.latency);
+    public String getShortLatencyReport() {
+      return YammerHistogramUtils.getShortHistogramReport(this.latency);
     }
 
     /**
      * @return Subset of the histograms' calculation.
      */
-    private String getShortValueSizeReport() {
-      return getShortHistogramReport(this.valueSize);
-    }
-
-    private String getShortHistogramReport(final Histogram h) {
-      Snapshot sn = h.getSnapshot();
-      return "mean=" + DOUBLE_FORMAT.format(h.mean()) +
-        ", min=" + DOUBLE_FORMAT.format(h.min()) +
-        ", max=" + DOUBLE_FORMAT.format(h.max()) +
-        ", stdDev=" + DOUBLE_FORMAT.format(h.stdDev()) +
-        ", 95th=" + DOUBLE_FORMAT.format(sn.get95thPercentile()) +
-        ", 99th=" + DOUBLE_FORMAT.format(sn.get99thPercentile());
+    public String getShortValueSizeReport() {
+      return YammerHistogramUtils.getShortHistogramReport(this.valueSize);
     }
 
     /*
@@ -1492,7 +1490,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     return format(random.nextInt(Integer.MAX_VALUE) % totalRows);
   }
 
-  static long runOneClient(final Class<? extends Test> cmd, Configuration conf, Connection con,
+  static RunResult runOneClient(final Class<? extends Test> cmd, Configuration conf, Connection con,
                            TestOptions opts, final Status status)
       throws IOException, InterruptedException {
     status.setStatus("Start " + cmd + " at offset " + opts.startRow + " for " +
@@ -1519,7 +1517,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
       " (" + calculateMbps((int)(opts.perClientRunRows * opts.sampleRate), totalElapsedTime,
           getAverageValueLength(opts)) + ")");
 
-    return totalElapsedTime;
+    return new RunResult(totalElapsedTime, t.getLatency());
   }
 
   private static int getAverageValueLength(final TestOptions opts) {
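
doLocalClients() now returns a RunResult[] rather than a single total, so callers aggregate durations themselves. A minimal sketch of that pattern with fabricated results (RunResultSketch is a hypothetical class placed in the org.apache.hadoop.hbase test tree so the protected RunResult type is visible; no cluster is involved):

  package org.apache.hadoop.hbase;

  import java.util.Arrays;
  import com.yammer.metrics.stats.UniformSample;
  import org.apache.hadoop.hbase.util.YammerHistogramUtils;

  public class RunResultSketch {
    public static void main(String[] args) {
      // Fabricated results standing in for PerformanceEvaluation.doLocalClients(opts, conf).
      PerformanceEvaluation.RunResult[] results = new PerformanceEvaluation.RunResult[] {
          new PerformanceEvaluation.RunResult(4200, YammerHistogramUtils.newHistogram(new UniformSample(1024))),
          new PerformanceEvaluation.RunResult(3900, YammerHistogramUtils.newHistogram(new UniformSample(1024))),
          new PerformanceEvaluation.RunResult(4600, YammerHistogramUtils.newHistogram(new UniformSample(1024)))
      };
      // RunResult sorts by duration, so min/max fall out of Arrays.sort, as in the new summary code.
      Arrays.sort(results);
      long total = 0;
      for (PerformanceEvaluation.RunResult r : results) {
        total += r.duration;
      }
      System.out.println("Min: " + results[0] + "ms"
          + "  Max: " + results[results.length - 1] + "ms"
          + "  Avg: " + (total / results.length) + "ms");
    }
  }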

