hbase-commits mailing list archives

From: st...@apache.org
Subject: hbase git commit: HBASE-15405 Fix PE logging and wrong defaults in help message. (Apekshit)
Date: Thu, 14 Apr 2016 16:01:13 GMT
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 520c815ab -> 6d2dc2a8b


HBASE-15405 Fix PE logging and wrong defaults in help message. (Apekshit)

Change-Id: I81f5f57e1db18df252fa92c0cc14bf950ab17034

Signed-off-by: stack <stack@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6d2dc2a8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6d2dc2a8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6d2dc2a8

Branch: refs/heads/branch-1.3
Commit: 6d2dc2a8bd9f9684143a1663ccc7470b2add4643
Parents: 520c815
Author: Apekshit <apeksharma@gmail.com>
Authored: Wed Apr 13 12:27:21 2016 -0700
Committer: stack <stack@apache.org>
Committed: Thu Apr 14 09:01:05 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/util/YammerHistogramUtils.java |  19 +++-
 .../hadoop/hbase/PerformanceEvaluation.java     | 112 ++++++++-----------
 2 files changed, 65 insertions(+), 66 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/6d2dc2a8/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
index 120f170..979467a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
@@ -65,7 +65,7 @@ public final class YammerHistogramUtils {
   /** @return a summary of {@code hist}. */
   public static String getHistogramReport(final Histogram hist) {
     Snapshot sn = hist.getSnapshot();
-    return ", mean=" + DOUBLE_FORMAT.format(hist.mean()) +
+    return "mean=" + DOUBLE_FORMAT.format(hist.mean()) +
         ", min=" + DOUBLE_FORMAT.format(hist.min()) +
         ", max=" + DOUBLE_FORMAT.format(hist.max()) +
         ", stdDev=" + DOUBLE_FORMAT.format(hist.stdDev()) +
@@ -77,4 +77,21 @@ public final class YammerHistogramUtils {
         ", 99.99th=" + DOUBLE_FORMAT.format(sn.getValue(0.9999)) +
         ", 99.999th=" + DOUBLE_FORMAT.format(sn.getValue(0.99999));
   }
+
+  /** @return pretty summary of {@code hist}. */
+  public static String getPrettyHistogramReport(final Histogram h) {
+    Snapshot sn = h.getSnapshot();
+    return
+        "Mean      = " + DOUBLE_FORMAT.format(h.mean()) + "\n" +
+        "Min       = " + DOUBLE_FORMAT.format(h.min()) + "\n" +
+        "Max       = " + DOUBLE_FORMAT.format(h.max()) + "\n" +
+        "StdDev    = " + DOUBLE_FORMAT.format(h.stdDev()) + "\n" +
+        "50th      = " + DOUBLE_FORMAT.format(sn.getMedian()) + "\n" +
+        "75th      = " + DOUBLE_FORMAT.format(sn.get75thPercentile()) + "\n" +
+        "95th      = " + DOUBLE_FORMAT.format(sn.get95thPercentile()) + "\n" +
+        "99th      = " + DOUBLE_FORMAT.format(sn.get99thPercentile()) + "\n" +
+        "99.9th    = " + DOUBLE_FORMAT.format(sn.get999thPercentile()) + "\n" +
+        "99.99th   = " + DOUBLE_FORMAT.format(sn.getValue(0.9999)) + "\n" +
+        "99.999th  = " + DOUBLE_FORMAT.format(sn.getValue(0.99999));
+  }
 }
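
For context (not part of the commit): a minimal sketch of how the existing getHistogramReport and the new getPrettyHistogramReport could be exercised, using the same YammerHistogramUtils.newHistogram(new UniformSample(...)) construction that PerformanceEvaluation uses in the second file of this diff. The demo class name and the metrics-core 2.x package names (com.yammer.metrics.core, com.yammer.metrics.stats) are assumptions, not taken from this patch.

    import com.yammer.metrics.core.Histogram;        // assumed metrics-core 2.x package
    import com.yammer.metrics.stats.UniformSample;   // assumed metrics-core 2.x package
    import org.apache.hadoop.hbase.util.YammerHistogramUtils;

    // Hypothetical demo class, not part of the patch.
    public class HistogramReportDemo {
      public static void main(String[] args) {
        // Same construction PerformanceEvaluation.Test#testSetup uses in this diff.
        Histogram latency = YammerHistogramUtils.newHistogram(new UniformSample(1024 * 500));
        for (long micros = 1; micros <= 1000; micros++) {
          latency.update(micros);  // record fake latencies in microseconds
        }
        // One-line summary: "mean=..., min=..., max=..., stdDev=..., 50th=..., ..."
        System.out.println(YammerHistogramUtils.getHistogramReport(latency));
        // Multi-line, aligned report added by this commit.
        System.out.println(YammerHistogramUtils.getPrettyHistogramReport(latency));
      }
    }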

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d2dc2a8/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 05d788e..a017d33 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -957,8 +957,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
     protected Connection connection;
 
     private String testName;
-    private Histogram latency;
-    private Histogram valueSize;
+    private Histogram latencyHistogram;
+    private Histogram valueSizeHistogram;
     private RandomDistribution.Zipf zipf;
 
     /**
@@ -1009,7 +1009,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
     void updateValueSize(final int valueSize) {
       if (!isRandomValueSize()) return;
-      this.valueSize.update(valueSize);
+      this.valueSizeHistogram.update(valueSize);
     }
 
     String generateStatus(final int sr, final int i, final int lr) {
@@ -1028,8 +1028,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
     /**
      * Populated by testTakedown. Only implemented by RandomReadTest at the moment.
      */
-    public Histogram getLatency() {
-      return latency;
+    public Histogram getLatencyHistogram() {
+      return latencyHistogram;
     }
 
     void testSetup() throws IOException {
@@ -1037,16 +1037,28 @@ public class PerformanceEvaluation extends Configured implements Tool {
         this.connection = ConnectionFactory.createConnection(conf);
       }
       onStartup();
-      latency = YammerHistogramUtils.newHistogram(new UniformSample(1024 * 500));
-      valueSize = YammerHistogramUtils.newHistogram(new UniformSample(1024 * 500));
+      latencyHistogram = YammerHistogramUtils.newHistogram(new UniformSample(1024 * 500));
+      valueSizeHistogram = YammerHistogramUtils.newHistogram(new UniformSample(1024 * 500));
     }
 
     abstract void onStartup() throws IOException;
 
     void testTakedown() throws IOException {
-      reportLatency();
-      reportValueSize();
       onTakedown();
+      // Print all stats for this thread continuously.
+      // Synchronize on Test.class so different threads don't intermingle the
+      // output. We can't use 'this' here because each thread has its own instance of Test class.
+      synchronized (Test.class) {
+        status.setStatus("Test : " + testName + ", Thread : " + Thread.currentThread().getName());
+        status.setStatus("Latency (us) : " + YammerHistogramUtils.getHistogramReport(
+            latencyHistogram));
+        status.setStatus("Num measures (latency) : " + latencyHistogram.count());
+        status.setStatus(YammerHistogramUtils.getPrettyHistogramReport(latencyHistogram));
+        status.setStatus("ValueSize (bytes) : "
+            + YammerHistogramUtils.getHistogramReport(valueSizeHistogram));
+        status.setStatus("Num measures (ValueSize): " + valueSizeHistogram.count());
+        status.setStatus(YammerHistogramUtils.getPrettyHistogramReport(valueSizeHistogram));
+      }
       if (!opts.oneCon) {
         connection.close();
       }
@@ -1096,7 +1108,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
         } finally {
           scope.close();
         }
-        latency.update((System.nanoTime() - startTime) / 1000);
+        latencyHistogram.update((System.nanoTime() - startTime) / 1000);
         if (status != null && i > 0 && (i % getReportingPeriod()) == 0) {
           status.setStatus(generateStatus(startRow, i, lastRow));
         }
@@ -1104,48 +1116,17 @@ public class PerformanceEvaluation extends Configured implements Tool {
     }
 
     /**
-     * report percentiles of latency
-     * @throws IOException
-     */
-    private void reportLatency() throws IOException {
-      status.setStatus(testName + " latency log (microseconds), on " +
-          latency.count() + " measures");
-      reportHistogram(this.latency);
-    }
-
-    private void reportValueSize() throws IOException {
-      status.setStatus(testName + " valueSize after " +
-          valueSize.count() + " measures");
-      reportHistogram(this.valueSize);
-    }
-
-    private void reportHistogram(final Histogram h) throws IOException {
-      Snapshot sn = h.getSnapshot();
-      status.setStatus(testName + " Min      = " + h.min());
-      status.setStatus(testName + " Avg      = " + h.mean());
-      status.setStatus(testName + " StdDev   = " + h.stdDev());
-      status.setStatus(testName + " 50th     = " + sn.getMedian());
-      status.setStatus(testName + " 75th     = " + sn.get75thPercentile());
-      status.setStatus(testName + " 95th     = " + sn.get95thPercentile());
-      status.setStatus(testName + " 99th     = " + sn.get99thPercentile());
-      status.setStatus(testName + " 99.9th   = " + sn.get999thPercentile());
-      status.setStatus(testName + " 99.99th  = " + sn.getValue(0.9999));
-      status.setStatus(testName + " 99.999th = " + sn.getValue(0.99999));
-      status.setStatus(testName + " Max      = " + h.max());
-    }
-
-    /**
      * @return Subset of the histograms' calculation.
      */
     public String getShortLatencyReport() {
-      return YammerHistogramUtils.getShortHistogramReport(this.latency);
+      return YammerHistogramUtils.getShortHistogramReport(this.latencyHistogram);
     }
 
     /**
      * @return Subset of the histograms' calculation.
      */
     public String getShortValueSizeReport() {
-      return YammerHistogramUtils.getShortHistogramReport(this.valueSize);
+      return YammerHistogramUtils.getShortHistogramReport(this.valueSizeHistogram);
     }
 
     /*
@@ -1750,7 +1731,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
       " (" + calculateMbps((int)(opts.perClientRunRows * opts.sampleRate), totalElapsedTime,
           getAverageValueLength(opts), opts.columns) + ")");
 
-    return new RunResult(totalElapsedTime, t.getLatency());
+    return new RunResult(totalElapsedTime, t.getLatencyHistogram());
   }
 
   private static int getAverageValueLength(final TestOptions opts) {
@@ -1795,46 +1776,48 @@ public class PerformanceEvaluation extends Configured implements Tool {
     System.err.println();
     System.err.println("Options:");
     System.err.println(" nomapred        Run multiple clients using threads " +
-      "(rather than use mapreduce)");
-    System.err.println(" rows            Rows each client runs. Default: One million");
+        "(rather than use mapreduce)");
+    System.err.println(" rows            Rows each client runs. Default: " +
+        DEFAULT_OPTS.getPerClientRunRows());
     System.err.println(" size            Total size in GiB. Mutually exclusive with --rows.
" +
-      "Default: 1.0.");
+        "Default: 1.0.");
     System.err.println(" sampleRate      Execute test on a sample of total " +
-      "rows. Only supported by randomRead. Default: 1.0");
+        "rows. Only supported by randomRead. Default: 1.0");
     System.err.println(" traceRate       Enable HTrace spans. Initiate tracing every N rows.
" +
-      "Default: 0");
+        "Default: 0");
     System.err.println(" table           Alternate table name. Default: 'TestTable'");
     System.err.println(" multiGet        If >0, when doing RandomRead, perform multiple
gets " +
-      "instead of single gets. Default: 0");
+        "instead of single gets. Default: 0");
     System.err.println(" compress        Compression type to use (GZ, LZO, ...). Default:
'NONE'");
     System.err.println(" flushCommits    Used to determine if the test should flush the table.
" +
-      "Default: false");
+        "Default: false");
     System.err.println(" writeToWAL      Set writeToWAL on puts. Default: True");
     System.err.println(" autoFlush       Set autoFlush on htable. Default: False");
     System.err.println(" oneCon          all the threads share the same connection. Default:
False");
     System.err.println(" presplit        Create presplit table. Recommended for accurate
perf " +
-      "analysis (see guide).  Default: disabled");
+        "analysis (see guide).  Default: disabled");
     System.err.println(" inmemory        Tries to keep the HFiles of the CF " +
-      "inmemory as far as possible. Not guaranteed that reads are always served " +
-      "from memory.  Default: false");
+        "inmemory as far as possible. Not guaranteed that reads are always served " +
+        "from memory.  Default: false");
     System.err.println(" usetags         Writes tags along with KVs. Use with HFile V3. "
+
-      "Default: false");
+        "Default: false");
     System.err.println(" numoftags       Specify the no of tags that would be needed. " +
-       "This works only if usetags is true.");
-    System.err.println(" filterAll       Helps to filter out all the rows on the server side"
-        + " there by not returning any thing back to the client.  Helps to check the server
side"
-        + " performance.  Uses FilterAllFilter internally. ");
+        "This works only if usetags is true.");
+    System.err.println(" filterAll       Helps to filter out all the rows on the server side"
+
+        " there by not returning any thing back to the client.  Helps to check the server
side" +
+        " performance.  Uses FilterAllFilter internally. ");
     System.err.println(" latency         Set to report operation latencies. Default: False");
     System.err.println(" bloomFilter      Bloom filter type, one of " + Arrays.toString(BloomType.values()));
-    System.err.println(" valueSize       Pass value size to use: Default: 1024");
+    System.err.println(" valueSize       Pass value size to use: Default: " +
+        DEFAULT_OPTS.getValueSize());
     System.err.println(" valueRandom     Set if we should vary value size between 0 and "
+
         "'valueSize'; set on read for stats on size: Default: Not set.");
     System.err.println(" valueZipf       Set if we should vary value size between 0 and "
+
         "'valueSize' in zipf form: Default: Not set.");
     System.err.println(" period          Report every 'period' rows: " +
-      "Default: opts.perClientRunRows / 10");
+        "Default: opts.perClientRunRows / 10 = " + DEFAULT_OPTS.getPerClientRunRows()/10);
     System.err.println(" multiGet        Batch gets together into groups of N. Only supported
" +
-      "by randomRead. Default: disabled");
+        "by randomRead. Default: disabled");
     System.err.println(" addColumns      Adds columns to scans/gets explicitly. Default:
true");
     System.err.println(" replicas        Enable region replica testing. Defaults: 1.");
     System.err.println(" splitPolicy     Specify a custom RegionSplitPolicy for the table.");
@@ -1853,9 +1836,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
     }
     System.err.println();
     System.err.println("Args:");
-    System.err.println(" nclients        Integer. Required. Total number of " +
-      "clients (and HRegionServers)");
-    System.err.println("                 running: 1 <= value <= 500");
+    System.err.println(" nclients        Integer. Required. Total number of clients "
+        + "(and HRegionServers) running. 1 <= value <= 500");
     System.err.println("Examples:");
     System.err.println(" To run a single client doing the default 1M sequentialWrites:");
     System.err.println(" $ bin/hbase " + className + " sequentialWrite 1");

