hbase-commits mailing list archives

From: ndimi...@apache.org
Subject: svn commit: r1591001 - /hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
Date: Tue, 29 Apr 2014 15:00:18 GMT
Author: ndimiduk
Date: Tue Apr 29 15:00:18 2014
New Revision: 1591001

URL: http://svn.apache.org/r1591001
Log:
HBASE-11086 Add htrace support for PerfEval

Modified:
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
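
In outline, the patch adds a --traceRate option: the value is parsed as a double and mapped to an htrace Sampler (Sampler.ALWAYS at or above 1.0, a ProbabilitySampler for values between 0 and 1, and Sampler.NEVER at the default of 0.0), each testRow() call is wrapped in a TraceScope, and the SpanReceiverHost is closed when a test finishes. A minimal standalone sketch of that pattern, built only from the htrace and HBase classes the diff below imports; the class name TraceRateSketch and the rate of 0.01 are illustrative, not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.trace.SpanReceiverHost;
import org.htrace.Sampler;
import org.htrace.Trace;
import org.htrace.TraceScope;
import org.htrace.impl.ProbabilitySampler;

public class TraceRateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Span receivers come from the cluster configuration; getInstance wires them up.
    SpanReceiverHost receiverHost = SpanReceiverHost.getInstance(conf);

    double traceRate = 0.01;  // illustrative value; the patch defaults to 0.0 (no tracing)
    Sampler<?> sampler;
    if (traceRate >= 1.0) {
      sampler = Sampler.ALWAYS;                     // trace every operation
    } else if (traceRate > 0.0) {
      sampler = new ProbabilitySampler(traceRate);  // trace a random fraction of operations
    } else {
      sampler = Sampler.NEVER;                      // tracing disabled
    }

    // Wrap the unit of work in a span; the sampler decides whether it is actually traced.
    TraceScope scope = Trace.startSpan("test row", sampler);
    try {
      // ... the work being measured; testRow(i) in the patch ...
    } finally {
      scope.close();  // always end the span
    }

    // Flush and shut down the span receivers once the run is complete.
    receiverHost.closeReceivers();
  }
}

Keeping Sampler.NEVER as the default means tracing adds no overhead unless a run explicitly asks for it.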

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java?rev=1591001&r1=1591000&r2=1591001&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java Tue Apr 29 15:00:18 2014
@@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.filter.Wh
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
+import org.apache.hadoop.hbase.trace.SpanReceiverHost;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Hash;
 import org.apache.hadoop.hbase.util.MurmurHash;
@@ -85,6 +86,10 @@ import com.google.common.util.concurrent
 import com.yammer.metrics.core.Histogram;
 import com.yammer.metrics.stats.UniformSample;
 import com.yammer.metrics.stats.Snapshot;
+import org.htrace.Sampler;
+import org.htrace.Trace;
+import org.htrace.TraceScope;
+import org.htrace.impl.ProbabilitySampler;
 
 /**
  * Script used evaluating HBase performance and scalability.  Runs a HBase
@@ -490,6 +495,7 @@ public class PerformanceEvaluation exten
       this.numClientThreads = that.numClientThreads;
       this.totalRows = that.totalRows;
       this.sampleRate = that.sampleRate;
+      this.traceRate = that.traceRate;
       this.tableName = that.tableName;
       this.flushCommits = that.flushCommits;
       this.writeToWAL = that.writeToWAL;
@@ -513,6 +519,7 @@ public class PerformanceEvaluation exten
     public int numClientThreads = 1;
     public int totalRows = ROWS_PER_GB;
     public float sampleRate = 1.0f;
+    public double traceRate = 0.0;
     public String tableName = TABLE_NAME;
     public boolean flushCommits = true;
     public boolean writeToWAL = true;
@@ -521,8 +528,8 @@ public class PerformanceEvaluation exten
     public int noOfTags = 1;
     public boolean reportLatency = false;
     public int multiGet = 0;
-    boolean inMemoryCF = false;
-    int presplitRegions = 0;
+    public boolean inMemoryCF = false;
+    public int presplitRegions = 0;
     public Compression.Algorithm compression = Compression.Algorithm.NONE;
     public DataBlockEncoding blockEncoding = DataBlockEncoding.NONE;
   }
@@ -546,6 +553,8 @@ public class PerformanceEvaluation exten
     protected final TestOptions opts;
 
     private final Status status;
+    private final Sampler<?> traceSampler;
+    private final SpanReceiverHost receiverHost;
     protected HConnection connection;
     protected HTableInterface table;
 
@@ -561,6 +570,14 @@ public class PerformanceEvaluation exten
       this.opts = options;
       this.status = status;
       this.testName = this.getClass().getSimpleName();
+      receiverHost = SpanReceiverHost.getInstance(conf);
+      if (options.traceRate >= 1.0) {
+        this.traceSampler = Sampler.ALWAYS;
+      } else if (options.traceRate > 0.0) {
+        this.traceSampler = new ProbabilitySampler(options.traceRate);
+      } else {
+        this.traceSampler = Sampler.NEVER;
+      }
       everyN = (int) (opts.totalRows / (opts.totalRows * opts.sampleRate));
       LOG.info("Sampling 1 every " + everyN + " out of " + opts.perClientRunRows + " total rows.");
     }
@@ -597,6 +614,7 @@ public class PerformanceEvaluation exten
       }
       table.close();
       connection.close();
+      receiverHost.closeReceivers();
     }
 
     /*
@@ -625,7 +643,12 @@ public class PerformanceEvaluation exten
       for (int i = opts.startRow; i < lastRow; i++) {
         if (i % everyN != 0) continue;
         long startTime = System.nanoTime();
-        testRow(i);
+        TraceScope scope = Trace.startSpan("test row", traceSampler);
+        try {
+          testRow(i);
+        } finally {
+          scope.close();
+        }
         latency.update((System.nanoTime() - startTime) / 1000);
         if (status != null && i > 0 && (i % getReportingPeriod()) == 0) {
           status.setStatus(generateStatus(opts.startRow, i, lastRow));
@@ -1111,6 +1134,8 @@ public class PerformanceEvaluation exten
       "Default: 1.0.");
     System.err.println(" sampleRate      Execute test on a sample of total " +
       "rows. Only supported by randomRead. Default: 1.0");
+    System.err.println(" traceRate       Enable HTrace spans. Initiate tracing every N rows. " +
+      "Default: 0");
     System.err.println(" table           Alternate table name. Default: 'TestTable'");
     System.err.println(" compress        Compression type to use (GZ, LZO, ...). Default:
'NONE'");
     System.err.println(" flushCommits    Used to determine if the test should flush the table.
" +
@@ -1206,6 +1231,12 @@ public class PerformanceEvaluation exten
           continue;
         }
 
+        final String traceRate = "--traceRate=";
+        if (cmd.startsWith(traceRate)) {
+          opts.traceRate = Double.parseDouble(cmd.substring(traceRate.length()));
+          continue;
+        }
+
         final String table = "--table=";
         if (cmd.startsWith(table)) {
           opts.tableName = cmd.substring(table.length());



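For context on running it: PerformanceEvaluation is launched by class name, so an invocation along the lines of "hbase org.apache.hadoop.hbase.PerformanceEvaluation --traceRate=0.05 randomRead 1" (command and client count chosen here only for illustration) would, with the sampler selection above, trace roughly 5% of the sampled rows; a rate of 1.0 or more traces every row, and the default of 0 leaves tracing off.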