hbase-commits mailing list archives

From mbau...@apache.org
Subject svn commit: r1356924 [2/2] - in /hbase/branches/0.89-fb/src: main/java/org/apache/hadoop/hbase/client/ main/java/org/apache/hadoop/hbase/io/ main/java/org/apache/hadoop/hbase/ipc/ main/java/org/apache/hadoop/hbase/regionserver/ main/java/org/apache/had...
Date Tue, 03 Jul 2012 20:41:07 GMT
Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java?rev=1356924&r1=1356923&r2=1356924&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java Tue Jul  3 20:41:02 2012
@@ -28,6 +28,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.client.HTableMultiplexer.HTableMultiplexerStatus;
+import org.apache.hadoop.hbase.ipc.HBaseRPCOptions;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -80,7 +81,7 @@ public class TestHTableMultiplexer {
     for (int i = 0; i < NUM_REGIONS; i++) {
       Put put = new Put(startRows[i]);
       put.add(FAMILY, QUALIFIER, VALUE1);
-      success = multiplexer.put(TABLE, put);
+      success = multiplexer.put(TABLE, put, HBaseRPCOptions.DEFAULT);
       Assert.assertTrue(success);
 
       // ensure the buffer has been flushed
@@ -101,7 +102,7 @@ public class TestHTableMultiplexer {
       put.add(FAMILY, QUALIFIER, VALUE2);
       multiput.add(put);
     }
-    failedPuts = multiplexer.put(TABLE, multiput);
+    failedPuts = multiplexer.put(TABLE, multiput, HBaseRPCOptions.DEFAULT);
     Assert.assertTrue(failedPuts == null);
 
     // ensure the buffer has been flushed

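The test updates above track the new HTableMultiplexer.put() signatures, which now take an HBaseRPCOptions argument. A minimal sketch of the updated call pattern, using only what the diff itself shows (TABLE, put, and multiput are the test's own variables; the single-put overload returns a success flag, the list overload returns the puts that could not be queued, or null if all were accepted):

    HBaseRPCOptions options = HBaseRPCOptions.DEFAULT;

    // Single put: returns true if the put was queued successfully.
    boolean success = multiplexer.put(TABLE, put, options);

    // List of puts: returns the puts that failed to queue,
    // or null if every put was accepted.
    List<Put> failedPuts = multiplexer.put(TABLE, multiput, options);

Callers that do not need per-request options can simply pass HBaseRPCOptions.DEFAULT, as the test does.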
Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/ipc/TestPerRequestProfiling.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/ipc/TestPerRequestProfiling.java?rev=1356924&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/ipc/TestPerRequestProfiling.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/ipc/TestPerRequestProfiling.java Tue Jul  3 20:41:02 2012
@@ -0,0 +1,177 @@
+package org.apache.hadoop.hbase.ipc;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.io.hfile.Compression;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestPerRequestProfiling {
+  final Log LOG = LogFactory.getLog(getClass());
+  private static HBaseTestingUtility TEST_UTIL;
+  private static int SLAVES = 1;
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    // start the cluster
+    Configuration conf = HBaseConfiguration.create();
+    TEST_UTIL = new HBaseTestingUtility(conf);
+    TEST_UTIL.startMiniCluster(SLAVES);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @Before
+  public void setUp() throws Exception {
+    // Nothing to do.
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @After
+  public void tearDown() throws Exception {
+    // Nothing to do.
+  }
+  
+  @Test
+  public void testPerRequestProfiling() throws Exception {
+    byte[] TABLE = Bytes.toBytes("testPerRequestProfiling");
+    byte[][] FAMILIES = new byte[][] { Bytes.toBytes("foo") };
+    byte [] QUALIFIER = Bytes.toBytes("testQualifier");
+    byte [] VALUE = Bytes.toBytes("testValue");
+
+    // create a table
+    TEST_UTIL.createTable(TABLE, FAMILIES);
+    LOG.debug("Created table " + new String(TABLE));
+    
+    // open the table
+    Configuration conf = HBaseConfiguration.create();
+    String zkPortStr = TEST_UTIL.getConfiguration().get(
+        "hbase.zookeeper.property.clientPort");
+    conf.setInt("hbase.zookeeper.property.clientPort", 
+        Integer.parseInt(zkPortStr));
+    HTable table = new HTable(conf, TABLE);
+
+    LOG.debug("Testing with profiling off");
+    // put some values with profiling off
+    byte [][] ROWS = { Bytes.toBytes("a"), Bytes.toBytes("b") };
+    for (int i = 0; i < ROWS.length; i++) {
+      Put put = new Put(ROWS[i]);
+      put.add(FAMILIES[0], QUALIFIER, VALUE);
+      table.put(put);
+      // autoflush is on by default, or else move this check after flush
+      assertTrue (table.getProfilingData () == null);
+    }
+    LOG.debug("Wrote some puts to table " + new String(TABLE));
+
+    // flush the table
+    table.flushCommits();
+    LOG.debug("Flushed table " + new String(TABLE));
+
+    // read back the values
+    for (int i = 0; i < ROWS.length; i++) {
+      Get get = new Get(ROWS[i]);
+      get.addColumn(FAMILIES[0], QUALIFIER);
+      Result result = table.get(get);
+      
+      assertEquals(new String(VALUE), 
+          new String(result.getValue(FAMILIES[0], QUALIFIER)));
+      
+      assertTrue (table.getProfilingData () == null);
+    }
+    LOG.debug("Read and verified from table " + new String(TABLE));
+    
+    // turn profiling on and repeat test
+    table.setProfiling(true);
+    
+    LOG.debug("Testing with profiling on");
+    for (int i = 0; i < ROWS.length; i++) {
+      Put put = new Put(ROWS[i]);
+      put.add(FAMILIES[0], QUALIFIER, VALUE);
+      table.put(put);
+      // autoflush is on by default, or else move this check after flush
+      assertTrue (table.getProfilingData () != null);
+    }
+    LOG.debug("Wrote some puts to table " + new String(TABLE));
+
+    // flush the table
+    table.flushCommits();
+    LOG.debug("Flushed table " + new String(TABLE));
+
+    // read back the values
+    for (int i = 0; i < ROWS.length; i++) {
+      Get get = new Get(ROWS[i]);
+      get.addColumn(FAMILIES[0], QUALIFIER);
+      Result result = table.get(get);
+      
+      assertEquals(new String(VALUE), 
+          new String(result.getValue(FAMILIES[0], QUALIFIER)));
+      
+      assertTrue (table.getProfilingData () != null);
+    }
+    LOG.debug("Read and verified from table " + new String(TABLE));
+    
+    // turn profiling back off and repeat test to make sure
+    // profiling data gets cleared
+    table.setProfiling(false);
+    
+    LOG.debug("Testing with profiling off");
+    for (int i = 0; i < ROWS.length; i++) {
+      Put put = new Put(ROWS[i]);
+      put.add(FAMILIES[0], QUALIFIER, VALUE);
+      table.put(put);
+      // autoflush is on by default, or else move this check after flush
+      assertTrue (table.getProfilingData () == null);
+    }
+    LOG.debug("Wrote some puts to table " + new String(TABLE));
+
+    // flush the table
+    table.flushCommits();
+    LOG.debug("Flushed table " + new String(TABLE));
+
+    // read back the values
+    for (int i = 0; i < ROWS.length; i++) {
+      Get get = new Get(ROWS[i]);
+      get.addColumn(FAMILIES[0], QUALIFIER);
+      Result result = table.get(get);
+      
+      assertEquals(new String(VALUE), 
+          new String(result.getValue(FAMILIES[0], QUALIFIER)));
+      
+      assertTrue (table.getProfilingData () == null);
+    }
+    LOG.debug("Read and verified from table " + new String(TABLE));
+  }
+}

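The new test walks the per-request profiling API through all three states: profiling off (getProfilingData() returns null), profiling on (data is available after each request), and off again (the data is cleared). A condensed sketch of the client-side pattern the test verifies; the exact return type of getProfilingData() is not shown in this diff, so it is left implicit here:

    HTable table = new HTable(conf, TABLE);

    table.setProfiling(true);       // request profiling on subsequent RPCs
    table.put(put);                 // any get/put now carries the flag
    assert table.getProfilingData() != null;  // data for the last request

    table.setProfiling(false);
    table.get(get);
    assert table.getProfilingData() == null;  // cleared once profiling is off

Note that the test also copies the mini-cluster's ZooKeeper client port into a fresh Configuration before opening the table, so the client connects to the test cluster rather than whatever the default configuration points at.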
Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java?rev=1356924&r1=1356923&r2=1356924&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java Tue Jul  3 20:41:02 2012
@@ -95,6 +95,7 @@ public class LoadTestTool extends Abstra
   private static final String OPT_START_KEY = "start_key";
   private static final String OPT_TABLE_NAME = "tn";
   private static final String OPT_ZK_QUORUM = "zk";
+  private static final String OPT_PROFILING = "profiling";
 
   private static final long DEFAULT_START_KEY = 0;
 
@@ -125,6 +126,7 @@ public class LoadTestTool extends Abstra
   private int keyWindow = MultiThreadedReader.DEFAULT_KEY_WINDOW;
   private int maxReadErrors = MultiThreadedReader.DEFAULT_MAX_ERRORS;
   private int verifyPercent;
+  private int profilePercent = 0;
 
   private String[] splitColonSeparated(String option,
       int minNumCols, int maxNumCols) {
@@ -196,6 +198,8 @@ public class LoadTestTool extends Abstra
     addOptWithArg(OPT_START_KEY, "The first key to read/write " +
         "(a 0-based index). The default value is " +
         DEFAULT_START_KEY + ".");
+    addOptWithArg(OPT_PROFILING, "Percent of reads/writes to request " +
+        "profiling data");
   }
 
   @Override
@@ -266,6 +270,14 @@ public class LoadTestTool extends Abstra
       System.out.println("Percent of keys to verify: " + verifyPercent);
       System.out.println("Reader threads: " + numReaderThreads);
     }
+    
+    if (cmd.hasOption(OPT_PROFILING)) {
+      this.profilePercent = parseInt(cmd.getOptionValue(OPT_PROFILING),
+          0, 100);
+      
+      System.out.println ("Requesting profiling data on " + profilePercent +
+          "% of reads/writes");
+    }
 
     System.out.println("Key range: [" + startKey + ".." + (endKey - 1) + "]");
   }
@@ -299,7 +311,8 @@ public class LoadTestTool extends Abstra
     applyColumnFamilyOptions(tableName, COLUMN_FAMILIES);
 
     if (isWrite) {
-      writerThreads = new MultiThreadedWriter(conf, tableName, COLUMN_FAMILY);
+      writerThreads = new MultiThreadedWriter(conf, tableName, COLUMN_FAMILY, 
+          profilePercent);
       writerThreads.setMultiPut(isMultiPut);
       writerThreads.setColumnsPerKey(minColsPerKey, maxColsPerKey);
       writerThreads.setDataSize(minColDataSize, maxColDataSize);
@@ -307,7 +320,7 @@ public class LoadTestTool extends Abstra
 
     if (isRead) {
       readerThreads = new MultiThreadedReader(conf, tableName, COLUMN_FAMILY,
-          verifyPercent);
+          verifyPercent, profilePercent);
       readerThreads.setMaxErrors(maxReadErrors);
       readerThreads.setKeyWindow(keyWindow);
     }

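LoadTestTool gains a -profiling option that takes a percentage in [0, 100] (enforced by the parseInt bounds above) and threads it through to the reader and writer pools. An illustrative invocation, assuming the option names shown in this diff (-tn, -zk, -profiling) and eliding the tool's unchanged read/write flags:

    bin/hbase org.apache.hadoop.hbase.util.LoadTestTool \
        -tn test_table -zk localhost -profiling 10 <read/write options>

With -profiling 10, roughly 10% of reads and writes will request profiling data.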
Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java?rev=1356924&r1=1356923&r2=1356924&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java Tue Jul  3 20:41:02 2012
@@ -39,6 +39,7 @@ public class MultiThreadedReader extends
 
   private Set<HBaseReaderThread> readers = new HashSet<HBaseReaderThread>();
   private final double verifyPercent;
+  private final double profilePercent;
   private volatile boolean aborted;
 
   private MultiThreadedWriter writer = null;
@@ -73,8 +74,14 @@ public class MultiThreadedReader extends
 
   public MultiThreadedReader(Configuration conf, byte[] tableName,
       byte[] columnFamily, double verifyPercent) {
+    this (conf, tableName, columnFamily, verifyPercent, 0);
+  }
+  
+  public MultiThreadedReader(Configuration conf, byte[] tableName,
+      byte[] columnFamily, double verifyPercent, double profilePercent) {
     super(conf, tableName, columnFamily, "R");
     this.verifyPercent = verifyPercent;
+    this.profilePercent = profilePercent;
   }
 
   public void linkToWriter(MultiThreadedWriter writer) {
@@ -231,7 +238,8 @@ public class MultiThreadedReader extends
           LOG.info("[" + readerId + "] " + "Querying key " + keyToRead
               + ", cf " + Bytes.toStringBinary(columnFamily));
         }
-        queryKey(get, random.nextInt(100) < verifyPercent);
+        queryKey(get, random.nextInt(100) < verifyPercent, 
+            random.nextInt(100) < profilePercent);
       } catch (IOException e) {
         numReadFailures.addAndGet(1);
         LOG.debug("[" + readerId + "] FAILED read, key = " + (keyToRead + "")
@@ -241,11 +249,12 @@ public class MultiThreadedReader extends
       return get;
     }
 
-    public void queryKey(Get get, boolean verify) throws IOException {
+    public void queryKey(Get get, boolean verify, boolean profile) throws IOException {
       String rowKey = Bytes.toString(get.getRow());
 
       // read the data
       long start = System.currentTimeMillis();
+      table.setProfiling (profile);
       Result result = table.get(get);
       totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
       numKeys.addAndGet(1);

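The sampling idiom used here (and mirrored in MultiThreadedWriter below) is a per-operation Bernoulli trial: random.nextInt(100) is uniform on [0, 100), so comparing it against the configured percentage profiles each request independently with the requested probability. A minimal sketch of the pattern, pulled out of queryKey():

    // Profile this request with probability profilePercent / 100;
    // nextInt(100) is uniform on [0, 100), so the comparison holds
    // for approximately profilePercent percent of operations.
    boolean profile = random.nextInt(100) < profilePercent;

    table.setProfiling(profile);    // applies to this thread's HTable
    Result result = table.get(get);

Setting the flag immediately before the get() scopes it to the next request, assuming each reader thread owns its own HTable instance, as these test utilities conventionally do.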
Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java?rev=1356924&r1=1356923&r2=1356924&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java Tue Jul  3 20:41:02 2012
@@ -43,6 +43,8 @@ public class MultiThreadedWriter extends
   private Set<HBaseWriterThread> writers = new HashSet<HBaseWriterThread>();
 
   private boolean isMultiPut = false;
+  
+  private final double profilePercent;
 
   /**
    * A temporary place to keep track of inserted keys. This is written to by
@@ -76,10 +78,16 @@ public class MultiThreadedWriter extends
 
   /** Enable this if used in conjunction with a concurrent reader. */
   private boolean trackInsertedKeys;
-
+  
   public MultiThreadedWriter(Configuration conf, byte[] tableName,
       byte[] columnFamily) {
+    this (conf, tableName, columnFamily, 0);
+  }
+  
+  public MultiThreadedWriter(Configuration conf, byte[] tableName,
+      byte[] columnFamily, double profilePercent) {
     super(conf, tableName, columnFamily, "W");
+    this.profilePercent = profilePercent;
   }
 
   /** Use multi-puts vs. separate puts for every column in a row */
@@ -141,10 +149,11 @@ public class MultiThreadedWriter extends
               % (maxColumnsPerKey - minColumnsPerKey);
           numKeys.addAndGet(1);
           if (isMultiPut) {
-            multiPutInsertKey(rowKey, 0, numColumns);
+            multiPutInsertKey(rowKey, 0, numColumns,
+                random.nextInt(100) < profilePercent);
           } else {
             for (long col = 0; col < numColumns; ++col) {
-              insert(rowKey, col);
+              insert(rowKey, col, random.nextInt(100) < profilePercent);
             }
           }
           if (trackInsertedKeys) {
@@ -161,11 +170,12 @@ public class MultiThreadedWriter extends
       }
     }
 
-    public void insert(long rowKey, long col) {
+    public void insert(long rowKey, long col, boolean profile) {
       Put put = new Put(longToByteArrayKey(rowKey));
       String colAsStr = String.valueOf(col);
       put.add(columnFamily, Bytes.toBytes(colAsStr),
           dataGenerator.generateRandomSizeValue(rowKey, colAsStr));
+      table.setProfiling(profile);
       try {
         long start = System.currentTimeMillis();
         table.put(put);
@@ -178,7 +188,8 @@ public class MultiThreadedWriter extends
       }
     }
 
-    public void multiPutInsertKey(long rowKey, long startCol, long endCol) {
+    public void multiPutInsertKey(long rowKey, long startCol, long endCol,
+        boolean profile) {
       if (verbose) {
         LOG.debug("Preparing put for key = " + rowKey + ", cols = ["
             + startCol + ", " + endCol + ")");
@@ -198,7 +209,8 @@ public class MultiThreadedWriter extends
         value = dataGenerator.generateRandomSizeValue(rowKey, qualStr);
         put.add(columnFamily, columnQualifier, value);
       }
-
+      table.setProfiling (profile);
+      
       try {
         long start = System.currentTimeMillis();
         table.put(put);

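One behavioral detail worth noting in the writer changes: the profiling decision is made at different granularities on the two write paths. With multi-put enabled, a single coin flip covers the whole row; with individual puts, every column's put is sampled independently. The branch from the diff, annotated:

    if (isMultiPut) {
      // One profiling decision per row; all columns share it.
      multiPutInsertKey(rowKey, 0, numColumns,
          random.nextInt(100) < profilePercent);
    } else {
      for (long col = 0; col < numColumns; ++col) {
        // One independent decision per column put.
        insert(rowKey, col, random.nextInt(100) < profilePercent);
      }
    }

Either way roughly profilePercent% of put RPCs request profiling; the multi-put path just makes the decision once per row.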

