From: mbautin@apache.org
To: commits@hbase.apache.org
Reply-To: dev@hbase.apache.org
Subject: svn commit: r1307631 [2/2] - in /hbase/branches/0.89-fb: ./ src/main/java/org/apache/hadoop/hbase/ src/main/java/org/apache/hadoop/hbase/regionserver/ src/main/java/org/apache/hadoop/hbase/util/ src/main/java/org/apache/hadoop/hbase/zookeeper/ src/test...
Date: Fri, 30 Mar 2012 21:49:14 -0000
Message-Id: <20120330214915.7A26A2388993@eris.apache.org>

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadParallel.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadParallel.java?rev=1307631&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadParallel.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadParallel.java Fri Mar 30 21:49:13 2012
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * A write/read/verify load test on a mini HBase cluster. Tests reading
+ * and writing at the same time.
+ */
+@RunWith(Parameterized.class)
+public class TestMiniClusterLoadParallel
+    extends TestMiniClusterLoadSequential {
+
+  public TestMiniClusterLoadParallel(boolean isMultiPut,
+      DataBlockEncoding encoding) {
+    super(isMultiPut, encoding);
+  }
+
+  @Test(timeout=TIMEOUT_MS)
+  public void loadTest() throws Exception {
+    prepareForLoadTest();
+
+    readerThreads.linkToWriter(writerThreads);
+
+    writerThreads.start(0, numKeys, NUM_THREADS);
+    readerThreads.start(0, numKeys, NUM_THREADS);
+
+    writerThreads.waitForFinish();
+    readerThreads.waitForFinish();
+
+    assertEquals(0, writerThreads.getNumWriteFailures());
+    assertEquals(0, readerThreads.getNumReadFailures());
+    assertEquals(0, readerThreads.getNumReadErrors());
+    assertEquals(numKeys, readerThreads.getNumUniqueKeysVerified());
+  }
+
+}
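
For context: loadTest() above starts the reader and writer pools concurrently, and readerThreads.linkToWriter(writerThreads) appears to make the readers trail the writers so they only request keys that have already been inserted. Below is a minimal, illustrative sketch of that reader-follows-writer idea using only JDK classes; WriterProgress, LinkedReader and ReaderFollowsWriterSketch are made-up names for illustration, not HBase APIs and not part of this commit.

import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical stand-in for the writer side's progress tracking.
class WriterProgress {
  private final AtomicLong highestWrittenKey = new AtomicLong(-1);
  void keyWritten(long key) { highestWrittenKey.set(key); }
  long highestWritten() { return highestWrittenKey.get(); }
}

// Hypothetical stand-in for a reader linked to a writer: it never asks
// for a key beyond the highest key the writer has confirmed as written.
class LinkedReader {
  private final WriterProgress writer;
  LinkedReader(WriterProgress writer) { this.writer = writer; }

  long nextKeyToRead() {
    long max = writer.highestWritten();
    // Nothing written yet: signal the caller to back off rather than
    // reading a key that cannot exist.
    return max < 0 ? -1 : ThreadLocalRandom.current().nextLong(max + 1);
  }
}

public class ReaderFollowsWriterSketch {
  public static void main(String[] args) {
    WriterProgress writer = new WriterProgress();
    LinkedReader reader = new LinkedReader(writer);
    for (long key = 0; key < 5; key++) {
      writer.keyWritten(key);
      System.out.println("writer at " + key
          + ", reader may fetch key " + reader.nextKeyToRead());
    }
  }
}

Coupling the readers to the writer's progress is presumably what lets this parallel variant assert zero read failures even though reads begin before all keys exist.
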

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java?rev=1307631&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java Fri Mar 30 21:49:13 2012
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.Compression;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+/**
+ * A write/read/verify load test on a mini HBase cluster. Tests writing
+ * and then reading.
+ */
+@RunWith(Parameterized.class)
+public class TestMiniClusterLoadSequential {
+
+  private static final Log LOG = LogFactory.getLog(
+      TestMiniClusterLoadSequential.class);
+
+  protected static final byte[] TABLE = Bytes.toBytes("load_test_tbl");
+  protected static final byte[] CF = Bytes.toBytes("load_test_cf");
+  protected static final int NUM_THREADS = 8;
+  protected static final int NUM_RS = 2;
+  protected static final int TIMEOUT_MS = 120000;
+  protected static final HBaseTestingUtility TEST_UTIL =
+      new HBaseTestingUtility();
+
+  protected final Configuration conf = TEST_UTIL.getConfiguration();
+  protected final boolean isMultiPut;
+  protected final DataBlockEncoding dataBlockEncoding;
+
+  protected MultiThreadedWriter writerThreads;
+  protected MultiThreadedReader readerThreads;
+  protected int numKeys;
+
+  protected Compression.Algorithm compression = Compression.Algorithm.NONE;
+
+  public TestMiniClusterLoadSequential(boolean isMultiPut,
+      DataBlockEncoding dataBlockEncoding) {
+    this.isMultiPut = isMultiPut;
+    this.dataBlockEncoding = dataBlockEncoding;
+    conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
+  }
+
+  @Parameters
+  public static Collection<Object[]> parameters() {
+    List<Object[]> parameters = new ArrayList<Object[]>();
+    for (boolean multiPut : new boolean[]{false, true}) {
+      for (DataBlockEncoding dataBlockEncoding : new DataBlockEncoding[] {
+          DataBlockEncoding.NONE, DataBlockEncoding.PREFIX }) {
+        parameters.add(new Object[]{multiPut, dataBlockEncoding});
+      }
+    }
+    return parameters;
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    LOG.debug("Test setup: isMultiPut=" + isMultiPut);
+    TEST_UTIL.startMiniCluster(1, NUM_RS);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    LOG.debug("Test teardown: isMultiPut=" + isMultiPut);
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test(timeout=TIMEOUT_MS)
+  public void loadTest() throws Exception {
+    prepareForLoadTest();
+    runLoadTestOnExistingTable();
+  }
+
+  protected void runLoadTestOnExistingTable() throws IOException {
+    writerThreads.start(0, numKeys, NUM_THREADS);
+    writerThreads.waitForFinish();
+    assertEquals(0, writerThreads.getNumWriteFailures());
+
+    readerThreads.start(0, numKeys, NUM_THREADS);
+    readerThreads.waitForFinish();
+    assertEquals(0, readerThreads.getNumReadFailures());
+    assertEquals(0, readerThreads.getNumReadErrors());
+    assertEquals(numKeys, readerThreads.getNumKeysVerified());
+  }
+
+  protected void prepareForLoadTest() throws IOException {
+ LOG.info("Starting load test: dataBlockEncoding=" + dataBlockEncoding + + ", isMultiPut=" + isMultiPut); + numKeys = numKeys(); + HBaseAdmin admin = new HBaseAdmin(conf); + while (admin.getClusterStatus().getServers() < NUM_RS) { + LOG.info("Sleeping until " + NUM_RS + " RSs are online"); + Threads.sleepWithoutInterrupt(1000); + } + admin.close(); + + int numRegions = HBaseTestingUtility.createPreSplitLoadTestTable(conf, + TABLE, CF, compression, dataBlockEncoding); + + TEST_UTIL.waitUntilAllRegionsAssigned(numRegions); + + writerThreads = new MultiThreadedWriter(conf, TABLE, CF); + writerThreads.setMultiPut(isMultiPut); + readerThreads = new MultiThreadedReader(conf, TABLE, CF, 100); + } + + protected int numKeys() { + return 10000; + } + + protected HColumnDescriptor getColumnDesc(HBaseAdmin admin) + throws TableNotFoundException, IOException { + return admin.getTableDescriptor(TABLE).getFamily(CF); + } + +}