hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From apurt...@apache.org
Subject svn commit: r1037102 [3/3] - in /hbase/trunk: ./ src/main/java/org/apache/hadoop/hbase/coprocessor/ src/main/java/org/apache/hadoop/hbase/regionserver/ src/main/java/org/apache/hadoop/hbase/regionserver/handler/ src/main/resources/ src/test/java/org/ap...
Date Sat, 20 Nov 2010 01:23:40 GMT
Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java?rev=1037102&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java
(added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java
Sat Nov 20 01:23:39 2010
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.coprocessor;
+
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.client.coprocessor.*;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.*;
+import org.apache.hadoop.conf.Configuration;
+
+import static org.junit.Assert.*;
+
+import java.util.Map;
+import java.io.IOException;
+
+/**
+ * Verifies the coprocessor Endpoint framework: a ColumnAggregationEndpoint is
+ * loaded on every region of a three-region table, invoked through
+ * HTable.coprocessorExec(), and the per-region partial sums returned to the
+ * client must add up to the expected total for the rows the scan covers.
+ */
+public class TestCoprocessorEndpoint {
+
+  private static final byte[] TEST_TABLE = Bytes.toBytes("TestTable");
+  private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily");
+  private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier");
+  private static byte [] ROW = Bytes.toBytes("testRow");
+
+  private static final int ROWSIZE = 20;
+  // Region split points: region 1 = rows [0, 5), region 2 = [5, 12),
+  // region 3 = [12, 20).
+  private static final int rowSeperator1 = 5;
+  private static final int rowSeperator2 = 12;
+  private static byte [][] ROWS = makeN(ROW, ROWSIZE);
+
+  private static HBaseTestingUtility util = new HBaseTestingUtility();
+  private static MiniHBaseCluster cluster = null;
+
+  /**
+   * Starts a two-node mini cluster with the aggregation endpoint configured
+   * as a default coprocessor, creates the three-region test table, and loads
+   * one row per index i carrying the value i.
+   */
+  @BeforeClass
+  public static void setupBeforeClass() throws Exception {
+    // set configure to indicate which cp should be loaded
+    Configuration conf = util.getConfiguration();
+    conf.set("hbase.coprocessor.default.classes",
+        "org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint");
+
+    util.startMiniCluster(2);
+    cluster = util.getMiniHBaseCluster();
+
+    HTable table = util.createTable(TEST_TABLE, TEST_FAMILY);
+    util.createMultiRegions(util.getConfiguration(), table, TEST_FAMILY,
+        new byte[][]{ HConstants.EMPTY_BYTE_ARRAY, ROWS[rowSeperator1],
+      ROWS[rowSeperator2]});
+
+    for (int i = 0; i < ROWSIZE; i++) {
+      Put put = new Put(ROWS[i]);
+      put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
+      table.put(put);
+    }
+
+    // sleep here is an ugly hack to allow region transitions to finish
+    Thread.sleep(5000);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    util.shutdownMiniCluster();
+  }
+
+  /**
+   * Invokes ColumnAggregationProtocol.sum() on every region overlapping
+   * [start, stop] and adds up the per-region partial sums on the client.
+   */
+  private static long sum(HTable table, byte[] start, byte[] stop)
+      throws Throwable {
+    Map<byte[], Long> results = table.coprocessorExec(
+        ColumnAggregationProtocol.class, start, stop,
+        new Batch.Call<ColumnAggregationProtocol, Long>() {
+          public Long call(ColumnAggregationProtocol instance)
+              throws IOException {
+            return instance.sum(TEST_FAMILY, TEST_QUALIFIER);
+          }
+        });
+    long total = 0;
+    for (Long partial : results.values()) {
+      total += partial;
+    }
+    return total;
+  }
+
+  /**
+   * Sums the test column over two row ranges and checks the aggregated
+   * totals. Note JUnit's argument order: assertEquals(message, expected,
+   * actual) — the expected value comes first.
+   */
+  @Test
+  public void testAggregation() throws Throwable {
+    HTable table = new HTable(util.getConfiguration(), TEST_TABLE);
+
+    // Scan spanning all three regions: every row [0, ROWSIZE) contributes.
+    long expected = 0;
+    for (int i = 0; i < ROWSIZE; i++) {
+      expected += i;
+    }
+    assertEquals("Invalid result", expected,
+        sum(table, ROWS[rowSeperator1 - 1], ROWS[rowSeperator2 + 1]));
+
+    // Scan touching only regions 2 and 3. The endpoint aggregates complete
+    // regions, so the sum starts at rowSeperator1 (the first row of region
+    // 2) even though the requested start row is rowSeperator1 + 1.
+    expected = 0;
+    for (int i = rowSeperator1; i < ROWSIZE; i++) {
+      expected += i;
+    }
+    assertEquals("Invalid result", expected,
+        sum(table, ROWS[rowSeperator1 + 1], ROWS[rowSeperator2 + 1]));
+  }
+
+  /** Generates n row keys "testRow00".."testRowNN" by suffixing base with %02d. */
+  private static byte [][] makeN(byte [] base, int n) {
+    byte [][] ret = new byte[n][];
+    for (int i = 0; i < n; i++) {
+      ret[i] = Bytes.add(base, Bytes.toBytes(String.format("%02d", i)));
+    }
+    return ret;
+  }
+}

Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java?rev=1037102&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
(added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
Sat Nov 20 01:23:39 2010
@@ -0,0 +1,271 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.coprocessor;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.coprocessor.Coprocessor;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.Coprocessor.Priority;
+import org.apache.hadoop.hbase.regionserver.CoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.SplitTransaction;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.PairOfSameType;
+import org.apache.hadoop.hbase.Server;
+import org.mockito.Mockito;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import static org.mockito.Mockito.when;
+
+/**
+ * Exercises the Coprocessor lifecycle interface without a region server:
+ * a coprocessor is loaded into a region by hand, then the region is opened,
+ * flushed, compacted, split and closed, and the test asserts that every
+ * corresponding pre/post hook fired.
+ */
+public class TestCoprocessorInterface extends HBaseTestCase {
+  static final Log LOG = LogFactory.getLog(TestCoprocessorInterface.class);
+  static final String DIR = "test/build/data/TestCoprocessorInterface/";
+  private static final HBaseTestingUtility TEST_UTIL =
+    new HBaseTestingUtility();
+
+  /**
+   * Coprocessor implementation that records which lifecycle hooks were
+   * invoked so the test can assert each pre/post pair fired.
+   */
+  public static class CoprocessorImpl implements Coprocessor {
+
+    private boolean preOpenCalled;
+    private boolean postOpenCalled;
+    private boolean preCloseCalled;
+    private boolean postCloseCalled;
+    private boolean preCompactCalled;
+    private boolean postCompactCalled;
+    private boolean preFlushCalled;
+    private boolean postFlushCalled;
+    private boolean preSplitCalled;
+    private boolean postSplitCalled;
+
+    // Each hook only flips its flag; no other side effects.
+    @Override
+    public void preOpen(CoprocessorEnvironment e) {
+      preOpenCalled = true;
+    }
+    @Override
+    public void postOpen(CoprocessorEnvironment e) {
+      postOpenCalled = true;
+    }
+    @Override
+    public void preClose(CoprocessorEnvironment e, boolean abortRequested) {
+      preCloseCalled = true;
+    }
+    @Override
+    public void postClose(CoprocessorEnvironment e, boolean abortRequested) {
+      postCloseCalled = true;
+    }
+    @Override
+    public void preCompact(CoprocessorEnvironment e, boolean willSplit) {
+      preCompactCalled = true;
+    }
+    @Override
+    public void postCompact(CoprocessorEnvironment e, boolean willSplit) {
+      postCompactCalled = true;
+    }
+    @Override
+    public void preFlush(CoprocessorEnvironment e) {
+      preFlushCalled = true;
+    }
+    @Override
+    public void postFlush(CoprocessorEnvironment e) {
+      postFlushCalled = true;
+    }
+    @Override
+    public void preSplit(CoprocessorEnvironment e) {
+      preSplitCalled = true;
+    }
+    @Override
+    public void postSplit(CoprocessorEnvironment e, HRegion l, HRegion r) {
+      postSplitCalled = true;
+    }
+
+    // Paired pre/post checks used by the assertions in the test below.
+    boolean wasOpened() {
+      return (preOpenCalled && postOpenCalled);
+    }
+
+    boolean wasClosed() {
+      return (preCloseCalled && postCloseCalled);
+    }
+
+    boolean wasFlushed() {
+      return (preFlushCalled && postFlushCalled);
+    }
+
+    boolean wasCompacted() {
+      return (preCompactCalled && postCompactCalled);
+    }
+
+    boolean wasSplit() {
+      return (preSplitCalled && postSplitCalled);
+    }
+  }
+
+  /**
+   * Drives a region through its full lifecycle (open, flush, compact, split,
+   * close) and verifies the coprocessor observed every stage.
+   */
+  public void testCoprocessorInterface() throws IOException {
+    byte [] tableName = Bytes.toBytes("testtable");
+    byte [][] families = { fam1, fam2, fam3 };
+
+    Configuration hc = initSplit();
+    HRegion region = initHRegion(tableName, getName(), hc,
+      CoprocessorImpl.class, families);
+
+    addContent(region, fam3);
+    region.flushcache();
+    // compactStores() returns a mid-point row once the region is large
+    // enough to split (max filesize is set very low in initSplit()).
+    byte [] splitRow = region.compactStores();
+    assertNotNull(splitRow);
+    HRegion [] regions = split(region, splitRow);
+    for (int i = 0; i < regions.length; i++) {
+      regions[i] = reopenRegion(regions[i], CoprocessorImpl.class);
+    }
+    region.close();
+    region.getLog().closeAndDelete();
+
+    // The parent region is closed, but its coprocessor instance is still
+    // reachable and should have seen the entire lifecycle.
+    Coprocessor c = region.getCoprocessorHost()
+      .findCoprocessor(CoprocessorImpl.class.getName());
+    assertTrue(((CoprocessorImpl)c).wasOpened());
+    assertTrue(((CoprocessorImpl)c).wasClosed());
+    assertTrue(((CoprocessorImpl)c).wasFlushed());
+    assertTrue(((CoprocessorImpl)c).wasCompacted());
+    assertTrue(((CoprocessorImpl)c).wasSplit());
+
+    for (int i = 0; i < regions.length; i++) {
+      regions[i].close();
+      regions[i].getLog().closeAndDelete();
+      // NOTE(review): this looks up the coprocessor on the *parent* region
+      // again, not on regions[i] — presumably regions[i].getCoprocessorHost()
+      // was intended here; confirm before relying on these assertions.
+      c = region.getCoprocessorHost()
+            .findCoprocessor(CoprocessorImpl.class.getName());
+      assertTrue(((CoprocessorImpl)c).wasOpened());
+      assertTrue(((CoprocessorImpl)c).wasClosed());
+      assertTrue(((CoprocessorImpl)c).wasCompacted());
+    }
+  }
+
+  /**
+   * Reopens a closed region with a freshly created CoprocessorHost carrying
+   * the given coprocessor class, firing the open hooks manually.
+   */
+  HRegion reopenRegion(final HRegion closedRegion, Class<?> implClass)
+      throws IOException {
+    HRegion r = new HRegion(closedRegion.getRegionDir(), closedRegion.getLog(),
+        closedRegion.getFilesystem(), closedRegion.getConf(),
+        closedRegion.getRegionInfo(), null);
+    r.initialize();
+
+    // this following piece is a hack. currently a coprocessorHost
+    // is secretly loaded at OpenRegionHandler. we don't really
+    // start a region server here, so just manually create cphost
+    // and set it to region.
+    CoprocessorHost host = new CoprocessorHost(r, null, conf);
+    r.setCoprocessorHost(host);
+
+    host.load(implClass, Priority.USER);
+    // we need to manually call pre- and postOpen here since the
+    // above load() is not the real case for CP loading. A CP is
+    // expected to be loaded by default from 1) configuration; or 2)
+    // HTableDescriptor. If it's loaded after HRegion initialized,
+    // the pre- and postOpen() won't be triggered automatically.
+    // Here we have to call pre and postOpen explicitly.
+    host.preOpen();
+    host.postOpen();
+    return r;
+  }
+
+  /**
+   * Creates a standalone HRegion with the given families and manually
+   * attaches a CoprocessorHost loaded with implClass.
+   */
+  HRegion initHRegion (byte [] tableName, String callingMethod,
+      Configuration conf, Class<?> implClass, byte [] ... families)
+      throws IOException {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    for(byte [] family : families) {
+      htd.addFamily(new HColumnDescriptor(family));
+    }
+    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    Path path = new Path(DIR + callingMethod);
+    HRegion r = HRegion.createHRegion(info, path, conf);
+
+    // this following piece is a hack.
+    CoprocessorHost host = new CoprocessorHost(r, null, conf);
+    r.setCoprocessorHost(host);
+
+    host.load(implClass, Priority.USER);
+
+    // Here we have to call pre and postOpen explicitly.
+    host.preOpen();
+    host.postOpen();
+    return r;
+  }
+
+  /**
+   * Returns a Configuration tuned so the region compacts eagerly and splits
+   * after the addContent() load in the test above.
+   */
+  Configuration initSplit() {
+    // Always compact if there is more than one store file.
+    TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 2);
+    // Make lease timeout longer, lease checks less frequent
+    TEST_UTIL.getConfiguration().setInt(
+        "hbase.master.lease.thread.wakefrequency", 5 * 1000);
+    TEST_UTIL.getConfiguration().setInt(
+        "hbase.regionserver.lease.period", 10 * 1000);
+    // Increase the amount of time between client retries
+    TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 15 * 1000);
+    // This size should make it so we always split using the addContent
+    // below.  After adding all data, the first region is 1.3M
+    TEST_UTIL.getConfiguration().setLong("hbase.hregion.max.filesize",
+        1024 * 128);
+    TEST_UTIL.getConfiguration().setBoolean("hbase.testing.nocluster",
+        true);
+
+    return TEST_UTIL.getConfiguration();
+  }
+
+  /**
+   * Splits region r at splitRow via a SplitTransaction driven by a mocked
+   * Server, returning exactly two daughter regions (the test fails
+   * otherwise).
+   */
+  private HRegion [] split(final HRegion r, final byte [] splitRow)
+      throws IOException {
+
+    HRegion[] regions = new HRegion[2];
+
+    SplitTransaction st = new SplitTransaction(r, splitRow);
+    int i = 0;
+
+    if (!st.prepare()) {
+      // test fails.
+      // NOTE(review): fail("split prepare failed") would report the cause
+      // more clearly than assertTrue(false).
+      assertTrue(false);
+    }
+    try {
+      // Mock just enough of Server for SplitTransaction.execute(): it only
+      // needs getConfiguration() here.
+      Server mockServer = Mockito.mock(Server.class);
+      when(mockServer.getConfiguration()).thenReturn(
+          TEST_UTIL.getConfiguration());
+      PairOfSameType<HRegion> daughters = st.execute(mockServer, null);
+      for (HRegion each_daughter: daughters) {
+        regions[i] = each_daughter;
+        i++;
+      }
+    }
+    catch (IOException ioe) {
+      LOG.info("Split transaction of " + r.getRegionNameAsString() +
+          " failed:" + ioe.getMessage());
+      assertTrue(false);
+    }
+    catch (RuntimeException e) {
+      // Rollback failure is logged but tolerated; the count check below
+      // still fails the test if we did not get two daughters.
+      LOG.info("Failed rollback of failed split of " +
+          r.getRegionNameAsString() + e.getMessage());
+    }
+
+    assertTrue(i == 2);
+    return regions;
+  }
+}
+
+

Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java?rev=1037102&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
(added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
Sat Nov 20 01:23:39 2010
@@ -0,0 +1,195 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.coprocessor;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.coprocessor.Coprocessor.Priority;
+import org.apache.hadoop.hbase.regionserver.CoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.hadoop.conf.Configuration;
+
+import static org.junit.Assert.*;
+
+/**
+ * Verifies that SimpleRegionObserver, loaded as a default coprocessor on a
+ * mini cluster, has its observer hooks invoked for client operations.
+ */
+public class TestRegionObserverInterface {
+  static final Log LOG = LogFactory.getLog(TestRegionObserverInterface.class);
+  static final String DIR = "test/build/data/TestRegionObserver/";
+
+  public static final byte[] TEST_TABLE = Bytes.toBytes("TestTable");
+  public static final byte[] TEST_TABLE_2 = Bytes.toBytes("TestTable2");
+  public static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily");
+  public static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier");
+
+  public final static byte[] A = Bytes.toBytes("a");
+  public final static byte[] B = Bytes.toBytes("b");
+  public final static byte[] C = Bytes.toBytes("c");
+  public final static byte[] ROW = Bytes.toBytes("testrow");
+  public final static byte[] ROW1 = Bytes.toBytes("testrow1");
+  public final static byte[] ROW2 = Bytes.toBytes("testrow2");
+
+  private static final int ROWSIZE = 20;
+  private static byte [][] ROWS = makeN(ROW, ROWSIZE);
+
+  private static HBaseTestingUtility util = new HBaseTestingUtility();
+  private static MiniHBaseCluster cluster = null;
+
+  /**
+   * Starts a two-node mini cluster with SimpleRegionObserver configured as a
+   * default coprocessor and loads TEST_TABLE_2 with one row per index.
+   * NOTE(review): only TEST_TABLE_2 is created here, yet testRegionObserver
+   * below filters regions by TEST_TABLE — confirm where TEST_TABLE comes
+   * from.
+   */
+  @BeforeClass
+  public static void setupBeforeClass() throws Exception {
+    // set configure to indicate which cp should be loaded
+    Configuration conf = util.getConfiguration();
+    conf.set("hbase.coprocessor.default.classes",
+        "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver");
+
+    util.startMiniCluster(2);
+    cluster = util.getMiniHBaseCluster();
+
+    HTable table = util.createTable(TEST_TABLE_2, TEST_FAMILY);
+
+    for(int i = 0; i < ROWSIZE; i++) {
+      Put put = new Put(ROWS[i]);
+      put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
+      table.put(put);
+    }
+
+    // sleep here is an ugly hack to allow region transitions to finish
+    Thread.sleep(5000);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    util.shutdownMiniCluster();
+  }
+
+  /**
+   * Creates a standalone HRegion with a hand-built CoprocessorHost loaded
+   * with implClass. (Helper; not used by the tests in this view.)
+   */
+  HRegion initHRegion (byte [] tableName, String callingMethod,
+      Configuration conf, Class<?> implClass, byte [] ... families)
+      throws IOException{
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    for(byte [] family : families) {
+      htd.addFamily(new HColumnDescriptor(family));
+    }
+    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    Path path = new Path(DIR + callingMethod);
+    // this following piece is a hack. currently a coprocessorHost
+    // is secretly loaded at OpenRegionHandler. we don't really
+    // start a region server here, so just manually create cphost
+    // and set it to region.
+    HRegion r = HRegion.createHRegion(info, path, conf);
+    CoprocessorHost host = new CoprocessorHost(r, null, conf);
+    r.setCoprocessorHost(host);
+    host.load(implClass, Priority.USER);
+    return r;
+  }
+
+  /**
+   * Checks that the observer saw get/put/delete hooks on TEST_TABLE regions.
+   * NOTE(review): the Put/Get/Delete built below are never executed against
+   * a table, and TABLE/FAMILIES are unused; moreover setupBeforeClass() only
+   * creates TEST_TABLE_2, so the inner loop body may never run, making the
+   * assertions vacuous. Verify the test actually exercises anything.
+   */
+  @Test
+  public void testRegionObserver() throws IOException {
+    byte[] TABLE = Bytes.toBytes(getClass().getName());
+    byte[][] FAMILIES = new byte[][] { A, B, C } ;
+
+    Put put = new Put(ROW);
+    put.add(A, A, A);
+    put.add(B, B, B);
+    put.add(C, C, C);
+
+    Get get = new Get(ROW);
+    get.addColumn(A, A);
+    get.addColumn(B, B);
+    get.addColumn(C, C);
+
+    Delete delete = new Delete(ROW);
+    delete.deleteColumn(A, A);
+    delete.deleteColumn(B, B);
+    delete.deleteColumn(C, C);
+
+    // Walk every online region on every region server, keeping only those
+    // belonging to TEST_TABLE, and assert its observer recorded the hooks.
+    for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
+      for (HRegionInfo r : t.getRegionServer().getOnlineRegions()) {
+        if (!Arrays.equals(r.getTableDesc().getName(), TEST_TABLE)) {
+          continue;
+        }
+        CoprocessorHost cph = t.getRegionServer().getOnlineRegion(r.getRegionName()).
+          getCoprocessorHost();
+        Coprocessor c = cph.findCoprocessor(SimpleRegionObserver.class.getName());
+        assertNotNull(c);
+        assertTrue(((SimpleRegionObserver)c).hadPreGet());
+        assertTrue(((SimpleRegionObserver)c).hadPostGet());
+        assertTrue(((SimpleRegionObserver)c).hadPrePut());
+        assertTrue(((SimpleRegionObserver)c).hadPostPut());
+        assertTrue(((SimpleRegionObserver)c).hadDelete());
+      }
+    }
+  }
+
+  // TODO: add tests for other methods which need to be tested
+  // at region servers.
+
+  /**
+   * Issues an Increment against TEST_TABLE_2 and asserts the observer's
+   * pre/postIncrement hooks fired on its regions.
+   */
+  @Test
+  public void testIncrementHook() throws IOException {
+    HTable table = new HTable(util.getConfiguration(), TEST_TABLE_2);
+
+    Increment inc = new Increment(Bytes.toBytes(0));
+    inc.addColumn(TEST_FAMILY, TEST_QUALIFIER, 1);
+
+    table.increment(inc);
+
+    for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
+      for (HRegionInfo r : t.getRegionServer().getOnlineRegions()) {
+        if (!Arrays.equals(r.getTableDesc().getName(), TEST_TABLE_2)) {
+          continue;
+        }
+        CoprocessorHost cph = t.getRegionServer().getOnlineRegion(r.getRegionName()).
+          getCoprocessorHost();
+        Coprocessor c = cph.findCoprocessor(SimpleRegionObserver.class.getName());
+        assertTrue(((SimpleRegionObserver)c).hadPreIncrement());
+        assertTrue(((SimpleRegionObserver)c).hadPostIncrement());
+      }
+    }
+  }
+
+  /** Generates n row keys "testrow00".."testrowNN" by suffixing base with %02d. */
+  private static byte [][] makeN(byte [] base, int n) {
+    byte [][] ret = new byte[n][];
+    for(int i=0;i<n;i++) {
+      ret[i] = Bytes.add(base, Bytes.toBytes(String.format("%02d", i)));
+    }
+    return ret;
+  }
+}
+

Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java?rev=1037102&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java
(added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java
Sat Nov 20 01:23:39 2010
@@ -0,0 +1,134 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.coprocessor;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.coprocessor.Coprocessor.Priority;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.CoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+
+/**
+ * Verifies that multiple RegionObserver coprocessors on one region are
+ * invoked in priority order (HIGHEST before USER before LOWEST). Each
+ * observer stamps System.currentTimeMillis() in postPut and sleeps 10 ms so
+ * successive invocations get strictly increasing timestamps, which the final
+ * assertions compare.
+ */
+public class TestRegionObserverStacking extends TestCase {
+  static final String DIR = "test/build/data/TestRegionObserverStacking/";
+
+  /** Observer loaded at Priority.HIGHEST; records when its postPut ran. */
+  public static class ObserverA extends BaseRegionObserverCoprocessor {
+    long id;
+    @Override
+    public void postPut(final CoprocessorEnvironment e,
+        Map<byte[], List<KeyValue>> familyMap) {
+      id = System.currentTimeMillis();
+      try {
+        Thread.sleep(10);
+      } catch (InterruptedException ex) {
+        // deliberately ignored: the sleep only separates timestamps
+      }
+    }
+  }
+
+  /** Observer loaded at Priority.USER; records when its postPut ran. */
+  public static class ObserverB extends BaseRegionObserverCoprocessor {
+    long id;
+    @Override
+    public void postPut(final CoprocessorEnvironment e,
+        Map<byte[], List<KeyValue>> familyMap) {
+      id = System.currentTimeMillis();
+      try {
+        Thread.sleep(10);
+      } catch (InterruptedException ex) {
+        // deliberately ignored: the sleep only separates timestamps
+      }
+    }
+  }
+
+  /** Observer loaded at Priority.LOWEST; records when its postPut ran. */
+  public static class ObserverC extends BaseRegionObserverCoprocessor {
+    long id;
+
+    @Override
+    public void postPut(final CoprocessorEnvironment e,
+        Map<byte[], List<KeyValue>> familyMap) {
+      id = System.currentTimeMillis();
+      try {
+        Thread.sleep(10);
+      } catch (InterruptedException ex) {
+        // deliberately ignored: the sleep only separates timestamps
+      }
+    }
+  }
+
+  /**
+   * Creates a standalone HRegion with an empty CoprocessorHost attached;
+   * observers are loaded by the test itself.
+   */
+  HRegion initHRegion (byte [] tableName, String callingMethod,
+      Configuration conf, byte [] ... families) throws IOException {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    for(byte [] family : families) {
+      htd.addFamily(new HColumnDescriptor(family));
+    }
+    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    Path path = new Path(DIR + callingMethod);
+    HRegion r = HRegion.createHRegion(info, path, conf);
+    // this following piece is a hack. currently a coprocessorHost
+    // is secretly loaded at OpenRegionHandler. we don't really
+    // start a region server here, so just manually create cphost
+    // and set it to region.
+    CoprocessorHost host = new CoprocessorHost(r, null, conf);
+    r.setCoprocessorHost(host);
+    return r;
+  }
+
+  /**
+   * Loads three observers at different priorities, performs one put, and
+   * asserts the recorded timestamps show A (HIGHEST) ran before B (USER)
+   * before C (LOWEST).
+   */
+  public void testRegionObserverStacking() throws Exception {
+    byte[] ROW = Bytes.toBytes("testRow");
+    byte[] TABLE = Bytes.toBytes(getClass().getName());
+    byte[] A = Bytes.toBytes("A");
+    byte[][] FAMILIES = new byte[][] { A } ;
+
+    HRegion region = initHRegion(TABLE, getClass().getName(),
+      HBaseConfiguration.create(), FAMILIES);
+    CoprocessorHost h = region.getCoprocessorHost();
+    h.load(ObserverA.class, Priority.HIGHEST);
+    h.load(ObserverB.class, Priority.USER);
+    h.load(ObserverC.class, Priority.LOWEST);
+
+    Put put = new Put(ROW);
+    put.add(A, A, A);
+    // Row lock held explicitly around the put, matching the HRegion API.
+    int lockid = region.obtainRowLock(ROW);
+    region.put(put, lockid);
+    region.releaseRowLock(lockid);
+
+    Coprocessor c = h.findCoprocessor(ObserverA.class.getName());
+    long idA = ((ObserverA)c).id;
+    c = h.findCoprocessor(ObserverB.class.getName());
+    long idB = ((ObserverB)c).id;
+    c = h.findCoprocessor(ObserverC.class.getName());
+    long idC = ((ObserverC)c).id;
+
+    // The 10 ms sleeps in each postPut guarantee strict ordering of the
+    // timestamps if (and only if) the hooks ran in priority order.
+    assertTrue(idA < idB);
+    assertTrue(idB < idC);
+  }
+}
+



Mime
View raw message