hbase-commits mailing list archives

From st...@apache.org
Subject svn commit: r1518334 [2/2] - in /hbase/branches/0.95: hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ hbase-protocol/src/main/protobuf/ hbase-server/src/main/java/org/a...
Date Wed, 28 Aug 2013 19:31:14 GMT
Modified: hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java?rev=1518334&r1=1518333&r2=1518334&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java (original)
+++ hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java Wed Aug 28 19:31:13 2013
@@ -1,5 +1,4 @@
-/**
- *
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -22,27 +21,35 @@ import static org.junit.Assert.assertArr
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.util.List;
-
+import java.util.Random;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.After;
@@ -55,18 +62,14 @@ public class TestMasterReplication {
 
   private static final Log LOG = LogFactory.getLog(TestReplicationBase.class);
 
-  private Configuration conf1;
-  private Configuration conf2;
-  private Configuration conf3;
-
-  private HBaseTestingUtility utility1;
-  private HBaseTestingUtility utility2;
-  private HBaseTestingUtility utility3;
-  
-  private MiniZooKeeperCluster miniZK; 
+  private Configuration baseConfiguration;
+
+  private HBaseTestingUtility[] utilities;
+  private Configuration[] configurations;
+  private MiniZooKeeperCluster miniZK;
 
   private static final long SLEEP_TIME = 500;
-  private static final int NB_RETRIES = 100;
+  private static final int NB_RETRIES = 10;
 
   private static final byte[] tableName = Bytes.toBytes("test");
   private static final byte[] famName = Bytes.toBytes("f");
@@ -85,44 +88,21 @@ public class TestMasterReplication {
 
   @Before
   public void setUp() throws Exception {
-    conf1 = HBaseConfiguration.create();
-    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
+    baseConfiguration = HBaseConfiguration.create();
     // smaller block size and capacity to trigger more operations
     // and test them
-    conf1.setInt("hbase.regionserver.hlog.blocksize", 1024*20);
-    conf1.setInt("replication.source.size.capacity", 1024);
-    conf1.setLong("replication.source.sleepforretries", 100);
-    conf1.setInt("hbase.regionserver.maxlogs", 10);
-    conf1.setLong("hbase.master.logcleaner.ttl", 10);
-    conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
-    conf1.setBoolean("dfs.support.append", true);
-    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
-    conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
+    baseConfiguration.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20);
+    baseConfiguration.setInt("replication.source.size.capacity", 1024);
+    baseConfiguration.setLong("replication.source.sleepforretries", 100);
+    baseConfiguration.setInt("hbase.regionserver.maxlogs", 10);
+    baseConfiguration.setLong("hbase.master.logcleaner.ttl", 10);
+    baseConfiguration.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
+    baseConfiguration.setBoolean("dfs.support.append", true);
+    baseConfiguration.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
+    baseConfiguration.setStrings(
+        CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
         CoprocessorCounter.class.getName());
 
-    utility1 = new HBaseTestingUtility(conf1);
-    utility1.startMiniZKCluster();
-    miniZK = utility1.getZkCluster();
-    // By setting the mini ZK cluster through this method, even though this is
-    // already utility1's mini ZK cluster, we are telling utility1 not to shut
-    // the mini ZK cluster when we shut down the HBase cluster.
-    utility1.setZkCluster(miniZK);
-    new ZooKeeperWatcher(conf1, "cluster1", null, true);
-
-    conf2 = new Configuration(conf1);
-    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
-
-    utility2 = new HBaseTestingUtility(conf2);
-    utility2.setZkCluster(miniZK);
-    new ZooKeeperWatcher(conf2, "cluster2", null, true);
-
-    conf3 = new Configuration(conf1);
-    conf3.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");
-
-    utility3 = new HBaseTestingUtility(conf3);
-    utility3.setZkCluster(miniZK);
-    new ZooKeeperWatcher(conf3, "cluster3", null, true);
-
     table = new HTableDescriptor(TableName.valueOf(tableName));
     HColumnDescriptor fam = new HColumnDescriptor(famName);
     fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
@@ -131,209 +111,325 @@ public class TestMasterReplication {
     table.addFamily(fam);
   }
 
-  @After
-  public void tearDown() throws IOException {
-    miniZK.shutdown();
+  /**
+   * Tests the replication scenario 0 -> 1 -> 0 by adding a row to a table in
+   * each cluster, checking that it is replicated, then deleting it and
+   * checking again. It also verifies that the puts and deletes are not
+   * replicated back to the originating cluster.
+   */
+  @Test(timeout = 300000)
+  public void testCyclicReplication1() throws Exception {
+    LOG.info("testSimplePutDelete");
+    int numClusters = 2;
+    HTable[] htables = null;
+    try {
+      startMiniClusters(numClusters);
+      createTableOnClusters(table);
+
+      htables = getHTablesOnClusters(tableName);
+
+      // Test the replication scenario 0 -> 1 -> 0
+      addPeer("1", 0, 1);
+      addPeer("1", 1, 0);
+
+      int[] expectedCounts = new int[] { 2, 2 };
+
+      // add rows to both clusters,
+      // make sure they are both replicated
+      putAndWait(row, famName, htables[0], htables[1]);
+      putAndWait(row1, famName, htables[1], htables[0]);
+      validateCounts(htables, put, expectedCounts);
+
+      deleteAndWait(row, htables[0], htables[1]);
+      deleteAndWait(row1, htables[1], htables[0]);
+      validateCounts(htables, delete, expectedCounts);
+    } finally {
+      close(htables);
+      shutDownMiniClusters();
+    }
   }
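
[The addPeer helper used above is defined further down in this diff; for the two-cluster master-master case it reduces to the plain ReplicationAdmin wiring the old test did inline. A minimal sketch, assuming the 0.95-era client API shown elsewhere in this diff, with conf0/conf1 and key0/key1 as hypothetical stand-ins for the per-cluster configurations and utilities[n].getClusterKey() values:

    // Rough equivalent of addPeer("1", 0, 1) followed by addPeer("1", 1, 0).
    // conf0/conf1 and key0/key1 are placeholders, not names from this patch.
    ReplicationAdmin admin0 = new ReplicationAdmin(conf0);
    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
    try {
      admin0.addPeer("1", key1); // cluster 0 ships WAL edits to cluster 1
      admin1.addPeer("1", key0); // cluster 1 ships WAL edits back to cluster 0
    } finally {
      admin0.close();
      admin1.close();
    }
]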
 
-  @Test(timeout=300000)
-  public void testCyclicReplication() throws Exception {
-    LOG.info("testCyclicReplication");
-    utility1.startMiniCluster();
-    utility2.startMiniCluster();
-    utility3.startMiniCluster();
-    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
-    ReplicationAdmin admin2 = new ReplicationAdmin(conf2);
-    ReplicationAdmin admin3 = new ReplicationAdmin(conf3);
-
-    new HBaseAdmin(conf1).createTable(table);
-    new HBaseAdmin(conf2).createTable(table);
-    new HBaseAdmin(conf3).createTable(table);
-    HTable htable1 = new HTable(conf1, tableName);
-    htable1.setWriteBufferSize(1024);
-    HTable htable2 = new HTable(conf2, tableName);
-    htable2.setWriteBufferSize(1024);
-    HTable htable3 = new HTable(conf3, tableName);
-    htable3.setWriteBufferSize(1024);
-    
-    admin1.addPeer("1", utility2.getClusterKey());
-    admin2.addPeer("1", utility3.getClusterKey());
-    admin3.addPeer("1", utility1.getClusterKey());
-
-    // put "row" and wait 'til it got around
-    putAndWait(row, famName, htable1, htable3);
-    // it should have passed through table2
-    check(row,famName,htable2);
-
-    putAndWait(row1, famName, htable2, htable1);
-    check(row,famName,htable3);
-    putAndWait(row2, famName, htable3, htable2);
-    check(row,famName,htable1);
-    
-    deleteAndWait(row,htable1,htable3);
-    deleteAndWait(row1,htable2,htable1);
-    deleteAndWait(row2,htable3,htable2);
-
-    assertEquals("Puts were replicated back ", 3, getCount(htable1, put));
-    assertEquals("Puts were replicated back ", 3, getCount(htable2, put));
-    assertEquals("Puts were replicated back ", 3, getCount(htable3, put));
-    assertEquals("Deletes were replicated back ", 3, getCount(htable1, delete));
-    assertEquals("Deletes were replicated back ", 3, getCount(htable2, delete));
-    assertEquals("Deletes were replicated back ", 3, getCount(htable3, delete));
-
-    // Test HBASE-9158
-    admin2.disablePeer("1");
-    // we now have an edit that was replicated into cluster originating from cluster 1
-    putAndWait(row3, famName, htable1, htable2);
-    // now add a local edit to cluster 2
-    Put put = new Put(row4);
-    put.add(famName, row4, row4);
-    htable2.put(put);
-    // reenable replication from cluster 2 to cluster 3
-    admin2.enablePeer("1");
-    // without HBASE-9158 the edit for row4 would have been marked with cluster 1's id
-    // and hence not replicated to cluster 1
-    wait(row4, htable1);
-    
-    utility3.shutdownMiniCluster();
-    utility2.shutdownMiniCluster();
-    utility1.shutdownMiniCluster();
+  /**
+   * Tests the cyclic replication scenario 0 -> 1 -> 2 -> 0 by adding and
+   * deleting rows in a table on each cluster and ensuring that each cluster
+   * receives the appropriate mutations. It also tests the grouping scenario,
+   * where a cluster must replicate the edits originating from itself as well
+   * as the edits it received via replication from a different cluster. The
+   * scenario is explained in HBASE-9158.
+   */
+  @Test(timeout = 300000)
+  public void testCyclicReplication2() throws Exception {
+    LOG.info("testCyclicReplication1");
+    int numClusters = 3;
+    HTable[] htables = null;
+    try {
+      startMiniClusters(numClusters);
+      createTableOnClusters(table);
+
+      // Test the replication scenario of 0 -> 1 -> 2 -> 0
+      addPeer("1", 0, 1);
+      addPeer("1", 1, 2);
+      addPeer("1", 2, 0);
+
+      htables = getHTablesOnClusters(tableName);
+
+      // put "row" and wait 'til it got around
+      putAndWait(row, famName, htables[0], htables[2]);
+      putAndWait(row1, famName, htables[1], htables[0]);
+      putAndWait(row2, famName, htables[2], htables[1]);
+
+      deleteAndWait(row, htables[0], htables[2]);
+      deleteAndWait(row1, htables[1], htables[0]);
+      deleteAndWait(row2, htables[2], htables[1]);
+
+      int[] expectedCounts = new int[] { 3, 3, 3 };
+      validateCounts(htables, put, expectedCounts);
+      validateCounts(htables, delete, expectedCounts);
+
+      // Test HBASE-9158
+      disablePeer("1", 2);
+      // cluster 1 now holds an edit that arrived via replication and
+      // originated from cluster 0
+      putAndWait(row3, famName, htables[0], htables[1]);
+      // now add a local edit to cluster 1
+      htables[1].put(new Put(row4).add(famName, row4, row4));
+      // re-enable replication from cluster 2 to cluster 0
+      enablePeer("1", 2);
+      // without HBASE-9158 the edit for row4 would have been marked with
+      // cluster 0's id and hence never have been replicated back to
+      // cluster 0
+      wait(row4, htables[0], true);
+    } finally {
+      close(htables);
+      shutDownMiniClusters();
+    }
   }
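
[The HBASE-9158 assertion above depends on an edit keeping track of which clusters it has already passed through, so a cycle terminates exactly at the originating cluster rather than one hop early. A toy, self-contained model of that bookkeeping — illustration only, not HBase's actual implementation, which tags WAL entries with cluster UUIDs:

    import java.util.HashSet;
    import java.util.Set;

    // Toy model of cyclic-replication loop prevention. An edit remembers
    // every cluster that has seen it; a cluster only ships it to peers not
    // yet in that set, so 0 -> 1 -> 2 -> 0 stops exactly at cluster 0.
    public class ReplicationLoopSketch {
      static final class Edit {
        final Set<Integer> seenBy = new HashSet<Integer>();
        Edit(int originCluster) { seenBy.add(originCluster); }
      }

      // Returns false when the peer has already seen the edit (cycle closed).
      static boolean ship(Edit edit, int peerCluster) {
        return edit.seenBy.add(peerCluster);
      }

      public static void main(String[] args) {
        Edit edit = new Edit(0);            // edit born on cluster 0
        System.out.println(ship(edit, 1));  // true: replicate 0 -> 1
        System.out.println(ship(edit, 2));  // true: replicate 1 -> 2
        System.out.println(ship(edit, 0));  // false: 2 -> 0 closes the loop
      }
    }
]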
 
   /**
-   * Add a row to a table in each cluster, check it's replicated,
-   * delete it, check's gone
-   * Also check the puts and deletes are not replicated back to
-   * the originating cluster.
+   * Tests the cyclic replication scenario 0 -> 1 -> 2 -> 1. Since no peer
+   * replicates into cluster 0, it only ever sees its own put and delete,
+   * while clusters 1 and 2 each see all three.
    */
-  @Test(timeout=300000)
-  public void testSimplePutDelete() throws Exception {
-    LOG.info("testSimplePutDelete");
-    utility1.startMiniCluster();
-    utility2.startMiniCluster();
+  @Test(timeout = 300000)
+  public void testCyclicReplication3() throws Exception {
+    LOG.info("testCyclicReplication2");
+    int numClusters = 3;
+    HTable[] htables = null;
+    try {
+      startMiniClusters(numClusters);
+      createTableOnClusters(table);
+
+      // Test the replication scenario of 0 -> 1 -> 2 -> 1
+      addPeer("1", 0, 1);
+      addPeer("1", 1, 2);
+      addPeer("1", 2, 1);
+
+      htables = getHTablesOnClusters(tableName);
+
+      // put "row" and wait 'til it got around
+      putAndWait(row, famName, htables[0], htables[2]);
+      putAndWait(row1, famName, htables[1], htables[2]);
+      putAndWait(row2, famName, htables[2], htables[1]);
+
+      deleteAndWait(row, htables[0], htables[2]);
+      deleteAndWait(row1, htables[1], htables[2]);
+      deleteAndWait(row2, htables[2], htables[1]);
+
+      int[] expectedCounts = new int[] { 1, 3, 3 };
+      validateCounts(htables, put, expectedCounts);
+      validateCounts(htables, delete, expectedCounts);
+    } finally {
+      close(htables);
+      shutDownMiniClusters();
+    }
+  }
 
-    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
-    ReplicationAdmin admin2 = new ReplicationAdmin(conf2);
+  @After
+  public void tearDown() throws IOException {
+    configurations = null;
+    utilities = null;
+  }
 
-    new HBaseAdmin(conf1).createTable(table);
-    new HBaseAdmin(conf2).createTable(table);
-    HTable htable1 = new HTable(conf1, tableName);
-    htable1.setWriteBufferSize(1024);
-    HTable htable2 = new HTable(conf2, tableName);
-    htable2.setWriteBufferSize(1024);
+  @SuppressWarnings("resource")
+  private void startMiniClusters(int numClusters) throws Exception {
+    Random random = new Random();
+    utilities = new HBaseTestingUtility[numClusters];
+    configurations = new Configuration[numClusters];
+    for (int i = 0; i < numClusters; i++) {
+      Configuration conf = new Configuration(baseConfiguration);
+      conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/" + i + random.nextInt());
+      HBaseTestingUtility utility = new HBaseTestingUtility(conf);
+      if (i == 0) {
+        utility.startMiniZKCluster();
+        miniZK = utility.getZkCluster();
+      } else {
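+        // Reuse the first cluster's mini ZK. Setting it through this method
+        // also tells this utility not to shut the shared ZK cluster down
+        // when its own HBase cluster is shut down.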
+        utility.setZkCluster(miniZK);
+      }
+      utility.startMiniCluster();
+      utilities[i] = utility;
+      configurations[i] = conf;
+      new ZooKeeperWatcher(conf, "cluster" + i, null, true);
+    }
+  }
 
-    // set M-M
-    admin1.addPeer("1", utility2.getClusterKey());
-    admin2.addPeer("1", utility1.getClusterKey());
+  private void shutDownMiniClusters() throws Exception {
+    int numClusters = utilities.length;
+    for (int i = numClusters - 1; i >= 0; i--) {
+      if (utilities[i] != null) {
+        utilities[i].shutdownMiniCluster();
+      }
+    }
+    miniZK.shutdown();
+  }
 
-    // add rows to both clusters,
-    // make sure they are both replication
-    putAndWait(row, famName, htable1, htable2);
-    putAndWait(row1, famName, htable2, htable1);
+  private void createTableOnClusters(HTableDescriptor table) throws Exception {
+    int numClusters = configurations.length;
+    for (int i = 0; i < numClusters; i++) {
+      HBaseAdmin hbaseAdmin = null;
+      try {
+        hbaseAdmin = new HBaseAdmin(configurations[i]);
+        hbaseAdmin.createTable(table);
+      } finally {
+        close(hbaseAdmin);
+      }
+    }
+  }
 
-    // make sure "row" did not get replicated back.
-    assertEquals("Puts were replicated back ", 2, getCount(htable1, put));
+  private void addPeer(String id, int masterClusterNumber,
+      int slaveClusterNumber) throws Exception {
+    ReplicationAdmin replicationAdmin = null;
+    try {
+      replicationAdmin = new ReplicationAdmin(
+          configurations[masterClusterNumber]);
+      replicationAdmin.addPeer(id,
+          utilities[slaveClusterNumber].getClusterKey());
+    } finally {
+      close(replicationAdmin);
+    }
+  }
 
-    // delete "row" and wait
-    deleteAndWait(row, htable1, htable2);
+  private void disablePeer(String id, int masterClusterNumber) throws Exception {
+    ReplicationAdmin replicationAdmin = null;
+    try {
+      replicationAdmin = new ReplicationAdmin(
+          configurations[masterClusterNumber]);
+      replicationAdmin.disablePeer(id);
+    } finally {
+      close(replicationAdmin);
+    }
+  }
 
-    // make the 2nd cluster replicated back
-    assertEquals("Puts were replicated back ", 2, getCount(htable2, put));
+  private void enablePeer(String id, int masterClusterNumber) throws Exception {
+    ReplicationAdmin replicationAdmin = null;
+    try {
+      replicationAdmin = new ReplicationAdmin(
+          configurations[masterClusterNumber]);
+      replicationAdmin.enablePeer(id);
+    } finally {
+      close(replicationAdmin);
+    }
+  }
 
-    deleteAndWait(row1, htable2, htable1);
+  private void close(Closeable... closeables) {
+    try {
+      if (closeables != null) {
+        for (Closeable closeable : closeables) {
+          closeable.close();
+        }
+      }
+    } catch (Exception e) {
+      LOG.warn("Exception occured while closing the object:", e);
+    }
+  }
+
+  @SuppressWarnings("resource")
+  private HTable[] getHTablesOnClusters(byte[] tableName) throws Exception {
+    int numClusters = utilities.length;
+    HTable[] htables = new HTable[numClusters];
+    for (int i = 0; i < numClusters; i++) {
+      HTable htable = new HTable(configurations[i], tableName);
+      htable.setWriteBufferSize(1024);
+      htables[i] = htable;
+    }
+    return htables;
+  }
 
-    assertEquals("Deletes were replicated back ", 2, getCount(htable1, delete));
-    utility2.shutdownMiniCluster();
-    utility1.shutdownMiniCluster();
+  private void validateCounts(HTable[] htables, byte[] type,
+      int[] expectedCounts) throws IOException {
+    for (int i = 0; i < htables.length; i++) {
+      assertEquals(Bytes.toString(type) + " were replicated back ",
+          expectedCounts[i], getCount(htables[i], type));
+    }
   }
 
-  private int getCount(HTable t, byte[] type)  throws IOException {
+  private int getCount(HTable t, byte[] type) throws IOException {
     Get test = new Get(row);
-    test.setAttribute("count", new byte[]{});
+    test.setAttribute("count", new byte[] {});
     Result res = t.get(test);
     return Bytes.toInt(res.getValue(count, type));
   }
 
   private void deleteAndWait(byte[] row, HTable source, HTable target)
-  throws Exception {
+      throws Exception {
     Delete del = new Delete(row);
     source.delete(del);
-
-    Get get = new Get(row);
-    for (int i = 0; i < NB_RETRIES; i++) {
-      if (i==NB_RETRIES-1) {
-        fail("Waited too much time for del replication");
-      }
-      Result res = target.get(get);
-      if (res.size() >= 1) {
-        LOG.info("Row not deleted");
-        Thread.sleep(SLEEP_TIME);
-      } else {
-        break;
-      }
-    }
-  }
-
-  private void check(byte[] row, byte[] fam, HTable t) throws IOException {
-    Get get = new Get(row);
-    Result res = t.get(get);
-    if (res.size() == 0) {
-      fail("Row is missing");
-    }
+    wait(row, target, true);
   }
 
   private void putAndWait(byte[] row, byte[] fam, HTable source, HTable target)
-  throws Exception {
+      throws Exception {
     Put put = new Put(row);
     put.add(fam, row, row);
     source.put(put);
-
-    wait(row, target);
+    wait(row, target, false);
   }
 
-  private void wait(byte[] row, HTable target) throws Exception {
+  private void wait(byte[] row, HTable target, boolean isDeleted)
+      throws Exception {
     Get get = new Get(row);
     for (int i = 0; i < NB_RETRIES; i++) {
-      if (i==NB_RETRIES-1) {
-        fail("Waited too much time for put replication");
+      if (i == NB_RETRIES - 1) {
+        fail("Waited too much time for replication. Row:" + Bytes.toString(row)
+            + ". IsDeleteReplication:" + isDeleted);
       }
       Result res = target.get(get);
-      if (res.size() == 0) {
-        LOG.info("Row not available");
+      boolean sleep = isDeleted ? res.size() > 0 : res.size() == 0;
+      if (sleep) {
+        LOG.info("Waiting for more time for replication. Row:"
+            + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted);
         Thread.sleep(SLEEP_TIME);
       } else {
-        assertArrayEquals(res.value(), row);
+        if (!isDeleted) {
+          assertArrayEquals(res.value(), row);
+        }
+        LOG.info("Obtained row:"
+            + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted);
         break;
       }
-    }    
+    }
   }
 
   /**
-   * Use a coprocessor to count puts and deletes.
-   * as KVs would be replicated back with the same timestamp
-   * there is otherwise no way to count them.
+   * Use a coprocessor to count puts and deletes. As KVs would be replicated back with the same
+   * timestamp there is otherwise no way to count them.
    */
   public static class CoprocessorCounter extends BaseRegionObserver {
     private int nCount = 0;
     private int nDelete = 0;
 
     @Override
-    public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e,
-        final Put put, final WALEdit edit, 
-        final Durability durability)
-        throws IOException {
+    public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
+        final WALEdit edit, final Durability durability) throws IOException {
       nCount++;
     }
+
     @Override
     public void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
-        final Delete delete, final WALEdit edit, 
-        final Durability durability)
-        throws IOException {
+        final Delete delete, final WALEdit edit, final Durability durability) throws IOException {
       nDelete++;
     }
+
     @Override
-    public void preGet(final ObserverContext<RegionCoprocessorEnvironment> c,
-        final Get get, final List<KeyValue> result) throws IOException {
+    public void preGet(final ObserverContext<RegionCoprocessorEnvironment> c, final Get get,
+        final List<KeyValue> result) throws IOException {
       if (get.getAttribute("count") != null) {
         result.clear();
         // order is important!
@@ -344,5 +440,4 @@ public class TestMasterReplication {
     }
   }
 
-}
-
+}
\ No newline at end of file
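
[Assuming a standard Maven checkout of the 0.95 branch, the reworked suite can be run on its own with something like `mvn test -Dtest=TestMasterReplication`; it starts several mini HBase clusters against a shared mini ZK, so expect it to take a few minutes.]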


