hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject [2/3] hbase git commit: HBASE-12471 Task 4. replace internal ConnectionManager#{delete, get}Connection use with #close, #createConnection (0.98, 0.99) under src/main/java
Date Mon, 24 Nov 2014 18:13:26 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/f3c5f117/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
index c545dda..f5e3c30 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
@@ -26,16 +26,22 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -49,6 +55,7 @@ import org.junit.experimental.categories.Category;
  */
 @Category(MediumTests.class)
 public class TestReplicationEndpoint extends TestReplicationBase {
+  static final Log LOG = LogFactory.getLog(TestReplicationEndpoint.class);
 
   static int numRegionServers;
 
@@ -72,13 +79,14 @@ public class TestReplicationEndpoint extends TestReplicationBase {
     ReplicationEndpointForTest.contructedCount.set(0);
     ReplicationEndpointForTest.startedCount.set(0);
     ReplicationEndpointForTest.replicateCount.set(0);
+    ReplicationEndpointReturningFalse.replicated.set(false);
     ReplicationEndpointForTest.lastEntries = null;
    for (RegionServerThread rs : utility1.getMiniHBaseCluster().getRegionServerThreads()) {
       utility1.getHBaseAdmin().rollHLogWriter(rs.getRegionServer().getServerName().toString());
     }
   }
 
-  @Test
+  @Test (timeout=120000)
   public void testCustomReplicationEndpoint() throws Exception {
     // test installing a custom replication endpoint other than the default one.
     admin.addPeer("testCustomReplicationEndpoint",
@@ -117,17 +125,32 @@ public class TestReplicationEndpoint extends TestReplicationBase {
     admin.removePeer("testCustomReplicationEndpoint");
   }
 
-  @Test
+  @Test (timeout=120000)
   public void testReplicationEndpointReturnsFalseOnReplicate() throws Exception {
-    admin.addPeer("testReplicationEndpointReturnsFalseOnReplicate",
+    Assert.assertEquals(0, ReplicationEndpointForTest.replicateCount.get());
+    Assert.assertTrue(!ReplicationEndpointReturningFalse.replicated.get());
+    int peerCount = admin.getPeersCount();
+    final String id = "testReplicationEndpointReturnsFalseOnReplicate";
+    admin.addPeer(id,
       new ReplicationPeerConfig().setClusterKey(ZKUtil.getZooKeeperClusterKey(conf1))
         .setReplicationEndpointImpl(ReplicationEndpointReturningFalse.class.getName()), null);
-    // now replicate some data.
+    // This test is flakey and then there is so much stuff flying around in here its, hard to
+    // debug.  Peer needs to be up for the edit to make it across. This wait on
+    // peer count seems to be a hack that has us not progress till peer is up.
+    if (admin.getPeersCount() <= peerCount) {
+      LOG.info("Waiting on peercount to go up from " + peerCount);
+      Threads.sleep(100);
+    }
+    // now replicate some data
     doPut(row);
 
     Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
+        // Looks like replication endpoint returns false unless we put more than 10 edits. We
+        // only send over one edit.
+        int count = ReplicationEndpointForTest.replicateCount.get();
+        LOG.info("count=" + count);
         return ReplicationEndpointReturningFalse.replicated.get();
       }
     });
@@ -138,15 +161,17 @@ public class TestReplicationEndpoint extends TestReplicationBase {
     admin.removePeer("testReplicationEndpointReturnsFalseOnReplicate");
   }
 
-  @Test
+  @Test (timeout=120000)
   public void testWALEntryFilterFromReplicationEndpoint() throws Exception {
     admin.addPeer("testWALEntryFilterFromReplicationEndpoint",
       new ReplicationPeerConfig().setClusterKey(ZKUtil.getZooKeeperClusterKey(conf1))
        .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()), null);
     // now replicate some data.
-    doPut(Bytes.toBytes("row1"));
-    doPut(row);
-    doPut(Bytes.toBytes("row2"));
+    try (Connection connection = ConnectionFactory.createConnection(conf1)) {
+      doPut(connection, Bytes.toBytes("row1"));
+      doPut(connection, row);
+      doPut(connection, Bytes.toBytes("row2"));
+    }
 
     Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
       @Override
@@ -161,11 +186,17 @@ public class TestReplicationEndpoint extends TestReplicationBase {
 
 
   private void doPut(byte[] row) throws IOException {
-    Put put = new Put(row);
-    put.add(famName, row, row);
-    htable1 = new HTable(conf1, tableName);
-    htable1.put(put);
-    htable1.close();
+    try (Connection connection = ConnectionFactory.createConnection(conf1)) {
+      doPut(connection, row);
+    }
+  }
+
+  private void doPut(final Connection connection, final byte [] row) throws IOException {
+    try (Table t = connection.getTable(tableName)) {
+      Put put = new Put(row);
+      put.add(famName, row, row);
+      t.put(put);
+    }
   }
 
   private static void doAssert(byte[] row) throws Exception {
@@ -217,6 +248,7 @@ public class TestReplicationEndpoint extends TestReplicationBase {
   }
 
  public static class ReplicationEndpointReturningFalse extends ReplicationEndpointForTest {
+    static int COUNT = 10;
     static AtomicReference<Exception> ex = new AtomicReference<Exception>(null);
     static AtomicBoolean replicated = new AtomicBoolean(false);
     @Override
@@ -229,8 +261,9 @@ public class TestReplicationEndpoint extends TestReplicationBase {
       }
 
       super.replicate(replicateContext);
+      LOG.info("Replicated " + row + ", count=" + replicateCount.get());
 
-      replicated.set(replicateCount.get() > 10); // first 10 times, we return false
+      replicated.set(replicateCount.get() > COUNT); // first 10 times, we return false
       return replicated.get();
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f3c5f117/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
index 7834a0e..47384fa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
@@ -24,6 +24,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.UnknownScannerException;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -54,7 +55,7 @@ public class TestReplicationKillRS extends TestReplicationBase {
     Thread killer = killARegionServer(util, 5000, rsToKill1);
 
     LOG.info("Start loading table");
-    int initialCount = utility1.loadTable(htable1, famName);
+    int initialCount = utility1.loadTable((HTable)htable1, famName);
     LOG.info("Done loading table");
     killer.join(5000);
     LOG.info("Done waiting for threads");

http://git-wip-us.apache.org/repos/asf/hbase/blob/f3c5f117/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
index 5019c8e..ca5be65 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
@@ -68,7 +68,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
    */
   @Before
   public void setUp() throws Exception {
-    htable1.setAutoFlush(true, true);
+    ((HTable)htable1).setAutoFlush(true, true);
     // Starting and stopping replication can make us miss new logs,
     // rolling like this makes sure the most recent one gets added to the queue
     for ( JVMClusterUtil.RegionServerThread r :
@@ -246,7 +246,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
     LOG.info("testSmallBatch");
     Put put;
     // normal Batch tests
-    htable1.setAutoFlush(false, true);
+    ((HTable)htable1).setAutoFlush(false, true);
     for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
       put = new Put(Bytes.toBytes(i));
       put.add(famName, row, row);
@@ -386,7 +386,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
   public void testLoading() throws Exception {
     LOG.info("Writing out rows to table1 in testLoading");
     htable1.setWriteBufferSize(1024);
-    htable1.setAutoFlush(false, true);
+    ((HTable)htable1).setAutoFlush(false, true);
     for (int i = 0; i < NB_ROWS_IN_BIG_BATCH; i++) {
       Put put = new Put(Bytes.toBytes(i));
       put.add(famName, row, row);

http://git-wip-us.apache.org/repos/asf/hbase/blob/f3c5f117/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
index f06cf0e..acf000b2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
@@ -85,7 +85,7 @@ public class TestAccessController2 extends SecureTestUtil {
       public Object run() throws Exception {
         HTableDescriptor desc = new HTableDescriptor(TEST_TABLE.getTableName());
         desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
-        Admin admin = new HBaseAdmin(conf);
+        Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
         try {
           admin.createTable(desc);
         } finally {


Mime
View raw message