kudu-commits mailing list archives

From a...@apache.org
Subject [kudu] 01/02: [java] use KuduTestHarness in TestConnectionCache
Date Sat, 14 Mar 2020 09:23:41 GMT
This is an automated email from the ASF dual-hosted git repository.

adar pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git

commit af7595bbff5a1578fc1978a8ec00f9946458c43e
Author: Alexey Serbin <alexey@apache.org>
AuthorDate: Fri Mar 13 14:11:53 2020 -0700

    [java] use KuduTestHarness in TestConnectionCache
    
    Change-Id: I8ba343de81edafd6eb563fdd506271450487b432
    Reviewed-on: http://gerrit.cloudera.org:8080/15431
    Tested-by: Kudu Jenkins
    Reviewed-by: Adar Dembo <adar@cloudera.com>
---
 .../apache/kudu/client/TestConnectionCache.java    | 113 ++++++++++-----------
 1 file changed, 54 insertions(+), 59 deletions(-)

diff --git a/java/kudu-client/src/test/java/org/apache/kudu/client/TestConnectionCache.java b/java/kudu-client/src/test/java/org/apache/kudu/client/TestConnectionCache.java
index 19ebfc3..3486cc3 100644
--- a/java/kudu-client/src/test/java/org/apache/kudu/client/TestConnectionCache.java
+++ b/java/kudu-client/src/test/java/org/apache/kudu/client/TestConnectionCache.java
@@ -27,75 +27,70 @@ import com.stumbleupon.async.Deferred;
 import org.junit.Rule;
 import org.junit.Test;
 
-import org.apache.kudu.test.cluster.MiniKuduCluster;
-import org.apache.kudu.test.junit.RetryRule;
+import org.apache.kudu.test.KuduTestHarness;
 import org.apache.kudu.util.NetUtil;
 
 public class TestConnectionCache {
 
   @Rule
-  public RetryRule retryRule = new RetryRule();
+  public KuduTestHarness harness = new KuduTestHarness();
 
   @Test(timeout = 50000)
   @SuppressWarnings("FutureReturnValueIgnored")
   public void test() throws Exception {
-    try (MiniKuduCluster cluster = new MiniKuduCluster.MiniKuduClusterBuilder()
-                                                      .numMasterServers(3)
-                                                      .build();
-         AsyncKuduClient client = new AsyncKuduClient.AsyncKuduClientBuilder(
-             cluster.getMasterAddressesAsString()).build()) {
-      // Below we ping the masters directly using RpcProxy, so if they aren't ready to process
-      // RPCs we'll get an error. Here by listing the tables we make sure this won't happen since
-      // it won't return until a master leader is found.
-      client.getTablesList().join();
-
-      HostAndPort masterHostPort = cluster.getMasterServers().get(0);
-      ServerInfo firstMaster = new ServerInfo("fake-uuid",
-                                              masterHostPort,
-                                              NetUtil.getInetAddress(masterHostPort.getHost()),
-                                              /*location=*/"");
-
-      // 3 masters in the cluster. Connections should have been cached since we forced
-      // a cluster connection above.
-      // No tservers have been connected to by the client since we haven't accessed
-      // any data.
-      assertEquals(3, client.getConnectionListCopy().size());
-      assertFalse(allConnectionsTerminated(client));
-
-      final RpcProxy proxy = client.newRpcProxy(firstMaster);
-
-      // Disconnect from the server.
-      proxy.getConnection().disconnect().awaitUninterruptibly();
-      waitForConnectionToTerminate(proxy.getConnection());
-      assertTrue(proxy.getConnection().isTerminated());
-
-      // Make sure not all the connections in the connection cache are disconnected yet. Actually,
-      // only the connection to server '0' should be disconnected.
-      assertFalse(allConnectionsTerminated(client));
-
-      // For a new RpcProxy instance, a new connection to the same destination is established.
-      final RpcProxy newHelper = client.newRpcProxy(firstMaster);
-      final Connection newConnection = newHelper.getConnection();
-      assertNotNull(newConnection);
-      assertNotSame(proxy.getConnection(), newConnection);
-
-      // The client-->server connection should not be established at this point yet. Wait a little
-      // before checking the state of the connection: this is to check for the status of the
-      // underlying connection _after_ the negotiation is run, if a regression happens. The
-      // negotiation on the underlying connection should be run upon submitting the very first
-      // RPC via the proxy object, not upon creating RpcProxy instance (see KUDU-1878).
-      Thread.sleep(500);
-      assertFalse(newConnection.isReady());
-      pingConnection(newHelper);
-      assertTrue(newConnection.isReady());
-
-      // Test disconnecting and make sure we cleaned up all the connections.
-      for (Connection c : client.getConnectionListCopy()) {
-        c.disconnect().awaitUninterruptibly();
-        waitForConnectionToTerminate(c);
-      }
-      assertTrue(allConnectionsTerminated(client));
+    AsyncKuduClient client = harness.getAsyncClient();
+
+    // Below we ping the masters directly using RpcProxy, so if they aren't ready to process
+    // RPCs we'll get an error. Here by listing the tables we make sure this won't happen since
+    // it won't return until a master leader is found.
+    client.getTablesList().join();
+
+    HostAndPort masterHostPort = harness.getMasterServers().get(0);
+    ServerInfo firstMaster = new ServerInfo("fake-uuid",
+                                            masterHostPort,
+                                            NetUtil.getInetAddress(masterHostPort.getHost()),
+                                            /*location=*/"");
+
+    // 3 masters in the cluster. Connections should have been cached since we forced
+    // a cluster connection above.
+    // No tservers have been connected to by the client since we haven't accessed
+    // any data.
+    assertEquals(3, client.getConnectionListCopy().size());
+    assertFalse(allConnectionsTerminated(client));
+
+    final RpcProxy proxy = client.newRpcProxy(firstMaster);
+
+    // Disconnect from the server.
+    proxy.getConnection().disconnect().awaitUninterruptibly();
+    waitForConnectionToTerminate(proxy.getConnection());
+    assertTrue(proxy.getConnection().isTerminated());
+
+    // Make sure not all the connections in the connection cache are disconnected yet. Actually,
+    // only the connection to server '0' should be disconnected.
+    assertFalse(allConnectionsTerminated(client));
+
+    // For a new RpcProxy instance, a new connection to the same destination is established.
+    final RpcProxy newHelper = client.newRpcProxy(firstMaster);
+    final Connection newConnection = newHelper.getConnection();
+    assertNotNull(newConnection);
+    assertNotSame(proxy.getConnection(), newConnection);
+
+    // The client-->server connection should not be established at this point yet. Wait a little
+    // before checking the state of the connection: this is to check for the status of the
+    // underlying connection _after_ the negotiation is run, if a regression happens. The
+    // negotiation on the underlying connection should be run upon submitting the very first
+    // RPC via the proxy object, not upon creating RpcProxy instance (see KUDU-1878).
+    Thread.sleep(500);
+    assertFalse(newConnection.isReady());
+    pingConnection(newHelper);
+    assertTrue(newConnection.isReady());
+
+    // Test disconnecting and make sure we cleaned up all the connections.
+    for (Connection c : client.getConnectionListCopy()) {
+      c.disconnect().awaitUninterruptibly();
+      waitForConnectionToTerminate(c);
     }
+    assertTrue(allConnectionsTerminated(client));
   }
 
   private boolean allConnectionsTerminated(AsyncKuduClient client) {


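For readers skimming the diff, the net effect is that the test no longer assembles its own MiniKuduCluster or declares a separate RetryRule; a single KuduTestHarness rule takes the place of both, and the test borrows its async client and master addresses from it. Below is a minimal sketch of the resulting test skeleton, using only the harness calls visible in the diff (new KuduTestHarness(), getAsyncClient(), getMasterServers()); the class name, test name, and body are illustrative and not part of the commit.

// Hypothetical example class; it sits in the real test's package so that
// client-side types such as AsyncKuduClient resolve without extra imports.
package org.apache.kudu.client;

import static org.junit.Assert.assertEquals;

import org.junit.Rule;
import org.junit.Test;

import org.apache.kudu.test.KuduTestHarness;

public class ExampleHarnessTest {
  // A single rule supplies the mini cluster and clients for each test, taking
  // the place of both the hand-built MiniKuduCluster and the separate
  // RetryRule removed by this commit.
  @Rule
  public KuduTestHarness harness = new KuduTestHarness();

  @Test(timeout = 50000)
  public void example() throws Exception {
    // The client is borrowed from the harness rather than built from
    // AsyncKuduClientBuilder with the cluster's master addresses.
    AsyncKuduClient client = harness.getAsyncClient();

    // Listing tables only returns once a master leader is found, so later
    // direct RPCs to the masters (as in the real test) won't race startup.
    client.getTablesList().join();

    // Master endpoints come from the harness as well; its default cluster runs
    // three masters, which the updated test relies on when it asserts three
    // cached connections.
    assertEquals(3, harness.getMasterServers().size());
  }
}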