From: mmiller@apache.org
To: commits@accumulo.apache.org
Date: Tue, 04 Jun 2019 19:29:00 +0000
Subject: [accumulo] branch 2.0 updated: Fix #1084 partial - IT tests failing on Standalone (#1164)

This is an automated email from the ASF dual-hosted git repository.
mmiller pushed a commit to branch 2.0
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/2.0 by this push:
     new fcdbc0b  Fix #1084 partial - IT tests failing on Standalone (#1164)
fcdbc0b is described below

commit fcdbc0b029fcb5c6447df37ae969b0b91d7c8e56
Author: hkeebler <49656678+hkeebler@users.noreply.github.com>
AuthorDate: Tue Jun 4 15:28:55 2019 -0400

    Fix #1084 partial - IT tests failing on Standalone (#1164)

    * Fixed AccumuloClientIT, ManyWriteAheadLogsIT, ReadWriteIT, YieldScannersIT, BadDeleteMarkersIT
    * And others that failed due to null in the command prefix
    * Also includes IteratorEnvIT fix
---
 .../accumulo/harness/AccumuloClusterHarness.java   | 11 +++--
 .../org/apache/accumulo/test/IteratorEnvIT.java    | 52 +++++++++++++---------
 .../accumulo/test/functional/AccumuloClientIT.java | 48 ++++++++++++++------
 .../test/functional/ManyWriteAheadLogsIT.java      | 38 ++++++++++++++++
 .../accumulo/test/functional/ReadWriteIT.java      | 34 +++++---------
 .../accumulo/test/functional/YieldingIterator.java | 12 +++++
 6 files changed, 137 insertions(+), 58 deletions(-)

diff --git a/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
index 72a5eda..95984a6 100644
--- a/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
+++ b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
@@ -142,14 +142,19 @@ public abstract class AccumuloClusterHarness extends AccumuloITBase
       standaloneCluster.setAccumuloHome(conf.getAccumuloHome());
       standaloneCluster.setClientAccumuloConfDir(conf.getClientAccumuloConfDir());
       standaloneCluster.setHadoopConfDir(conf.getHadoopConfDir());
-      standaloneCluster.setServerCmdPrefix(conf.getServerCmdPrefix());
-      standaloneCluster.setClientCmdPrefix(conf.getClientCmdPrefix());
+      // If these were not provided then ensure they are not null
+      standaloneCluster
+          .setServerCmdPrefix(conf.getServerCmdPrefix() == null ? "" : conf.getServerCmdPrefix());
+      standaloneCluster
+          .setClientCmdPrefix(conf.getClientCmdPrefix() == null ? "" : conf.getClientCmdPrefix());
       cluster = standaloneCluster;
 
       // For SASL, we need to get the Hadoop configuration files as well otherwise UGI will log in
       // as SIMPLE instead of KERBEROS
-      Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration();
       if (saslEnabled()) {
+        // Note that getting the Hadoop config creates a servercontext which wacks up the
+        // AccumuloClientIT test so if SASL is enabled then the testclose() will fail
+        Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration();
         UserGroupInformation.setConfiguration(hadoopConfiguration);
         // Login as the admin user to start the tests
         UserGroupInformation.loginUserFromKeytab(conf.getAdminPrincipal(),
diff --git a/test/src/main/java/org/apache/accumulo/test/IteratorEnvIT.java b/test/src/main/java/org/apache/accumulo/test/IteratorEnvIT.java
index 935b32b..9399c3a 100644
--- a/test/src/main/java/org/apache/accumulo/test/IteratorEnvIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/IteratorEnvIT.java
@@ -17,9 +17,6 @@
 package org.apache.accumulo.test;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -80,12 +77,14 @@ public class IteratorEnvIT extends AccumuloClusterHarness {
 
       // Checking for compaction on a scan should throw an error.
       try {
-        assertFalse(env.isUserCompaction());
-        fail("Expected to throw IllegalStateException when checking compaction on a scan.");
+        env.isUserCompaction();
+        throw new RuntimeException(
+            "Test failed - Expected to throw IllegalStateException when checking compaction on a scan.");
       } catch (IllegalStateException e) {}
       try {
-        assertFalse(env.isFullMajorCompaction());
-        fail("Expected to throw IllegalStateException when checking compaction on a scan.");
+        env.isFullMajorCompaction();
+        throw new RuntimeException(
+            "Test failed - Expected to throw IllegalStateException when checking compaction on a scan.");
       } catch (IllegalStateException e) {}
     }
   }
@@ -101,8 +100,16 @@ public class IteratorEnvIT extends AccumuloClusterHarness {
         IteratorEnvironment env) throws IOException {
       super.init(source, options, env);
       testEnv(scope, options, env);
-      assertTrue(env.isUserCompaction());
-      assertTrue(env.isFullMajorCompaction());
+      try {
+        env.isUserCompaction();
+      } catch (IllegalStateException e) {
+        throw new RuntimeException("Test failed");
+      }
+      try {
+        env.isFullMajorCompaction();
+      } catch (IllegalStateException e) {
+        throw new RuntimeException("Test failed");
+      }
     }
   }
 
@@ -118,12 +125,14 @@ public class IteratorEnvIT extends AccumuloClusterHarness {
       super.init(source, options, env);
       testEnv(scope, options, env);
       try {
-        assertTrue(env.isUserCompaction());
-        fail("Expected to throw IllegalStateException when checking compaction on a scan.");
+        env.isUserCompaction();
+        throw new RuntimeException(
+            "Test failed - Expected to throw IllegalStateException when checking compaction on a scan.");
       } catch (IllegalStateException e) {}
       try {
-        assertFalse(env.isFullMajorCompaction());
-        fail("Expected to throw IllegalStateException when checking compaction on a scan.");
+        env.isFullMajorCompaction();
+        throw new RuntimeException(
+            "Test failed - Expected to throw IllegalStateException when checking compaction on a scan.");
       } catch (IllegalStateException e) {}
     }
   }
@@ -135,13 +144,16 @@
   private static void testEnv(IteratorScope scope, Map<String,String> opts,
       IteratorEnvironment env) {
     TableId expectedTableId = TableId.of(opts.get("expected.table.id"));
-    assertEquals("Expected table property not found", "value1",
-        env.getConfig().get("table.custom.iterator.env.test"));
-    assertEquals("Expected table property not found", "value1",
-        env.getServiceEnv().getConfiguration(env.getTableId()).getTableCustom("iterator.env.test"));
-    assertEquals("Error getting iterator scope", scope, env.getIteratorScope());
-    assertFalse("isSamplingEnabled returned true, expected false", env.isSamplingEnabled());
-    assertEquals("Error getting Table ID", expectedTableId, env.getTableId());
+    if (!"value1".equals(env.getConfig().get("table.custom.iterator.env.test")) && !"value1".equals(
+        env.getServiceEnv().getConfiguration(env.getTableId()).getTableCustom("iterator.env.test")))
+      throw new RuntimeException("Test failed - Expected table property not found.");
+    if (!scope.equals(env.getIteratorScope()))
+      throw new RuntimeException("Test failed - Error getting iterator scope");
+    if (env.isSamplingEnabled())
+      throw new RuntimeException("Test failed - isSamplingEnabled returned true, expected false");
+    if (!expectedTableId.equals(env.getTableId()))
+      throw new RuntimeException("Test failed - Error getting Table ID");
+
   }
 
   @Before
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java b/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java
index c6840ff..af93b37 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java
@@ -23,7 +23,9 @@ import static org.junit.Assert.fail;
 
 import java.util.Map.Entry;
 import java.util.Properties;
+import java.util.Set;
 
+import org.apache.accumulo.cluster.ClusterUser;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -40,12 +42,28 @@ import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.singletons.SingletonManager;
 import org.apache.accumulo.core.singletons.SingletonManager.Mode;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.junit.After;
 import org.junit.Test;
 
 import com.google.common.collect.Iterables;
 
 public class AccumuloClientIT extends AccumuloClusterHarness {
 
+  @After
+  public void deleteUsers() throws Exception {
+    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
+      Set<String> users = client.securityOperations().listLocalUsers();
+      ClusterUser user1 = getUser(0);
+      ClusterUser user2 = getUser(1);
+      if (users.contains(user1.getPrincipal())) {
+        client.securityOperations().dropLocalUser(user1.getPrincipal());
+      }
+      if (users.contains(user2.getPrincipal())) {
+        client.securityOperations().dropLocalUser(user2.getPrincipal());
+      }
+    }
+  }
+
   private interface CloseCheck {
     void check() throws Exception;
   }
@@ -78,11 +96,13 @@ public class AccumuloClientIT extends AccumuloClusterHarness {
     AccumuloClient c = Accumulo.newClient().from(getClientProps()).build();
     String instanceName = getClientInfo().getInstanceName();
     String zookeepers = getClientInfo().getZooKeepers();
 
-    final String user = "testuser";
-    final String password = "testpassword";
-    c.securityOperations().createLocalUser(user, new PasswordToken(password));
-    AccumuloClient client = Accumulo.newClient().to(instanceName, zookeepers).as(user, password)
+    ClusterUser testuser1 = getUser(0);
+    final String user1 = testuser1.getPrincipal();
+    final String password1 = testuser1.getPassword();
+    c.securityOperations().createLocalUser(user1, new PasswordToken(password1));
+
+    AccumuloClient client = Accumulo.newClient().to(instanceName, zookeepers).as(user1, password1)
         .zkTimeout(1234).build();
 
     Properties props = client.properties();
@@ -90,37 +110,39 @@ public class AccumuloClientIT extends AccumuloClusterHarness {
     ClientInfo info = ClientInfo.from(client.properties());
     assertEquals(instanceName, info.getInstanceName());
     assertEquals(zookeepers, info.getZooKeepers());
-    assertEquals(user, client.whoami());
+    assertEquals(user1, client.whoami());
     assertEquals(1234, info.getZooKeepersSessionTimeOut());
 
-    props = Accumulo.newClientProperties().to(instanceName, zookeepers).as(user, password).build();
+    props =
+        Accumulo.newClientProperties().to(instanceName, zookeepers).as(user1, password1).build();
     assertTrue(props.containsKey(ClientProperty.AUTH_TOKEN.getKey()));
-    assertEquals(password, props.get(ClientProperty.AUTH_TOKEN.getKey()));
+    assertEquals(password1, props.get(ClientProperty.AUTH_TOKEN.getKey()));
     assertEquals("password", props.get(ClientProperty.AUTH_TYPE.getKey()));
     assertEquals(instanceName, props.getProperty(ClientProperty.INSTANCE_NAME.getKey()));
 
     info = ClientInfo.from(props);
     assertEquals(instanceName, info.getInstanceName());
     assertEquals(zookeepers, info.getZooKeepers());
-    assertEquals(user, info.getPrincipal());
+    assertEquals(user1, info.getPrincipal());
     assertTrue(info.getAuthenticationToken() instanceof PasswordToken);
 
     props = new Properties();
     props.put(ClientProperty.INSTANCE_NAME.getKey(), instanceName);
     props.put(ClientProperty.INSTANCE_ZOOKEEPERS.getKey(), zookeepers);
-    props.put(ClientProperty.AUTH_PRINCIPAL.getKey(), user);
+    props.put(ClientProperty.AUTH_PRINCIPAL.getKey(), user1);
     props.put(ClientProperty.INSTANCE_ZOOKEEPERS_TIMEOUT.getKey(), "22s");
-    ClientProperty.setPassword(props, password);
+    ClientProperty.setPassword(props, password1);
     client.close();
     client = Accumulo.newClient().from(props).build();
     info = ClientInfo.from(client.properties());
     assertEquals(instanceName, info.getInstanceName());
     assertEquals(zookeepers, info.getZooKeepers());
-    assertEquals(user, client.whoami());
+    assertEquals(user1, client.whoami());
    assertEquals(22000, info.getZooKeepersSessionTimeOut());
 
-    final String user2 = "testuser2";
-    final String password2 = "testpassword2";
+    ClusterUser testuser2 = getUser(1);
+    final String user2 = testuser2.getPrincipal();
+    final String password2 = testuser2.getPassword();
     c.securityOperations().createLocalUser(user2, new PasswordToken(password2));
 
     AccumuloClient client2 = Accumulo.newClient().from(client.properties())
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ManyWriteAheadLogsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ManyWriteAheadLogsIT.java
index b64199c..4387de9 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ManyWriteAheadLogsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ManyWriteAheadLogsIT.java
@@ -32,15 +32,19 @@ import java.util.TreeSet;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.admin.InstanceOperations;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.ServerType;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.log.WalStateManager.WalState;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -49,6 +53,8 @@ public class ManyWriteAheadLogsIT extends AccumuloClusterHarness {
 
   private static final Logger log = LoggerFactory.getLogger(ManyWriteAheadLogsIT.class);
 
+  private String majcDelay, walogSize;
+
   @Override
   public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
     // configure a smaller walog size so the walogs will roll frequently in the test
@@ -71,6 +77,38 @@ public class ManyWriteAheadLogsIT extends AccumuloClusterHarness {
     return 10 * 60;
   }
 
+  @Before
+  public void alterConfig() throws Exception {
+    if (getClusterType() == ClusterType.MINI) {
+      return;
+    }
+    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
+      InstanceOperations iops = client.instanceOperations();
+      Map<String,String> conf = iops.getSystemConfiguration();
+      majcDelay = conf.get(Property.TSERV_MAJC_DELAY.getKey());
+      walogSize = conf.get(Property.TSERV_WALOG_MAX_SIZE.getKey());
+
+      iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1");
+      iops.setProperty(Property.TSERV_WALOG_MAX_SIZE.getKey(), "1M");
+
+      getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+      getClusterControl().startAllServers(ServerType.TABLET_SERVER);
+    }
+  }
+
+  @After
+  public void resetConfig() throws Exception {
+    if (majcDelay != null) {
+      try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
+        InstanceOperations iops = client.instanceOperations();
+        iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
+        iops.setProperty(Property.TSERV_WALOG_MAX_SIZE.getKey(), walogSize);
+      }
+      getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+      getClusterControl().startAllServers(ServerType.TABLET_SERVER);
+    }
+  }
+
   /**
    * This creates a situation where many tablets reference many different write ahead logs. However
    * not single tablet references a lot of write ahead logs. Want to ensure the tablet server forces
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
index 1590424..3b9a2e1 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
@@ -21,7 +21,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayOutputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.net.URL;
@@ -147,27 +146,18 @@ public class ReadWriteIT extends AccumuloClusterHarness {
       }
       String scheme = "http://";
       if (getCluster() instanceof StandaloneAccumuloCluster) {
-        StandaloneAccumuloCluster standaloneCluster = (StandaloneAccumuloCluster) getCluster();
-        File accumuloProps =
-            new File(standaloneCluster.getServerAccumuloConfDir(), "accumulo.properties");
-        if (accumuloProps.isFile()) {
-          Configuration conf = new Configuration(false);
-          conf.addResource(new Path(accumuloProps.toURI()));
-          String monitorSslKeystore = conf.get(Property.MONITOR_SSL_KEYSTORE.getKey());
-          if (monitorSslKeystore != null) {
-            log.info("Using HTTPS since monitor ssl keystore configuration was observed in {}",
-                accumuloProps);
-            scheme = "https://";
-            SSLContext ctx = SSLContext.getInstance("TLSv1.2");
-            TrustManager[] tm = {new TestTrustManager()};
-            ctx.init(new KeyManager[0], tm, new SecureRandom());
-            SSLContext.setDefault(ctx);
-            HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
-            HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier());
-          }
-        } else {
-          log.info("{} is not a normal file, not checking for monitor running with SSL",
-              accumuloProps);
+        String monitorSslKeystore =
+            getCluster().getSiteConfiguration().get(Property.MONITOR_SSL_KEYSTORE.getKey());
+        if (monitorSslKeystore != null && !monitorSslKeystore.isEmpty()) {
+          log.info(
+              "Using HTTPS since monitor ssl keystore configuration was observed in accumulo configuration");
+          scheme = "https://";
+          SSLContext ctx = SSLContext.getInstance("TLSv1.2");
+          TrustManager[] tm = {new TestTrustManager()};
+          ctx.init(new KeyManager[0], tm, new SecureRandom());
+          SSLContext.setDefault(ctx);
+          HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
+          HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier());
         }
       }
       URL url = new URL(scheme + monitorLocation);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/YieldingIterator.java b/test/src/main/java/org/apache/accumulo/test/functional/YieldingIterator.java
index e94558a..fb593eb 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/YieldingIterator.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/YieldingIterator.java
@@ -118,6 +118,10 @@ public class YieldingIterator extends WrappingIterator {
             .yield(range.getStartKey().followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME));
         log.info("end YieldingIterator.next: yielded at " + range.getStartKey());
       }
+    } else {
+      // must be a new scan so re-initialize the counters
+      log.info("reseting counters");
+      resetCounters();
     }
 
     // if not yielding, then simply pass on the call to the source
@@ -132,4 +136,12 @@ public class YieldingIterator extends WrappingIterator {
   public void enableYielding(YieldCallback<Key> yield) {
     this.yield = Optional.of(yield);
   }
+
+  protected void resetCounters() {
+    yieldNexts.set(0);
+    yieldSeeks.set(0);
+    rebuilds.set(0);
+    yieldNextKey.set(false);
+    yieldSeekKey.set(false);
+  }
 }
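
The IteratorEnvIT changes above swap JUnit assertions for plain RuntimeExceptions because those checks execute inside the tablet server, where a JUnit assertion failure never reaches the client JVM that runs the test; throwing instead fails the scan or compaction that the test observes. A minimal, self-contained sketch of that check-and-throw pattern (the helper names requireEquals and requireThrowsIllegalState are illustrative, not part of the test):

final class ServerSideChecks {

  // Throwing RuntimeException makes the failure propagate out of the server-side call
  // (and ultimately fail the scan or compaction), whereas a JUnit assert is only
  // meaningful in the client JVM that is running the test.
  static void requireEquals(String message, Object expected, Object actual) {
    if (expected == null ? actual != null : !expected.equals(actual)) {
      throw new RuntimeException(
          "Test failed - " + message + " (expected=" + expected + ", actual=" + actual + ")");
    }
  }

  static void requireThrowsIllegalState(Runnable call, String message) {
    try {
      call.run();
    } catch (IllegalStateException e) {
      return; // expected outcome
    }
    throw new RuntimeException("Test failed - " + message);
  }

  public static void main(String[] args) {
    requireEquals("scope mismatch", "scan", "scan");
    requireThrowsIllegalState(() -> {
      throw new IllegalStateException();
    }, "expected IllegalStateException");
    System.out.println("checks passed");
  }
}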
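The new @After deleteUsers() in AccumuloClientIT only drops a principal when it is actually present, so teardown succeeds whether or not the test got far enough to create it. A sketch of that idempotent cleanup, with hypothetical principals and connection properties standing in for the harness's getUser(n) and getClientProps():

import java.util.Properties;
import java.util.Set;

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;

public class DropTestUsersSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical connection details; the test builds these from getClientProps().
    Properties clientProps = Accumulo.newClientProperties().to("myInstance", "zkhost:2181")
        .as("root", "secret").build();

    try (AccumuloClient client = Accumulo.newClient().from(clientProps).build()) {
      Set<String> users = client.securityOperations().listLocalUsers();
      // Hypothetical test principals; the IT resolves them through getUser(0)/getUser(1).
      for (String principal : new String[] {"testuser1", "testuser2"}) {
        if (users.contains(principal)) {
          client.securityOperations().dropLocalUser(principal);
        }
      }
    }
  }
}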
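ManyWriteAheadLogsIT now remembers the current tserver settings in @Before alterConfig(), overrides them, and puts them back in @After resetConfig() when running against a standalone cluster. A rough sketch of that save/override/restore pattern using only the public client API, assuming the literal property keys "tserver.compaction.major.delay" and "tserver.walog.max.size" and placeholder connection details; the tablet-server restart is omitted because it goes through the harness's ClusterControl rather than the client API:

import java.util.Map;
import java.util.Properties;

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.admin.InstanceOperations;

public class SystemPropertyOverrideSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical connection details; the test gets the real ones from getClientProps().
    Properties clientProps = Accumulo.newClientProperties().to("myInstance", "zkhost:2181")
        .as("root", "secret").build();

    // Assumed literal spellings of Property.TSERV_MAJC_DELAY / Property.TSERV_WALOG_MAX_SIZE.
    String majcDelayKey = "tserver.compaction.major.delay";
    String walogSizeKey = "tserver.walog.max.size";

    try (AccumuloClient client = Accumulo.newClient().from(clientProps).build()) {
      InstanceOperations iops = client.instanceOperations();
      Map<String,String> conf = iops.getSystemConfiguration();
      String savedMajcDelay = conf.get(majcDelayKey); // remember what was configured before
      String savedWalogSize = conf.get(walogSizeKey);

      iops.setProperty(majcDelayKey, "1");  // aggressive settings while the test runs
      iops.setProperty(walogSizeKey, "1M");

      // ... restart tablet servers and run the test here ...

      iops.setProperty(majcDelayKey, savedMajcDelay); // put the original values back
      iops.setProperty(walogSizeKey, savedWalogSize);
    }
  }
}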
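ReadWriteIT's monitor check now reads the already-loaded site configuration instead of re-parsing accumulo.properties from disk, and only switches to HTTPS when a monitor SSL keystore is set. A small sketch of that decision, assuming the literal key for Property.MONITOR_SSL_KEYSTORE and leaving out the permissive SSLContext/HostnameVerifier setup the test also performs:

import java.util.Map;

public class MonitorSchemeSketch {
  // "siteConfig" stands in for getCluster().getSiteConfiguration(); the key below is the
  // assumed spelling of Property.MONITOR_SSL_KEYSTORE.
  static String monitorScheme(Map<String,String> siteConfig) {
    String keystore = siteConfig.get("monitor.ssl.keyStore");
    return (keystore != null && !keystore.isEmpty()) ? "https://" : "http://";
  }

  public static void main(String[] args) {
    System.out.println(monitorScheme(Map.of()));                                              // http://
    System.out.println(monitorScheme(Map.of("monitor.ssl.keyStore", "/path/keystore.jks")));  // https://
  }
}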