accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From els...@apache.org
Subject accumulo git commit: ACCUMULO-3599 Get (un)secure ITs running against mini/standalone
Date Tue, 10 Mar 2015 22:17:12 GMT
Repository: accumulo
Updated Branches:
  refs/heads/master 13f24cbed -> 01558a4f1


ACCUMULO-3599 Get (un)secure ITs running against mini/standalone

A large series of changes to get all of the ITs running a variety of ways.
By default, ITs still run insecure against miniclusters. Kerberos can
be enabled with MINI. Additionally, all ITs should run against an insecure
normal installation as well as a normal installation with Kerberos on.

* Push down Hadoop configs to PrintInfo
* Support users and keytabs (or passwords) in cluster configuration
* Use the filesystem from the cluster
* Reduce Hadoop UGI spam in the logs
* Ensure that the log reference seen is for the table we made, not the trace table.
* Get ProxyITs passing regardless of client configuration installed.
* Use a CountDownLatch to coordinate scanners running on ScanIdIT
* Move UserCompactionStrategyIT strategies into test jar to enable standalone tests.
* Apply extra quoting to args for standalone cluster exec.


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/01558a4f
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/01558a4f
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/01558a4f

Branch: refs/heads/master
Commit: 01558a4f1b00f20aad550fa89a8630187a4511cd
Parents: 13f24cb
Author: Josh Elser <elserj@apache.org>
Authored: Tue Mar 3 13:50:46 2015 -0800
Committer: Josh Elser <elserj@apache.org>
Committed: Tue Mar 10 17:24:24 2015 -0400

----------------------------------------------------------------------
 .../client/security/tokens/KerberosToken.java   |  3 +-
 .../accumulo/core/file/rfile/PrintInfo.java     | 15 +++-
 .../simple/client/TraceDumpExample.java         | 28 ++++++-
 .../standalone/StandaloneAccumuloCluster.java   | 14 ++--
 .../standalone/StandaloneClusterControl.java    |  5 +-
 .../org/apache/accumulo/proxy/ProxyServer.java  | 18 ++++-
 .../accumulo/test/SizeCompactionStrategy.java   | 59 ++++++++++++++
 .../accumulo/test/TestCompactionStrategy.java   | 72 +++++++++++++++++
 .../accumulo/harness/AccumuloClusterIT.java     |  2 +-
 .../StandaloneAccumuloClusterConfiguration.java | 14 +++-
 .../accumulo/proxy/ProxyDurabilityIT.java       |  8 +-
 .../apache/accumulo/proxy/SimpleProxyBase.java  | 20 ++++-
 .../accumulo/test/BulkImportVolumeIT.java       |  1 +
 .../accumulo/test/ConditionalWriterIT.java      | 21 +++++
 .../org/apache/accumulo/test/MetaSplitIT.java   | 39 ++++++++-
 .../org/apache/accumulo/test/NamespacesIT.java  |  3 +
 .../accumulo/test/NoMutationRecoveryIT.java     | 49 ++++++++++-
 .../org/apache/accumulo/test/ShellServerIT.java |  1 -
 .../accumulo/test/UserCompactionStrategyIT.java | 85 --------------------
 .../accumulo/test/functional/CleanUpIT.java     |  4 +-
 .../accumulo/test/functional/CloneTestIT.java   |  4 +-
 .../accumulo/test/functional/CredentialsIT.java | 15 ++--
 .../accumulo/test/functional/ExamplesIT.java    | 45 ++++++++---
 .../accumulo/test/functional/ReadWriteIT.java   | 16 +++-
 .../accumulo/test/functional/ScanIdIT.java      | 36 +++++----
 test/src/test/resources/log4j.properties        |  1 +
 26 files changed, 431 insertions(+), 147 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/core/src/main/java/org/apache/accumulo/core/client/security/tokens/KerberosToken.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/KerberosToken.java b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/KerberosToken.java
index c122a46..5412bad 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/KerberosToken.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/KerberosToken.java
@@ -70,7 +70,8 @@ public class KerberosToken implements AuthenticationToken {
     Preconditions.checkNotNull(principal, "Principal was null");
     Preconditions.checkNotNull(keytab, "Keytab was null");
     Preconditions.checkArgument(keytab.exists() && keytab.isFile(), "Keytab was not a normal file");
-    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab.getAbsolutePath());
+    UserGroupInformation.loginUserFromKeytab(principal, keytab.getAbsolutePath());
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     this.principal = ugi.getUserName();
     this.keytab = keytab;
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
index 3915b3f..8639f09 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
@@ -55,6 +55,8 @@ public class PrintInfo implements KeywordExecutable {
     boolean histogram = false;
     @Parameter(description = " <file> { <file> ... }")
     List<String> files = new ArrayList<String>();
+    @Parameter(names = {"-c", "--config"}, variableArity = true, description = "Comma-separated Hadoop configuration files")
+    List<String> configFiles = new ArrayList<>();
   }
 
   public static void main(String[] args) throws Exception {
@@ -68,10 +70,6 @@ public class PrintInfo implements KeywordExecutable {
 
   @Override
   public void execute(final String[] args) throws Exception {
-    Configuration conf = new Configuration();
-
-    FileSystem hadoopFs = FileSystem.get(conf);
-    FileSystem localFs = FileSystem.getLocal(conf);
     Opts opts = new Opts();
     opts.parseArgs(PrintInfo.class.getName(), args);
     if (opts.files.isEmpty()) {
@@ -79,6 +77,15 @@ public class PrintInfo implements KeywordExecutable {
       System.exit(-1);
     }
 
+    Configuration conf = new Configuration();
+    for (String confFile : opts.configFiles) {
+      log.debug("Adding Hadoop configuration file " + confFile);
+      conf.addResource(new Path(confFile));
+    }
+
+    FileSystem hadoopFs = FileSystem.get(conf);
+    FileSystem localFs = FileSystem.getLocal(conf);
+
     long countBuckets[] = new long[11];
     long sizeBuckets[] = new long[countBuckets.length];
     long totalSize = 0;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java
index 46e6a67..5885094 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java
@@ -20,12 +20,16 @@ import org.apache.accumulo.core.cli.ClientOnDefaultTable;
 import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.tracer.TraceDump;
 import org.apache.accumulo.tracer.TraceDump.Printer;
 import org.apache.hadoop.io.Text;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.beust.jcommander.Parameter;
 
@@ -34,6 +38,7 @@ import com.beust.jcommander.Parameter;
  *
  */
 public class TraceDumpExample {
+  private static final Logger log = LoggerFactory.getLogger(TraceDumpExample.class);
 
   static class Opts extends ClientOnDefaultTable {
     public Opts() {
@@ -50,7 +55,28 @@ public class TraceDumpExample {
       throw new IllegalArgumentException("--traceid option is required");
     }
 
-    Scanner scanner = opts.getConnector().createScanner(opts.getTableName(), opts.auths);
+    final Connector conn = opts.getConnector();
+    final String principal = opts.getPrincipal();
+    final String table = opts.getTableName();
+    if (!conn.securityOperations().hasTablePermission(principal, table, TablePermission.READ)) {
+      conn.securityOperations().grantTablePermission(principal, table, TablePermission.READ);
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        throw new RuntimeException(e);
+      }
+      while (!conn.securityOperations().hasTablePermission(principal, table, TablePermission.READ)) {
+        log.info("{} didn't propagate read permission on {}", principal, table);
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+          Thread.currentThread().interrupt();
+          throw new RuntimeException(e);
+        }
+      }
+    }
+    Scanner scanner = conn.createScanner(table, opts.auths);
     scanner.setRange(new Range(new Text(opts.traceId)));
     TraceDump.printTrace(scanner, new Printer() {
       @Override

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneAccumuloCluster.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneAccumuloCluster.java
index 4e0861e..69a11df 100644
--- a/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneAccumuloCluster.java
@@ -81,6 +81,12 @@ public class StandaloneAccumuloCluster implements AccumuloCluster {
   }
 
   public String getHadoopConfDir() {
+    if (null == hadoopConfDir) {
+      hadoopConfDir = System.getenv("HADOOP_CONF_DIR");
+    }
+    if (null == hadoopConfDir) {
+      throw new IllegalArgumentException("Cannot determine HADOOP_CONF_DIR for standalone cluster");
+    }
     return hadoopConfDir;
   }
 
@@ -139,13 +145,7 @@ public class StandaloneAccumuloCluster implements AccumuloCluster {
   }
 
   public Configuration getHadoopConfiguration() {
-    String confDir = hadoopConfDir;
-    if (null == confDir) {
-      confDir = System.getenv("HADOOP_CONF_DIR");
-    }
-    if (null == confDir) {
-      throw new IllegalArgumentException("Cannot determine HADOOP_CONF_DIR for standalone cluster");
-    }
+    String confDir = getHadoopConfDir();
     // Using CachedConfiguration will make repeatedly calling this method much faster
     final Configuration conf = CachedConfiguration.getInstance();
     conf.addResource(new Path(confDir, "core-site.xml"));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControl.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControl.java b/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControl.java
index 9b467bc..7cc80c0 100644
--- a/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControl.java
+++ b/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControl.java
@@ -97,7 +97,10 @@ public class StandaloneClusterControl implements ClusterControl {
     String[] cmd = new String[2 + args.length];
     cmd[0] = accumuloPath;
     cmd[1] = clz.getName();
-    System.arraycopy(args, 0, cmd, 2, args.length);
+    // Quote the arguments to prevent shell expansion
+    for (int i = 0, j = 2; i < args.length; i++, j++) {
+      cmd[j] = "'" + args[i] + "'";
+    }
     log.info("Running: '{}' on {}", StringUtils.join(cmd, " "), master);
     return exec(master, cmd);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
----------------------------------------------------------------------
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index 7f636d2..5b6a9da 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -106,6 +106,8 @@ import org.apache.accumulo.proxy.thrift.UnknownWriter;
 import org.apache.accumulo.proxy.thrift.WriterOptions;
 import org.apache.accumulo.server.rpc.ThriftServerType;
 import org.apache.accumulo.server.rpc.UGIAssumingProcessor;
+import org.apache.commons.configuration.ConfigurationException;
+import org.apache.commons.configuration.PropertiesConfiguration;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 import org.apache.thrift.TException;
@@ -185,9 +187,21 @@ public class ProxyServer implements AccumuloProxy.Iface {
     String useMock = props.getProperty("useMockInstance");
     if (useMock != null && Boolean.parseBoolean(useMock))
       instance = new MockInstance();
-    else
-      instance = new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(props.getProperty("instance"))
+    else {
+      ClientConfiguration clientConf;
+      if (props.containsKey("clientConfigurationFile")) {
+        String clientConfFile = props.getProperty("clientConfigurationFile");
+        try {
+          clientConf = new ClientConfiguration(new PropertiesConfiguration(clientConfFile));
+        } catch (ConfigurationException e) {
+          throw new RuntimeException(e);
+        }
+      } else {
+        clientConf = ClientConfiguration.loadDefault();
+      }
+      instance = new ZooKeeperInstance(clientConf.withInstance(props.getProperty("instance"))
           .withZkHosts(props.getProperty("zookeepers")));
+    }
 
     try {
       String tokenProp = props.getProperty("tokenClass", PasswordToken.class.getName());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/main/java/org/apache/accumulo/test/SizeCompactionStrategy.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/SizeCompactionStrategy.java b/test/src/main/java/org/apache/accumulo/test/SizeCompactionStrategy.java
new file mode 100644
index 0000000..b0c2b29
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/SizeCompactionStrategy.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.tserver.compaction.CompactionPlan;
+import org.apache.accumulo.tserver.compaction.CompactionStrategy;
+import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
+
+public class SizeCompactionStrategy extends CompactionStrategy {
+
+  private long size = 0;
+
+  @Override
+  public void init(Map<String,String> options) {
+    size = Long.parseLong(options.get("size"));
+  }
+
+  @Override
+  public boolean shouldCompact(MajorCompactionRequest request) throws IOException {
+
+    for (DataFileValue dfv : request.getFiles().values())
+      if (dfv.getSize() < size)
+        return true;
+
+    return false;
+  }
+
+  @Override
+  public CompactionPlan getCompactionPlan(MajorCompactionRequest request) throws IOException {
+    CompactionPlan plan = new CompactionPlan();
+
+    for (Entry<FileRef,DataFileValue> entry : request.getFiles().entrySet())
+      if (entry.getValue().getSize() < size)
+        plan.inputFiles.add(entry.getKey());
+
+    return plan;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/main/java/org/apache/accumulo/test/TestCompactionStrategy.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TestCompactionStrategy.java b/test/src/main/java/org/apache/accumulo/test/TestCompactionStrategy.java
new file mode 100644
index 0000000..df82040
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/TestCompactionStrategy.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.tserver.compaction.CompactionPlan;
+import org.apache.accumulo.tserver.compaction.CompactionStrategy;
+import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
+
+public class TestCompactionStrategy extends CompactionStrategy {
+
+  private String inputPrefix = "Z";
+  private String dropPrefix = "Z";
+  private boolean shouldCompact = false;
+
+  @Override
+  public void init(Map<String,String> options) {
+    if (options.containsKey("inputPrefix"))
+      inputPrefix = options.get("inputPrefix");
+    if (options.containsKey("dropPrefix"))
+      dropPrefix = options.get("dropPrefix");
+    if (options.containsKey("shouldCompact"))
+      shouldCompact = Boolean.parseBoolean(options.get("shouldCompact"));
+  }
+
+  @Override
+  public boolean shouldCompact(MajorCompactionRequest request) throws IOException {
+    if (shouldCompact)
+      return true;
+
+    for (FileRef fref : request.getFiles().keySet()) {
+      if (fref.path().getName().startsWith(inputPrefix))
+        return true;
+      if (fref.path().getName().startsWith(dropPrefix))
+        return true;
+    }
+
+    return false;
+  }
+
+  @Override
+  public CompactionPlan getCompactionPlan(MajorCompactionRequest request) throws IOException {
+    CompactionPlan plan = new CompactionPlan();
+
+    for (FileRef fref : request.getFiles().keySet()) {
+      if (fref.path().getName().startsWith(dropPrefix)) {
+        plan.deleteFiles.add(fref);
+      } else if (fref.path().getName().startsWith(inputPrefix)) {
+        plan.inputFiles.add(fref);
+      }
+    }
+
+    return plan;
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/harness/AccumuloClusterIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/harness/AccumuloClusterIT.java b/test/src/test/java/org/apache/accumulo/harness/AccumuloClusterIT.java
index 05c17f1..930b275 100644
--- a/test/src/test/java/org/apache/accumulo/harness/AccumuloClusterIT.java
+++ b/test/src/test/java/org/apache/accumulo/harness/AccumuloClusterIT.java
@@ -305,7 +305,7 @@ public abstract class AccumuloClusterIT extends AccumuloIT implements MiniCluste
       return cluster.getConnector(princ, token);
     } catch (Exception e) {
       log.error("Could not connect to Accumulo", e);
-      fail("Could not connect to Accumulo");
+      fail("Could not connect to Accumulo: " + e.getMessage());
 
       throw new RuntimeException("Could not connect to Accumulo", e);
     }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/harness/conf/StandaloneAccumuloClusterConfiguration.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/harness/conf/StandaloneAccumuloClusterConfiguration.java b/test/src/test/java/org/apache/accumulo/harness/conf/StandaloneAccumuloClusterConfiguration.java
index 12e9619..964cdcc 100644
--- a/test/src/test/java/org/apache/accumulo/harness/conf/StandaloneAccumuloClusterConfiguration.java
+++ b/test/src/test/java/org/apache/accumulo/harness/conf/StandaloneAccumuloClusterConfiguration.java
@@ -16,6 +16,8 @@
  */
 package org.apache.accumulo.harness.conf;
 
+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -61,6 +63,8 @@ public class StandaloneAccumuloClusterConfiguration extends AccumuloClusterPrope
   public static final String ACCUMULO_STANDALONE_USER_KEY = ACCUMULO_STANDALONE_PREFIX + "users.";
   // Keytabs for the users
   public static final String ACCUMULO_STANDALONE_USER_KEYTABS_KEY = ACCUMULO_STANDALONE_PREFIX + "keytabs.";
+  // Passwords for the users
+  public static final String ACCUMULO_STANDALONE_USER_PASSWORDS_KEY = ACCUMULO_STANDALONE_PREFIX + "password.";
 
   public static final String ACCUMULO_STANDALONE_HOME = ACCUMULO_STANDALONE_PREFIX + "home";
   public static final String ACCUMULO_STANDALONE_CONF = ACCUMULO_STANDALONE_PREFIX + "conf";
@@ -101,7 +105,15 @@ public class StandaloneAccumuloClusterConfiguration extends AccumuloClusterPrope
         String suffix = key.substring(ACCUMULO_STANDALONE_USER_KEY.length());
         String keytab = conf.get(ACCUMULO_STANDALONE_USER_KEYTABS_KEY + suffix);
         if (null != keytab) {
-          clusterUsers.add(new ClusterUser(entry.getValue(), keytab));
+          File keytabFile = new File(keytab);
+          assertTrue("Keytab doesn't exist: " + keytabFile, keytabFile.exists() && keytabFile.isFile());
+          clusterUsers.add(new ClusterUser(entry.getValue(), keytabFile));
+        } else {
+          String password = conf.get(ACCUMULO_STANDALONE_USER_PASSWORDS_KEY + suffix);
+          if (null == password) {
+            throw new IllegalArgumentException("Missing password or keytab configuration for user with offset " + suffix);
+          }
+          clusterUsers.add(new ClusterUser(entry.getValue(), password));
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/proxy/ProxyDurabilityIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/proxy/ProxyDurabilityIT.java b/test/src/test/java/org/apache/accumulo/proxy/ProxyDurabilityIT.java
index 45799c4..404a8fd 100644
--- a/test/src/test/java/org/apache/accumulo/proxy/ProxyDurabilityIT.java
+++ b/test/src/test/java/org/apache/accumulo/proxy/ProxyDurabilityIT.java
@@ -19,7 +19,9 @@ package org.apache.accumulo.proxy;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.File;
 import java.nio.ByteBuffer;
+import java.nio.file.Files;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
@@ -60,7 +62,7 @@ public class ProxyDurabilityIT extends ConfigurableMacIT {
   @Override
   public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
     hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
-    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "10s");
     cfg.setNumTservers(1);
   }
 
@@ -72,9 +74,13 @@ public class ProxyDurabilityIT extends ConfigurableMacIT {
   public void testDurability() throws Exception {
     Connector c = getConnector();
     Properties props = new Properties();
+    // Avoid issues with locally installed client configuration files with custom properties
+    File emptyFile = Files.createTempFile(null, null).toFile();
+    emptyFile.deleteOnExit();
     props.put("instance", c.getInstance().getInstanceName());
     props.put("zookeepers", c.getInstance().getZooKeepers());
     props.put("tokenClass", PasswordToken.class.getName());
+    props.put("clientConfigurationFile", emptyFile.toString());
 
     TJSONProtocol.Factory protocol = new TJSONProtocol.Factory();
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/proxy/SimpleProxyBase.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/proxy/SimpleProxyBase.java b/test/src/test/java/org/apache/accumulo/proxy/SimpleProxyBase.java
index 72ed278..a0300ad 100644
--- a/test/src/test/java/org/apache/accumulo/proxy/SimpleProxyBase.java
+++ b/test/src/test/java/org/apache/accumulo/proxy/SimpleProxyBase.java
@@ -26,6 +26,7 @@ import java.io.BufferedReader;
 import java.io.File;
 import java.io.InputStreamReader;
 import java.nio.ByteBuffer;
+import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -41,6 +42,8 @@ import java.util.TreeMap;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
@@ -97,6 +100,7 @@ import org.apache.accumulo.proxy.thrift.UnknownWriter;
 import org.apache.accumulo.proxy.thrift.WriterOptions;
 import org.apache.accumulo.server.util.PortUtils;
 import org.apache.accumulo.test.functional.SlowIterator;
+import org.apache.commons.configuration.MapConfiguration;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -129,7 +133,7 @@ public abstract class SimpleProxyBase {
   private static org.apache.accumulo.proxy.thrift.AccumuloProxy.Client client;
   private static String principal = "root";
 
-  private static Map<String,String> properties = Collections.singletonMap("password", secret);
+  private static Map<String,String> properties = new HashMap<>();
   private static ByteBuffer creds = null;
 
   private static final AtomicInteger tableCounter = new AtomicInteger(0);
@@ -163,7 +167,12 @@ public abstract class SimpleProxyBase {
     accumulo = new MiniAccumuloCluster(config);
     accumulo.start();
     // wait for accumulo to be up and functional
-    ZooKeeperInstance zoo = new ZooKeeperInstance(accumulo.getInstanceName(), accumulo.getZooKeepers());
+    Map<String,String> map = new HashMap<>();
+    map.put(ClientProperty.INSTANCE_NAME.getKey(), accumulo.getInstanceName());
+    map.put(ClientProperty.INSTANCE_ZK_HOST.getKey(), accumulo.getZooKeepers());
+    MapConfiguration mapCfg = new MapConfiguration(map);
+    ClientConfiguration clientConfig = new ClientConfiguration(mapCfg);
+    ZooKeeperInstance zoo = new ZooKeeperInstance(clientConfig);
     Connector c = zoo.getConnector("root", new PasswordToken(secret.getBytes()));
     for (@SuppressWarnings("unused")
     Entry<org.apache.accumulo.core.data.Key,Value> entry : c.createScanner(MetadataTable.NAME, Authorizations.EMPTY))
@@ -173,6 +182,13 @@ public abstract class SimpleProxyBase {
     props.put("instance", accumulo.getConfig().getInstanceName());
     props.put("zookeepers", accumulo.getZooKeepers());
     props.put("tokenClass", PasswordToken.class.getName());
+    // Avoid issues with locally installed client configuration files with custom properties
+    File emptyFile = Files.createTempFile(null, null).toFile();
+    emptyFile.deleteOnExit();
+    props.put("clientConfigurationFile", emptyFile.toString());
+
+    properties.put("password", secret);
+    properties.put("clientConfigurationFile", emptyFile.toString());
 
     proxyPort = PortUtils.getRandomFreePort();
     proxyServer = Proxy.createProxyServer(HostAndPort.fromParts("localhost", proxyPort), protocol, props).server;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/test/BulkImportVolumeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/BulkImportVolumeIT.java b/test/src/test/java/org/apache/accumulo/test/BulkImportVolumeIT.java
index 3272262..8966ce3 100644
--- a/test/src/test/java/org/apache/accumulo/test/BulkImportVolumeIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/BulkImportVolumeIT.java
@@ -86,6 +86,7 @@ public class BulkImportVolumeIT extends AccumuloClusterIT {
     fs.create(bogus).close();
     log.info("bogus: {}", bogus);
     assertTrue(fs.exists(bogus));
+    log.info("Importing {} into {} with failures directory {}", bulk, tableName, err);
     to.importDirectory(tableName, bulk.toString(), err.toString(), false);
     assertEquals(1, fs.listStatus(err).length);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/test/ConditionalWriterIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ConditionalWriterIT.java b/test/src/test/java/org/apache/accumulo/test/ConditionalWriterIT.java
index 8b9926c..7ca4d72 100644
--- a/test/src/test/java/org/apache/accumulo/test/ConditionalWriterIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/ConditionalWriterIT.java
@@ -28,6 +28,7 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Random;
+import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.UUID;
@@ -91,6 +92,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 import org.junit.Assert;
 import org.junit.Assume;
+import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.collect.Iterables;
@@ -113,6 +115,18 @@ public class ConditionalWriterIT extends AccumuloClusterIT {
     return l;
   }
 
+  @Before
+  public void deleteUsers() throws Exception {
+    Connector conn = getConnector();
+    Set<String> users = conn.securityOperations().listLocalUsers();
+    for (int i = 0; i < 5; i++) {
+      ClusterUser user = getUser(i);
+      if (users.contains(user.getPrincipal())) {
+        conn.securityOperations().dropLocalUser(user.getPrincipal());
+      }
+    }
+  }
+
   @Test
   public void testBasic() throws Exception {
 
@@ -1054,6 +1068,7 @@ public class ConditionalWriterIT extends AccumuloClusterIT {
     ClientConfiguration clientConf = cluster.getClientConfig();
     final boolean saslEnabled = clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false);
 
+    // Create a new user
     ClusterUser user1 = getUser(0);
     user = user1.getPrincipal();
     if (saslEnabled) {
@@ -1065,15 +1080,18 @@ public class ConditionalWriterIT extends AccumuloClusterIT {
     String[] tables = getUniqueNames(3);
     String table1 = tables[0], table2 = tables[1], table3 = tables[2];
 
+    // Create three tables
     conn.tableOperations().create(table1);
     conn.tableOperations().create(table2);
     conn.tableOperations().create(table3);
 
+    // Grant R on table1, W on table2, R/W on table3
     conn.securityOperations().grantTablePermission(user, table1, TablePermission.READ);
     conn.securityOperations().grantTablePermission(user, table2, TablePermission.WRITE);
     conn.securityOperations().grantTablePermission(user, table3, TablePermission.READ);
     conn.securityOperations().grantTablePermission(user, table3, TablePermission.WRITE);
 
+    // Login as the user
     Connector conn2 = conn.getInstance().getConnector(user, user1.getToken());
 
     ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
@@ -1084,8 +1102,10 @@ public class ConditionalWriterIT extends AccumuloClusterIT {
     ConditionalWriter cw2 = conn2.createConditionalWriter(table2, new ConditionalWriterConfig());
     ConditionalWriter cw3 = conn2.createConditionalWriter(table3, new ConditionalWriterConfig());
 
+    // Should be able to conditional-update a table we have R/W on
     Assert.assertEquals(Status.ACCEPTED, cw3.write(cm1).getStatus());
 
+    // Conditional-update to a table we only have read on should fail
     try {
       Status status = cw1.write(cm1).getStatus();
       Assert.fail("Expected exception writing conditional mutation to table the user doesn't have write access to, Got status: " + status);
@@ -1093,6 +1113,7 @@ public class ConditionalWriterIT extends AccumuloClusterIT {
 
     }
 
+    // Conditional-update to a table we only have write on should fail
     try {
       Status status = cw2.write(cm1).getStatus();
       Assert.fail("Expected exception writing conditional mutation to table the user doesn't have read access to. Got status: " + status);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/test/MetaSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/MetaSplitIT.java b/test/src/test/java/org/apache/accumulo/test/MetaSplitIT.java
index 82fbd01..51b462e 100644
--- a/test/src/test/java/org/apache/accumulo/test/MetaSplitIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/MetaSplitIT.java
@@ -18,26 +18,62 @@ package org.apache.accumulo.test;
 
 import static org.junit.Assert.assertEquals;
 
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.admin.TableOperations;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.harness.AccumuloClusterIT;
 import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class MetaSplitIT extends AccumuloClusterIT {
+  private static final Logger log = LoggerFactory.getLogger(MetaSplitIT.class);
+
+  private Collection<Text> metadataSplits = null;
 
   @Override
   public int defaultTimeoutSeconds() {
     return 3 * 60;
   }
 
+  @Before
+  public void saveMetadataSplits() throws Exception {
+    if (ClusterType.STANDALONE == getClusterType()) {
+      Connector conn = getConnector();
+      Collection<Text> splits = conn.tableOperations().listSplits(MetadataTable.NAME);
+      // We expect a single split
+      if (!splits.equals(Arrays.asList(new Text("~")))) {
+        log.info("Existing splits on metadata table. Saving them, and applying single original split of '~'");
+        metadataSplits = splits;
+        conn.tableOperations().merge(MetadataTable.NAME, null, null);
+        conn.tableOperations().addSplits(MetadataTable.NAME, new TreeSet<Text>(Collections.singleton(new Text("~"))));
+      }
+    }
+  }
+
+  @After
+  public void restoreMetadataSplits() throws Exception {
+    if (null != metadataSplits) {
+      log.info("Restoring split on metadata table");
+      Connector conn = getConnector();
+      conn.tableOperations().merge(MetadataTable.NAME, null, null);
+      conn.tableOperations().addSplits(MetadataTable.NAME, new TreeSet<Text>(metadataSplits));
+    }
+  }
+
   @Test(expected = AccumuloException.class)
   public void testRootTableSplit() throws Exception {
     TableOperations opts = getConnector().tableOperations();
@@ -94,7 +130,8 @@ public class MetaSplitIT extends AccumuloClusterIT {
       }
       Thread.sleep(2000);
     }
-    assertEquals(numSplits, opts.listSplits(MetadataTable.NAME).size());
+    Collection<Text> splits = opts.listSplits(MetadataTable.NAME);
+    assertEquals("Actual metadata table splits: " + splits, numSplits, splits.size());
   }
 
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/test/NamespacesIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/NamespacesIT.java b/test/src/test/java/org/apache/accumulo/test/NamespacesIT.java
index 3fc3b39..5808d70 100644
--- a/test/src/test/java/org/apache/accumulo/test/NamespacesIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/NamespacesIT.java
@@ -106,6 +106,9 @@ public class NamespacesIT extends AccumuloClusterIT {
 
   @After
 public void swingMjölnir() throws Exception {
+    if (null == c) {
+      return;
+    }
     // clean up any added tables, namespaces, and users, after each test
     for (String t : c.tableOperations().list())
       if (!Tables.qualify(t).getFirst().equals(Namespaces.ACCUMULO_NAMESPACE))

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/test/NoMutationRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/NoMutationRecoveryIT.java b/test/src/test/java/org/apache/accumulo/test/NoMutationRecoveryIT.java
index b74793a..6a9975c 100644
--- a/test/src/test/java/org/apache/accumulo/test/NoMutationRecoveryIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/NoMutationRecoveryIT.java
@@ -44,10 +44,15 @@ import org.apache.accumulo.test.functional.FunctionalTestUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 // Verify that a recovery of a log without any mutations removes the log reference
 public class NoMutationRecoveryIT extends AccumuloClusterIT {
+  private static final Logger log = LoggerFactory.getLogger(NoMutationRecoveryIT.class);
 
   @Override
   public int defaultTimeoutSeconds() {
@@ -60,10 +65,27 @@ public class NoMutationRecoveryIT extends AccumuloClusterIT {
     hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
   }
 
+  @Before
+  public void takeTraceTableOffline() throws Exception {
+    Connector conn = getConnector();
+    if (conn.tableOperations().exists("trace")) {
+      conn.tableOperations().offline("trace", true);
+    }
+  }
+
+  @After
+  public void takeTraceTableOnline() throws Exception {
+    Connector conn = getConnector();
+    if (conn.tableOperations().exists("trace")) {
+      conn.tableOperations().online("trace", true);
+    }
+  }
+
   public boolean equals(Entry<Key,Value> a, Entry<Key,Value> b) {
     // comparison, without timestamp
     Key akey = a.getKey();
     Key bkey = b.getKey();
+    log.info("Comparing {} to {}", akey.toStringNoTruncate(), bkey.toStringNoTruncate());
     return akey.compareTo(bkey, PartialKey.ROW_COLFAM_COLQUAL_COLVIS) == 0 && a.getValue().equals(b.getValue());
   }
 
@@ -73,13 +95,33 @@ public class NoMutationRecoveryIT extends AccumuloClusterIT {
     final String table = getUniqueNames(1)[0];
     conn.tableOperations().create(table);
     String tableId = conn.tableOperations().tableIdMap().get(table);
+
+    log.info("Created {} with id {}", table, tableId);
+
+    // Add a record to the table
     update(conn, table, new Text("row"), new Text("cf"), new Text("cq"), new Value("value".getBytes()));
+
+    // Get the WAL reference used by the table we just added the update to
     Entry<Key,Value> logRef = getLogRef(conn, MetadataTable.NAME);
+
+    log.info("Log reference in metadata table {} {}", logRef.getKey().toStringNoTruncate(), logRef.getValue());
+
+    // Flush the record to disk
     conn.tableOperations().flush(table, null, null, true);
-    assertEquals("should not have any refs", 0, FunctionalTestUtils.count(getLogRefs(conn, MetadataTable.NAME, Range.prefix(tableId))));
+
+    Range range = Range.prefix(tableId);
+    log.info("Fetching WAL references over " + table);
+    assertEquals("should not have any refs", 0, FunctionalTestUtils.count(getLogRefs(conn, MetadataTable.NAME, range)));
+
+    // Grant permission to the admin user to write to the Metadata table
     conn.securityOperations().grantTablePermission(conn.whoami(), MetadataTable.NAME, TablePermission.WRITE);
+
+    // Add the wal record back to the metadata table
     update(conn, MetadataTable.NAME, logRef);
+
+    // Assert that we can get the bogus update back out again
     assertTrue(equals(logRef, getLogRef(conn, MetadataTable.NAME)));
+
     conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
     conn.tableOperations().flush(RootTable.NAME, null, null, true);
 
@@ -87,6 +129,7 @@ public class NoMutationRecoveryIT extends AccumuloClusterIT {
     control.stopAllServers(ServerType.TABLET_SERVER);
     control.startAllServers(ServerType.TABLET_SERVER);
 
+    // Verify that we can read the original record we wrote
     Scanner s = conn.createScanner(table, Authorizations.EMPTY);
     int count = 0;
     for (Entry<Key,Value> e : s) {
@@ -97,8 +140,10 @@ public class NoMutationRecoveryIT extends AccumuloClusterIT {
       count++;
     }
     assertEquals(1, count);
+
+    // Verify that the bogus log reference we wrote is gone
     for (Entry<Key,Value> ref : getLogRefs(conn, MetadataTable.NAME)) {
-      assertFalse(equals(ref, logRef));
+      assertFalse("Unexpected found reference to bogus log entry: " + ref.getKey().toStringNoTruncate() + " " + ref.getValue(), equals(ref, logRef));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java b/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
index 61f4219..f7ce461 100644
--- a/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
@@ -64,7 +64,6 @@ import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.harness.SharedMiniClusterIT;
 import org.apache.accumulo.shell.Shell;
-import org.apache.accumulo.test.UserCompactionStrategyIT.TestCompactionStrategy;
 import org.apache.accumulo.test.functional.FunctionalTestUtils;
 import org.apache.accumulo.test.functional.SlowIterator;
 import org.apache.accumulo.tracer.TraceServer;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/UserCompactionStrategyIT.java b/test/src/test/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
index 7a3162b..31f7804 100644
--- a/test/src/test/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
@@ -17,7 +17,6 @@
 
 package org.apache.accumulo.test;
 
-import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Map;
@@ -40,15 +39,10 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.RegExFilter;
-import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.harness.AccumuloClusterIT;
-import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.test.functional.FunctionalTestUtils;
 import org.apache.accumulo.test.functional.SlowIterator;
-import org.apache.accumulo.tserver.compaction.CompactionPlan;
-import org.apache.accumulo.tserver.compaction.CompactionStrategy;
-import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
 import org.apache.hadoop.io.Text;
 import org.junit.Assert;
 import org.junit.Test;
@@ -58,85 +52,6 @@ import com.google.common.collect.ImmutableSet;
 
 public class UserCompactionStrategyIT extends AccumuloClusterIT {
 
-  public static class SizeCompactionStrategy extends CompactionStrategy {
-
-    private long size = 0;
-
-    @Override
-    public void init(Map<String,String> options) {
-      size = Long.parseLong(options.get("size"));
-    }
-
-    @Override
-    public boolean shouldCompact(MajorCompactionRequest request) throws IOException {
-
-      for (DataFileValue dfv : request.getFiles().values())
-        if (dfv.getSize() < size)
-          return true;
-
-      return false;
-    }
-
-    @Override
-    public CompactionPlan getCompactionPlan(MajorCompactionRequest request) throws IOException {
-      CompactionPlan plan = new CompactionPlan();
-
-      for (Entry<FileRef,DataFileValue> entry : request.getFiles().entrySet())
-        if (entry.getValue().getSize() < size)
-          plan.inputFiles.add(entry.getKey());
-
-      return plan;
-    }
-
-  }
-
-  public static class TestCompactionStrategy extends CompactionStrategy {
-
-    private String inputPrefix = "Z";
-    private String dropPrefix = "Z";
-    private boolean shouldCompact = false;
-
-    @Override
-    public void init(Map<String,String> options) {
-      if (options.containsKey("inputPrefix"))
-        inputPrefix = options.get("inputPrefix");
-      if (options.containsKey("dropPrefix"))
-        dropPrefix = options.get("dropPrefix");
-      if (options.containsKey("shouldCompact"))
-        shouldCompact = Boolean.parseBoolean(options.get("shouldCompact"));
-    }
-
-    @Override
-    public boolean shouldCompact(MajorCompactionRequest request) throws IOException {
-      if (shouldCompact)
-        return true;
-
-      for (FileRef fref : request.getFiles().keySet()) {
-        if (fref.path().getName().startsWith(inputPrefix))
-          return true;
-        if (fref.path().getName().startsWith(dropPrefix))
-          return true;
-      }
-
-      return false;
-    }
-
-    @Override
-    public CompactionPlan getCompactionPlan(MajorCompactionRequest request) throws IOException {
-      CompactionPlan plan = new CompactionPlan();
-
-      for (FileRef fref : request.getFiles().keySet()) {
-        if (fref.path().getName().startsWith(dropPrefix)) {
-          plan.deleteFiles.add(fref);
-        } else if (fref.path().getName().startsWith(inputPrefix)) {
-          plan.inputFiles.add(fref);
-        }
-      }
-
-      return plan;
-    }
-  }
-
   @Test
   public void testDropA() throws Exception {
     Connector c = getConnector();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/test/functional/CleanUpIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CleanUpIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CleanUpIT.java
index 0818e67..82b8b70 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/CleanUpIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/CleanUpIT.java
@@ -28,7 +28,7 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CleanUp;
-import org.apache.accumulo.harness.AccumuloClusterIT;
+import org.apache.accumulo.harness.SharedMiniClusterIT;
 import org.apache.log4j.Logger;
 import org.junit.Assert;
 import org.junit.Test;
@@ -39,7 +39,7 @@ import org.junit.Test;
  * Because this is destructive across the current context classloader, the normal teardown methods will fail (because they attempt to create a Connector). Until
  * the ZooKeeperInstance and Connector are self-contained WRT resource management, we can't leverage the AccumuloClusterIT.
  */
-public class CleanUpIT extends AccumuloClusterIT {
+public class CleanUpIT extends SharedMiniClusterIT {
   private static final Logger log = Logger.getLogger(CleanUpIT.class);
 
   @Override

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/test/functional/CloneTestIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CloneTestIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CloneTestIT.java
index 3708bf9..4fad30b 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/CloneTestIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/CloneTestIT.java
@@ -146,7 +146,6 @@ public class CloneTestIT extends AccumuloClusterIT {
 
     Key k;
     Text cf = new Text(), cq = new Text();
-    Configuration conf = new Configuration();
     int itemsInspected = 0;
     for (Entry<Key,Value> entry : s) {
       itemsInspected++;
@@ -156,8 +155,7 @@ public class CloneTestIT extends AccumuloClusterIT {
 
       if (cf.equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME)) {
         Path p = new Path(cq.toString());
-        // Will this actually work against HDFS?
-        FileSystem fs = p.getFileSystem(conf);
+        FileSystem fs = cluster.getFileSystem();
         Assert.assertTrue("File does not exist: " + p, fs.exists(p));
       } else if (cf.equals(MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily())) {
         Assert.assertEquals("Saw unexpected cq", MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), cq);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
index cf0f297..ba2bae3 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.fail;
 
 import java.util.Iterator;
 import java.util.Map.Entry;
+import java.util.Set;
 
 import org.apache.accumulo.cluster.ClusterUser;
 import org.apache.accumulo.core.client.AccumuloException;
@@ -65,12 +66,16 @@ public class CredentialsIT extends AccumuloClusterIT {
     ClusterUser user = getUser(0);
     username = user.getPrincipal();
     saslEnabled = clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false);
-    PasswordToken passwdToken = null;
-    if (!saslEnabled) {
-      password = user.getPassword();
-      passwdToken = new PasswordToken(password);
+    // Create the user if it doesn't exist
+    Set<String> users = conn.securityOperations().listLocalUsers();
+    if (!users.contains(username)) {
+      PasswordToken passwdToken = null;
+      if (!saslEnabled) {
+        password = user.getPassword();
+        passwdToken = new PasswordToken(password);
+      }
+      conn.securityOperations().createLocalUser(username, passwdToken);
     }
-    conn.securityOperations().createLocalUser(username, passwdToken);
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
index 7e562b4..5ec0bee 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
@@ -31,6 +31,7 @@ import java.util.Map.Entry;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
 import org.apache.accumulo.cluster.standalone.StandaloneClusterControl;
 import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.client.BatchScanner;
@@ -101,8 +102,7 @@ public class ExamplesIT extends AccumuloClusterIT {
   private static final Logger log = Logger.getLogger(ExamplesIT.class);
   private static final BatchWriterOpts bwOpts = new BatchWriterOpts();
   private static final BatchWriterConfig bwc = new BatchWriterConfig();
-  // quoted to make sure the shell doesn't interpret the pipe
-  private static final String visibility = "\"A|B\"";
+  private static final String visibility = "A|B";
   private static final String auths = "A,B";
 
   Connector c;
@@ -218,26 +218,47 @@ public class ExamplesIT extends AccumuloClusterIT {
   public void testDirList() throws Exception {
     String[] names = getUniqueNames(3);
     String dirTable = names[0], indexTable = names[1], dataTable = names[2];
-    Path scratch = new Path(getUsableDir(), getClass().getName());
-    cluster.getFileSystem().delete(scratch, true);
-    cluster.getFileSystem().mkdirs(scratch);
     String[] args;
+    String dirListDirectory;
+    switch (getClusterType()) {
+      case MINI:
+        dirListDirectory = ((MiniAccumuloClusterImpl) getCluster()).getConfig().getDir().getAbsolutePath();
+        break;
+      case STANDALONE:
+        dirListDirectory = ((StandaloneAccumuloCluster) getCluster()).getAccumuloHome();
+        break;
+      default:
+        throw new RuntimeException("Unknown cluster type");
+    }
+    // Index a directory listing on /tmp. If this is running against a standalone cluster, we can't guarantee Accumulo source will be there.
     if (saslEnabled) {
       args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable",
-          dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), scratch.toString()};
+          dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), dirListDirectory};
     } else {
       args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable",
-          dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), scratch.toString()};
+          dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), dirListDirectory};
     }
     Entry<Integer,String> entry = getClusterControl().execWithStdout(Ingest.class, args);
     assertEquals("Got non-zero return code. Stdout=" + entry.getValue(), 0, entry.getKey().intValue());
 
+    String expectedFile;
+    switch (getClusterType()) {
+      case MINI:
+        // Should be present in a minicluster dir
+        expectedFile = "accumulo-site.xml";
+        break;
+      case STANDALONE:
+        // Should be in place on standalone installs (not having to follow symlinks)
+        expectedFile = "LICENSE";
+        break;
+      default:
+        throw new RuntimeException("Unknown cluster type");
+    }
     if (saslEnabled) {
       args = new String[] {"-i", instance, "-z", keepers, "--keytab", keytab, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path",
-          "accumulo-site.xml"};
+          expectedFile};
     } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-p", passwd, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path",
-          "accumulo-site.xml"};
+      args = new String[] {"-i", instance, "-z", keepers, "-p", passwd, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path", expectedFile};
     }
     entry = getClusterControl().execWithStdout(QueryUtil.class, args);
     if (ClusterType.MINI == getClusterType()) {
@@ -249,7 +270,7 @@ public class ExamplesIT extends AccumuloClusterIT {
 
     log.info("result " + entry.getValue());
     assertEquals(0, entry.getKey().intValue());
-    assertTrue(entry.getValue().contains("accumulo-site.xml"));
+    assertTrue(entry.getValue().contains(expectedFile));
   }
 
   @Test
@@ -507,7 +528,7 @@ public class ExamplesIT extends AccumuloClusterIT {
     String tableName = getUniqueNames(1)[0];
     String[] args;
     if (saslEnabled) {
-      args = new String[]{"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName};
+      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName};
     } else {
       args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName};
     }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
index 0b8046d..099743d 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
@@ -25,8 +25,10 @@ import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.net.URL;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
@@ -39,6 +41,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.accumulo.cluster.ClusterControl;
+import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.cli.ScannerOpts;
@@ -72,6 +75,7 @@ import org.apache.accumulo.minicluster.ServerType;
 import org.apache.accumulo.test.TestIngest;
 import org.apache.accumulo.test.TestMultiTableIngest;
 import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -370,7 +374,17 @@ public class ReadWriteIT extends AccumuloClusterIT {
       PrintStream oldOut = System.out;
       try {
         System.setOut(newOut);
-        PrintInfo.main(new String[] {entry.getKey().getColumnQualifier().toString()});
+        List<String> args = new ArrayList<>();
+        args.add(entry.getKey().getColumnQualifier().toString());
+        if (ClusterType.STANDALONE == getClusterType() && cluster.getClientConfig().getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+          args.add("--config");
+          StandaloneAccumuloCluster sac = (StandaloneAccumuloCluster) cluster;
+          String hadoopConfDir = sac.getHadoopConfDir();
+          args.add(new Path(hadoopConfDir, "core-site.xml").toString());
+          args.add(new Path(hadoopConfDir, "hdfs-site.xml").toString());
+        }
+        log.info("Invoking PrintInfo with " + args);
+        PrintInfo.main(args.toArray(new String[args.size()]));
         newOut.flush();
         String stdout = baos.toString();
         assertTrue(stdout.contains("Locality group         : g1"));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/java/org/apache/accumulo/test/functional/ScanIdIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanIdIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanIdIT.java
index de4a47e..7216c2d 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanIdIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ScanIdIT.java
@@ -29,6 +29,7 @@ import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
@@ -104,12 +105,18 @@ public class ScanIdIT extends AccumuloClusterIT {
 
     addSplits(conn, tableName);
 
+    log.info("Splits added");
+
     generateSampleData(conn, tableName);
 
+    log.info("Generated data for {}", tableName);
+
     attachSlowIterator(conn, tableName);
 
+    CountDownLatch latch = new CountDownLatch(NUM_SCANNERS);
+
     for (int scannerIndex = 0; scannerIndex < NUM_SCANNERS; scannerIndex++) {
-      ScannerThread st = new ScannerThread(conn, scannerIndex, tableName);
+      ScannerThread st = new ScannerThread(conn, scannerIndex, tableName, latch);
       pool.submit(st);
     }
 
@@ -150,7 +157,7 @@ public class ScanIdIT extends AccumuloClusterIT {
       }
     }
 
-    assertTrue(NUM_SCANNERS <= scanIds.size());
+    assertTrue("Expected at least " + NUM_SCANNERS + " scanIds, but saw " + scanIds.size(), NUM_SCANNERS <= scanIds.size());
 
   }
 
@@ -165,13 +172,13 @@ public class ScanIdIT extends AccumuloClusterIT {
     private Scanner scanner = null;
     private final int workerIndex;
     private final String tablename;
+    private final CountDownLatch latch;
 
-    public ScannerThread(final Connector connector, final int workerIndex, final String tablename) {
-
+    public ScannerThread(final Connector connector, final int workerIndex, final String tablename, final CountDownLatch latch) {
       this.connector = connector;
       this.workerIndex = workerIndex;
       this.tablename = tablename;
-
+      this.latch = latch;
     }
 
     /**
@@ -180,15 +187,16 @@ public class ScanIdIT extends AccumuloClusterIT {
     @Override
     public void run() {
 
-      /*
-       * set random initial delay of up to to allow scanners to proceed to different points.
-       */
-
-      long delay = random.nextInt(5000);
-
-      log.trace("Start delay for worker thread {} is {}", workerIndex, delay);
+      latch.countDown();
+      try {
+        latch.await();
+      } catch (InterruptedException e) {
+        log.error("Thread interrupted with id {}", workerIndex);
+        Thread.currentThread().interrupt();
+        return;
+      }
 
-      UtilWaitThread.sleep(delay);
+      log.debug("Creating scanner in worker thread {}", workerIndex);
 
       try {
 
@@ -219,7 +227,7 @@ public class ScanIdIT extends AccumuloClusterIT {
 
         Text row = entry.getKey().getRow();
 
-        log.trace("worker {}, row {}", workerIndex, row.toString());
+        log.debug("worker {}, row {}", workerIndex, row.toString());
 
         if (entry.getValue() != null) {
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01558a4f/test/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/test/src/test/resources/log4j.properties b/test/src/test/resources/log4j.properties
index 40ad717..26ea762 100644
--- a/test/src/test/resources/log4j.properties
+++ b/test/src/test/resources/log4j.properties
@@ -52,3 +52,4 @@ log4j.logger.org.apache.thrift.transport.TSaslTransport=INFO
 # From apache-ds/minikdc
 log4j.logger.org.apache.mina=INFO
 log4j.logger.org.apache.accumulo.server.thrift.UGIAssumingProcessor=TRACE
+log4j.logger.org.apache.hadoop.security.UserGroupInformation=INFO


Mime
View raw message