accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mwa...@apache.org
Subject [accumulo] branch master updated: Refactored AccumuloClient variable names (#664)
Date Wed, 26 Sep 2018 21:41:54 GMT
This is an automated email from the ASF dual-hosted git repository.

mwalch pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/master by this push:
     new 8da1ff5  Refactored AccumuloClient variable names (#664)
8da1ff5 is described below

commit 8da1ff5becfd7cf2b00c50234c68ec87ef54bf3b
Author: Mike Walch <mwalch@apache.org>
AuthorDate: Wed Sep 26 17:41:48 2018 -0400

    Refactored AccumuloClient variable names (#664)
    
    * Removed unused code
---
 .../core/client/mapred/AbstractInputFormat.java    |  17 +-
 .../core/client/mapred/AccumuloOutputFormat.java   |  10 +-
 .../core/client/mapreduce/AbstractInputFormat.java |  16 +-
 .../client/mapreduce/AccumuloOutputFormat.java     |  10 +-
 .../mapreduce/lib/impl/ConfiguratorBase.java       |   4 +-
 .../mapreduce/lib/impl/InputConfigurator.java      |  10 +-
 .../mapreduce/lib/impl/MapReduceClientOpts.java    |   8 +-
 .../core/client/ClientSideIteratorScanner.java     |   2 +-
 .../apache/accumulo/core/client/RowIterator.java   |   2 +-
 .../apache/accumulo/core/client/ScannerBase.java   |   2 +-
 .../core/client/admin/TableOperations.java         |   2 +-
 .../accumulo/core/client/impl/ClientContext.java   |  10 +-
 .../accumulo/core/client/impl/OfflineIterator.java |  10 +-
 .../client/impl/ReplicationOperationsImpl.java     |  14 +-
 .../core/metadata/schema/MetadataScanner.java      |  12 +-
 .../core/replication/ReplicationTable.java         |  20 +-
 .../java/org/apache/accumulo/core/util/Merge.java  |  38 +--
 .../core/metadata/MetadataServicerTest.java        |   8 +-
 .../org/apache/accumulo/core/util/MergeTest.java   |   4 +-
 .../apache/accumulo/cluster/AccumuloCluster.java   |   9 -
 .../standalone/StandaloneAccumuloCluster.java      |   8 -
 .../minicluster/impl/MiniAccumuloClusterImpl.java  |   8 -
 .../impl/MiniAccumuloClusterImplTest.java          |   6 +-
 .../org/apache/accumulo/server/ServerContext.java  |   6 +-
 .../replication/PrintReplicationRecords.java       |  10 +-
 .../server/replication/ReplicationUtil.java        |  14 +-
 .../accumulo/server/replication/WorkAssigner.java  |   2 +-
 .../org/apache/accumulo/server/util/Admin.java     |   6 +-
 .../accumulo/server/util/MetadataTableUtil.java    |  34 +-
 .../accumulo/server/util/ReplicationTableUtil.java |  10 +-
 .../accumulo/server/util/TableDiskUsage.java       |  20 +-
 .../server/util/VerifyTabletAssignments.java       |   6 +-
 .../server/util/ReplicationTableUtilTest.java      |  10 +-
 .../accumulo/gc/GarbageCollectWriteAheadLogs.java  |   8 +-
 .../apache/accumulo/gc/SimpleGarbageCollector.java |   4 +-
 .../replication/CloseWriteAheadLogReferences.java  |  20 +-
 .../gc/GarbageCollectWriteAheadLogsTest.java       |  36 +-
 .../master/MasterClientServiceHandler.java         |  16 +-
 .../apache/accumulo/master/TabletGroupWatcher.java |  40 +--
 .../DistributedWorkQueueWorkAssigner.java          |  18 +-
 .../master/replication/FinishedWorkUpdater.java    |  12 +-
 .../RemoveCompleteReplicationRecords.java          |  18 +-
 .../master/replication/ReplicationDriver.java      |  16 +-
 .../master/replication/SequentialWorkAssigner.java |   6 +-
 .../accumulo/master/replication/StatusMaker.java   |  14 +-
 .../master/replication/UnorderedWorkAssigner.java  |   6 +-
 .../accumulo/master/replication/WorkDriver.java    |  12 +-
 .../accumulo/master/replication/WorkMaker.java     |  12 +-
 .../apache/accumulo/master/state/MergeStats.java   |   6 +-
 .../apache/accumulo/master/tableOps/CleanUp.java   |   5 +-
 .../accumulo/master/tableOps/CompactionDriver.java |   6 +-
 .../accumulo/master/tableOps/WriteExportFiles.java |  10 +-
 .../tableOps/bulkVer1/CleanUpBulkImport.java       |   4 +-
 .../master/tableOps/bulkVer1/CopyFailed.java       |   4 +-
 .../tableOps/bulkVer2/CleanUpBulkImport.java       |   4 +-
 .../replication/SequentialWorkAssignerTest.java    |  14 +-
 .../replication/UnorderedWorkAssignerTest.java     |  12 +-
 .../rest/replication/ReplicationResource.java      |   8 +-
 .../monitor/rest/trace/TracesResource.java         |   7 +-
 .../java/org/apache/accumulo/tracer/TraceDump.java |   8 +-
 .../apache/accumulo/tracer/TraceTableStats.java    |   4 +-
 .../replication/ReplicationServicerHandler.java    |   2 +-
 .../BatchWriterReplicationReplayerTest.java        |  16 +-
 .../accumulo/shell/commands/SummariesCommand.java  |   4 +-
 .../shell/commands/DeleteAuthsCommandTest.java     |  42 +--
 .../shell/commands/DropUserCommandTest.java        |  14 +-
 .../shell/commands/SetIterCommandTest.java         |  18 +-
 .../accumulo/harness/AccumuloClusterHarness.java   |  32 +-
 .../accumulo/harness/SharedMiniClusterBase.java    |  14 +-
 .../accumulo/test/ArbitraryTablePropertiesIT.java  |  36 +-
 .../org/apache/accumulo/test/AuditMessageIT.java   |  63 ++--
 .../accumulo/test/BadDeleteMarkersCreatedIT.java   |   6 +-
 .../org/apache/accumulo/test/BalanceFasterIT.java  |   8 +-
 .../java/org/apache/accumulo/test/CleanWalIT.java  |  48 +--
 .../apache/accumulo/test/ClientSideIteratorIT.java |  22 +-
 .../java/org/apache/accumulo/test/CloneIT.java     | 106 +++---
 .../accumulo/test/CompactionRateLimitingIT.java    |  11 +-
 .../apache/accumulo/test/ConditionalWriterIT.java  | 229 ++++++-------
 .../test/ConfigurableMajorCompactionIT.java        |  32 +-
 .../org/apache/accumulo/test/ExistingMacIT.java    |  25 +-
 .../java/org/apache/accumulo/test/FindMaxIT.java   |  28 +-
 .../org/apache/accumulo/test/IMMLGBenchmark.java   |  38 +--
 .../org/apache/accumulo/test/ImportExportIT.java   |  28 +-
 .../accumulo/test/InterruptibleScannersIT.java     |  10 +-
 .../accumulo/test/IsolationAndDeepCopyIT.java      |  10 +-
 .../apache/accumulo/test/KeyValueEqualityIT.java   |  12 +-
 .../org/apache/accumulo/test/LargeSplitRowIT.java  |  67 ++--
 .../java/org/apache/accumulo/test/LocatorIT.java   |  24 +-
 .../java/org/apache/accumulo/test/MetaSplitIT.java |  14 +-
 .../test/MissingWalHeaderCompletesRecoveryIT.java  |  46 +--
 .../org/apache/accumulo/test/NamespacesIT.java     |   2 +-
 .../accumulo/test/NewTableConfigurationIT.java     | 175 +++++-----
 .../org/apache/accumulo/test/OrIteratorIT.java     |  54 +--
 .../apache/accumulo/test/QueryMetadataTable.java   |   8 +-
 .../java/org/apache/accumulo/test/SampleIT.java    | 101 +++---
 .../org/apache/accumulo/test/ShellConfigIT.java    |  10 +-
 .../org/apache/accumulo/test/ShellServerIT.java    |  12 +-
 .../accumulo/test/TableConfigurationUpdateIT.java  |   4 +-
 .../accumulo/test/TabletServerGivesUpIT.java       |  10 +-
 .../accumulo/test/TabletServerHdfsRestartIT.java   |  13 +-
 .../java/org/apache/accumulo/test/TestIngest.java  |   8 +-
 .../apache/accumulo/test/TestMultiTableIngest.java |   6 +-
 .../apache/accumulo/test/TestRandomDeletes.java    |   4 +-
 .../test/TracerRecoversAfterOfflineTableIT.java    |  16 +-
 .../java/org/apache/accumulo/test/UnusedWALIT.java |   4 +-
 .../accumulo/test/UserCompactionStrategyIT.java    |   6 +-
 .../java/org/apache/accumulo/test/UsersIT.java     |   8 +-
 .../java/org/apache/accumulo/test/VolumeIT.java    |  34 +-
 .../org/apache/accumulo/test/YieldScannersIT.java  |  16 +-
 .../accumulo/test/ZooKeeperPropertiesIT.java       |   4 +-
 .../accumulo/test/functional/AccumuloClientIT.java |  42 +--
 .../BalanceInPresenceOfOfflineTableIT.java         |   4 +-
 .../accumulo/test/functional/BulkLoadIT.java       |   3 +-
 .../test/functional/BulkSplitOptimizationIT.java   |  10 +-
 .../accumulo/test/functional/CloneTestIT.java      |  20 +-
 .../accumulo/test/functional/ConcurrencyIT.java    |   4 +-
 .../test/functional/ConfigurableCompactionIT.java  |  12 +-
 .../test/functional/CreateInitialSplitsIT.java     |  76 ++---
 .../accumulo/test/functional/CredentialsIT.java    |   6 +-
 .../test/functional/DeleteRowsSplitIT.java         |  18 +-
 .../test/functional/GarbageCollectorIT.java        |  10 +-
 .../accumulo/test/functional/KerberosIT.java       | 160 ++++-----
 .../accumulo/test/functional/KerberosProxyIT.java  |  24 +-
 .../test/functional/KerberosRenewalIT.java         |  20 +-
 .../accumulo/test/functional/LargeRowIT.java       |   4 +-
 .../accumulo/test/functional/LogicalTimeIT.java    |  13 +-
 .../apache/accumulo/test/functional/MergeIT.java   |  15 +-
 .../accumulo/test/functional/PermissionsIT.java    | 228 ++++++-------
 .../test/functional/RegexGroupBalanceIT.java       |  31 +-
 .../apache/accumulo/test/functional/ScanIdIT.java  |  34 +-
 .../functional/TabletStateChangeIteratorIT.java    |  10 +-
 .../apache/accumulo/test/functional/TimeoutIT.java |  16 +-
 .../accumulo/test/functional/WALSunnyDayIT.java    |   4 +-
 .../CloseWriteAheadLogReferencesIT.java            |  32 +-
 .../test/mapred/AccumuloRowInputFormatIT.java      |   6 +-
 .../test/mapreduce/AccumuloInputFormatIT.java      |  14 +-
 .../test/mapreduce/AccumuloRowInputFormatIT.java   |   6 +-
 .../accumulo/test/master/SuspendedTabletsIT.java   |   8 +-
 .../test/performance/ContinuousIngest.java         |   6 +-
 .../test/performance/scan/CollectTabletStats.java  |  16 +-
 .../test/replication/CyclicReplicationIT.java      |  62 ++--
 .../test/replication/FinishedWorkUpdaterIT.java    |  36 +-
 ...GarbageCollectorCommunicatesWithTServersIT.java |  61 ++--
 .../test/replication/KerberosReplicationIT.java    |  46 +--
 .../replication/MultiInstanceReplicationIT.java    | 207 ++++++------
 .../replication/MultiTserverReplicationIT.java     |  20 +-
 .../RemoveCompleteReplicationRecordsIT.java        |  66 ++--
 .../accumulo/test/replication/ReplicationIT.java   | 376 ++++++++++-----------
 .../replication/ReplicationOperationsImplIT.java   |  72 ++--
 .../ReplicationTablesPrinterThread.java            |   4 +-
 .../test/replication/SequentialWorkAssignerIT.java |  28 +-
 .../test/replication/StatusCombinerMacIT.java      |  14 +-
 .../accumulo/test/replication/StatusMakerIT.java   |  70 ++--
 .../test/replication/UnorderedWorkAssignerIT.java  |  26 +-
 .../UnorderedWorkAssignerReplicationIT.java        | 203 +++++------
 .../UnusedWalDoesntCloseReplicationStatusIT.java   |  36 +-
 .../accumulo/test/replication/WorkMakerIT.java     |  52 +--
 .../test/server/security/SystemCredentialsIT.java  |   6 +-
 158 files changed, 2206 insertions(+), 2209 deletions(-)

diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
index f9cd3a9..6d606c7 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
@@ -181,9 +181,9 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
     if (token instanceof KerberosToken) {
       log.info("Received KerberosToken, attempting to fetch DelegationToken");
       try {
-        AccumuloClient conn = Accumulo.newClient().usingClientInfo(getClientInfo(job))
+        AccumuloClient client = Accumulo.newClient().usingClientInfo(getClientInfo(job))
             .usingToken(principal, token).build();
-        token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+        token = client.securityOperations().getDelegationToken(new DelegationTokenConfig());
       } catch (Exception e) {
         log.warn("Failed to automatically obtain DelegationToken, Mappers/Reducers will likely"
             + " fail to communicate with Accumulo", e);
@@ -381,8 +381,8 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
    * @since 1.5.0
    */
   protected static void validateOptions(JobConf job) throws IOException {
-    AccumuloClient conn = InputConfigurator.getClient(CLASS, job);
-    InputConfigurator.validatePermissions(CLASS, job, conn);
+    AccumuloClient client = InputConfigurator.getClient(CLASS, job);
+    InputConfigurator.validatePermissions(CLASS, job, client);
   }
 
   /**
@@ -482,9 +482,9 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
       log.debug("Initializing input split: " + baseSplit);
 
       ClientContext context = new ClientContext(getClientInfo(job));
-      AccumuloClient conn;
+      AccumuloClient client;
       try {
-        conn = context.getClient();
+        client = context.getClient();
       } catch (AccumuloException | AccumuloSecurityException e) {
         throw new IllegalStateException(e);
       }
@@ -496,7 +496,7 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
       // configuration, but the scanner will use the table id resolved at job setup time
       InputTableConfig tableConfig = getInputTableConfig(job, baseSplit.getTableName());
 
-      log.debug("Created connector with user: " + conn.whoami());
+      log.debug("Created client with user: " + client.whoami());
       log.debug("Creating scanner for table: " + table);
       log.debug("Authorizations are: " + authorizations);
 
@@ -508,7 +508,8 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
           // Note: BatchScanner will use at most one thread per tablet, currently BatchInputSplit
           // will not span tablets
           int scanThreads = 1;
-          scanner = conn.createBatchScanner(baseSplit.getTableName(), authorizations, scanThreads);
+          scanner = client.createBatchScanner(baseSplit.getTableName(), authorizations,
+              scanThreads);
           setupIterators(job, scanner, baseSplit.getTableName(), baseSplit);
           if (null != classLoaderContext) {
             scanner.setClassLoaderContext(classLoaderContext);
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java
index 244cc3e..dd3c7e8 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java
@@ -408,7 +408,7 @@ public class AccumuloOutputFormat implements OutputFormat<Text,Mutation> {
     private long mutCount = 0;
     private long valCount = 0;
 
-    private AccumuloClient conn;
+    private AccumuloClient client;
 
     protected AccumuloRecordWriter(JobConf job)
         throws AccumuloException, AccumuloSecurityException, IOException {
@@ -427,8 +427,8 @@ public class AccumuloOutputFormat implements OutputFormat<Text,Mutation> {
       this.defaultTableName = (tname == null) ? null : new Text(tname);
 
       if (!simulate) {
-        this.conn = Accumulo.newClient().usingClientInfo(getClientInfo(job)).build();
-        mtbw = conn.createMultiTableBatchWriter(getBatchWriterOptions(job));
+        this.client = Accumulo.newClient().usingClientInfo(getClientInfo(job)).build();
+        mtbw = client.createMultiTableBatchWriter(getBatchWriterOptions(job));
       }
     }
 
@@ -477,9 +477,9 @@ public class AccumuloOutputFormat implements OutputFormat<Text,Mutation> {
       BatchWriter bw = null;
       String table = tableName.toString();
 
-      if (createTables && !conn.tableOperations().exists(table)) {
+      if (createTables && !client.tableOperations().exists(table)) {
         try {
-          conn.tableOperations().create(table);
+          client.tableOperations().create(table);
         } catch (AccumuloSecurityException e) {
           log.error("Accumulo security violation creating " + table, e);
           throw e;
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
index 200b3d5..9e9dd6b 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
@@ -183,9 +183,9 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
     if (token instanceof KerberosToken) {
       log.info("Received KerberosToken, attempting to fetch DelegationToken");
       try {
-        AccumuloClient conn = Accumulo.newClient().usingClientInfo(getClientInfo(job))
+        AccumuloClient client = Accumulo.newClient().usingClientInfo(getClientInfo(job))
             .usingToken(principal, token).build();
-        token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+        token = client.securityOperations().getDelegationToken(new DelegationTokenConfig());
       } catch (Exception e) {
         log.warn("Failed to automatically obtain DelegationToken, "
             + "Mappers/Reducers will likely fail to communicate with Accumulo", e);
@@ -398,8 +398,8 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
    * @since 1.5.0
    */
   protected static void validateOptions(JobContext context) throws IOException {
-    AccumuloClient conn = InputConfigurator.getClient(CLASS, context.getConfiguration());
-    InputConfigurator.validatePermissions(CLASS, context.getConfiguration(), conn);
+    AccumuloClient client = InputConfigurator.getClient(CLASS, context.getConfiguration());
+    InputConfigurator.validatePermissions(CLASS, context.getConfiguration(), client);
   }
 
   /**
@@ -489,9 +489,9 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
 
       ClientInfo info = getClientInfo(attempt);
       ClientContext context = new ClientContext(info);
-      AccumuloClient conn;
+      AccumuloClient client;
       try {
-        conn = context.getClient();
+        client = context.getClient();
       } catch (AccumuloException | AccumuloSecurityException e) {
         throw new IllegalStateException(e);
       }
@@ -504,7 +504,7 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
       // but the scanner will use the table id resolved at job setup time
       InputTableConfig tableConfig = getInputTableConfig(attempt, split.getTableName());
 
-      log.debug("Creating connector with user: " + info.getPrincipal());
+      log.debug("Creating client with user: " + info.getPrincipal());
       log.debug("Creating scanner for table: " + table);
       log.debug("Authorizations are: " + authorizations);
 
@@ -516,7 +516,7 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
           // Note: BatchScanner will use at most one thread per tablet, currently BatchInputSplit
           // will not span tablets
           int scanThreads = 1;
-          scanner = conn.createBatchScanner(split.getTableName(), authorizations, scanThreads);
+          scanner = client.createBatchScanner(split.getTableName(), authorizations, scanThreads);
           setupIterators(attempt, scanner, split.getTableName(), split);
           if (null != classLoaderContext) {
             scanner.setClassLoaderContext(classLoaderContext);
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
index c8b2e0c..c07846e 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
@@ -411,7 +411,7 @@ public class AccumuloOutputFormat extends OutputFormat<Text,Mutation> {
     private long mutCount = 0;
     private long valCount = 0;
 
-    private AccumuloClient conn;
+    private AccumuloClient client;
 
     protected AccumuloRecordWriter(TaskAttemptContext context)
         throws AccumuloException, AccumuloSecurityException, IOException {
@@ -430,8 +430,8 @@ public class AccumuloOutputFormat extends OutputFormat<Text,Mutation> {
       this.defaultTableName = (tname == null) ? null : new Text(tname);
 
       if (!simulate) {
-        this.conn = Accumulo.newClient().usingClientInfo(getClientInfo(context)).build();
-        mtbw = conn.createMultiTableBatchWriter(getBatchWriterOptions(context));
+        this.client = Accumulo.newClient().usingClientInfo(getClientInfo(context)).build();
+        mtbw = client.createMultiTableBatchWriter(getBatchWriterOptions(context));
       }
     }
 
@@ -480,9 +480,9 @@ public class AccumuloOutputFormat extends OutputFormat<Text,Mutation> {
       BatchWriter bw = null;
       String table = tableName.toString();
 
-      if (createTables && !conn.tableOperations().exists(table)) {
+      if (createTables && !client.tableOperations().exists(table)) {
         try {
-          conn.tableOperations().create(table);
+          client.tableOperations().create(table);
         } catch (AccumuloSecurityException e) {
           log.error("Accumulo security violation creating " + table, e);
           throw e;
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
index 826b783..c8b529d 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
@@ -120,8 +120,8 @@ public class ConfiguratorBase {
     if (info.getAuthenticationToken() instanceof KerberosToken) {
       log.info("Received KerberosToken, attempting to fetch DelegationToken");
       try {
-        AccumuloClient conn = Accumulo.newClient().usingClientInfo(info).build();
-        AuthenticationToken token = conn.securityOperations()
+        AccumuloClient client = Accumulo.newClient().usingClientInfo(info).build();
+        AuthenticationToken token = client.securityOperations()
             .getDelegationToken(new DelegationTokenConfig());
         result = Accumulo.newClient().usingClientInfo(info).usingToken(info.getPrincipal(), token)
             .info();
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
index 77680c6..d381b6d 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
@@ -734,12 +734,12 @@ public class InputConfigurator extends ConfiguratorBase {
    *          the class whose name will be used as a prefix for the property configuration key
    * @param conf
    *          the Hadoop configuration object to configure
-   * @param conn
-   *          the Connector
+   * @param client
+   *          the Accumulo client
    * @since 1.7.0
    */
   public static void validatePermissions(Class<?> implementingClass, Configuration conf,
-      AccumuloClient conn) throws IOException {
+      AccumuloClient client) throws IOException {
     Map<String,InputTableConfig> inputTableConfigs = getInputTableConfigs(implementingClass, conf);
     try {
       if (getInputTableConfigs(implementingClass, conf).size() == 0)
@@ -751,7 +751,7 @@ public class InputConfigurator extends ConfiguratorBase {
       }
 
       for (Map.Entry<String,InputTableConfig> tableConfig : inputTableConfigs.entrySet()) {
-        if (!conn.securityOperations().hasTablePermission(principal, tableConfig.getKey(),
+        if (!client.securityOperations().hasTablePermission(principal, tableConfig.getKey(),
             TablePermission.READ))
           throw new IOException("Unable to access table");
       }
@@ -760,7 +760,7 @@ public class InputConfigurator extends ConfiguratorBase {
         if (!tableConfig.shouldUseLocalIterators()) {
           if (tableConfig.getIterators() != null) {
             for (IteratorSetting iter : tableConfig.getIterators()) {
-              if (!conn.tableOperations().testClassLoad(tableConfigEntry.getKey(),
+              if (!client.tableOperations().testClassLoad(tableConfigEntry.getKey(),
                   iter.getIteratorClass(), SortedKeyValueIterator.class.getName()))
                 throw new AccumuloException("Servers are unable to load " + iter.getIteratorClass()
                     + " as a " + SortedKeyValueIterator.class.getName());
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/MapReduceClientOpts.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/MapReduceClientOpts.java
index 2e6eeb6..fae11f3 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/MapReduceClientOpts.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/MapReduceClientOpts.java
@@ -61,11 +61,11 @@ public class MapReduceClientOpts extends ClientOpts {
         log.info("Obtaining delegation token for {}", newPrincipal);
 
         setPrincipal(newPrincipal);
-        AccumuloClient conn = Accumulo.newClient().usingClientInfo(getClientInfo())
+        AccumuloClient client = Accumulo.newClient().usingClientInfo(getClientInfo())
             .usingToken(newPrincipal, krbToken).build();
 
         // Do the explicit check to see if the user has the permission to get a delegation token
-        if (!conn.securityOperations().hasSystemPermission(conn.whoami(),
+        if (!client.securityOperations().hasSystemPermission(client.whoami(),
             SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
           log.error(
               "{} doesn't have the {} SystemPermission neccesary to obtain a delegation"
@@ -74,11 +74,11 @@ public class MapReduceClientOpts extends ClientOpts {
                   + " MapReduce without distributing the user's credentials.",
               user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
           throw new IllegalStateException(
-              conn.whoami() + " does not have permission to obtain a delegation token");
+              client.whoami() + " does not have permission to obtain a delegation token");
         }
 
         // Get the delegation token from Accumulo
-        return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+        return client.securityOperations().getDelegationToken(new DelegationTokenConfig());
       } catch (Exception e) {
         final String msg = "Failed to acquire DelegationToken for use with MapReduce";
         log.error(msg, e);
diff --git a/core/src/main/java/org/apache/accumulo/core/client/ClientSideIteratorScanner.java b/core/src/main/java/org/apache/accumulo/core/client/ClientSideIteratorScanner.java
index e0f86d1..c9ebb8e 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/ClientSideIteratorScanner.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/ClientSideIteratorScanner.java
@@ -57,7 +57,7 @@ import org.apache.hadoop.io.Text;
  *
  * <pre>
  * <code>
- * Scanner scanner = connector.createScanner(tableName, authorizations);
+ * Scanner scanner = client.createScanner(tableName, authorizations);
  * scanner = new ClientSideIteratorScanner(scanner);
  * </code>
  * </pre>
diff --git a/core/src/main/java/org/apache/accumulo/core/client/RowIterator.java b/core/src/main/java/org/apache/accumulo/core/client/RowIterator.java
index 6fc1455..014110f 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/RowIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/RowIterator.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.io.Text;
  * Group Key/Value pairs into Iterators over rows. Suggested usage:
  *
  * <pre>
- * RowIterator rowIterator = new RowIterator(connector.createScanner(tableName, authorizations));
+ * RowIterator rowIterator = new RowIterator(client.createScanner(tableName, authorizations));
  * </pre>
  */
 public class RowIterator implements Iterator<Iterator<Entry<Key,Value>>> {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/ScannerBase.java b/core/src/main/java/org/apache/accumulo/core/client/ScannerBase.java
index c32618b..cd7b5e9 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/ScannerBase.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/ScannerBase.java
@@ -207,7 +207,7 @@ public interface ScannerBase extends Iterable<Entry<Key,Value>>, AutoCloseable {
    * <code>
    *   // could cache this if creating many scanners to avoid RPCs.
    *   SamplerConfiguration samplerConfig =
-   *     connector.tableOperations().getSamplerConfiguration(table);
+   *     client.tableOperations().getSamplerConfiguration(table);
    *   // verify table's sample data is generated in an expected way before using
    *   userCode.verifySamplerConfig(samplerConfig);
    *   scanner.setSamplerConfiguration(samplerConfig);
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java b/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java
index e18cc57..0eff1ed 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java
@@ -177,7 +177,7 @@ public interface TableOperations {
    * the given byte sequences are not valid UTF-8.
    *
    * <pre>
-   * TableOperations tableOps = connector.tableOperations();
+   * TableOperations tableOps = client.tableOperations();
    * TreeSet&lt;Text&gt; splits = new TreeSet&lt;Text&gt;();
    * for (int i = 0; i &lt; 256; i++) {
    *   byte[] bytes = {(byte) i};
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ClientContext.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ClientContext.java
index 567d192..b1cdf05 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ClientContext.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ClientContext.java
@@ -68,7 +68,7 @@ public class ClientContext {
   private Credentials creds;
   private BatchWriterConfig batchWriterConfig;
   private AccumuloConfiguration serverConf;
-  protected AccumuloClient conn;
+  protected AccumuloClient client;
 
   // These fields are very frequently accessed (each time a connection is created) and expensive to
   // compute, so cache them.
@@ -210,14 +210,14 @@ public class ClientContext {
   }
 
   /**
-   * Retrieve a connector
+   * Retrieve an Accumulo client
    */
   public synchronized AccumuloClient getClient()
       throws AccumuloException, AccumuloSecurityException {
-    if (conn == null) {
-      conn = new AccumuloClientImpl(this);
+    if (client == null) {
+      client = new AccumuloClientImpl(this);
     }
-    return conn;
+    return client;
   }
 
   public BatchWriterConfig getBatchWriterConfig() {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineIterator.java b/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineIterator.java
index fc2f829..d20e820 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineIterator.java
@@ -151,7 +151,7 @@ class OfflineIterator implements Iterator<Entry<Key,Value>> {
   private SortedKeyValueIterator<Key,Value> iter;
   private Range range;
   private KeyExtent currentExtent;
-  private AccumuloClient conn;
+  private AccumuloClient client;
   private Table.ID tableId;
   private Authorizations authorizations;
   private ClientContext context;
@@ -175,8 +175,8 @@ class OfflineIterator implements Iterator<Entry<Key,Value>> {
     this.readers = new ArrayList<>();
 
     try {
-      conn = context.getClient();
-      config = new ConfigurationCopy(conn.instanceOperations().getSiteConfiguration());
+      client = context.getClient();
+      config = new ConfigurationCopy(client.instanceOperations().getSiteConfiguration());
       nextTablet();
 
       while (iter != null && !iter.hasTop())
@@ -297,7 +297,7 @@ class OfflineIterator implements Iterator<Entry<Key,Value>> {
 
   private Pair<KeyExtent,String> getTabletFiles(Range nextRange, List<String> relFiles)
       throws TableNotFoundException {
-    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    Scanner scanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     scanner.setBatchSize(100);
     scanner.setRange(nextRange);
 
@@ -336,7 +336,7 @@ class OfflineIterator implements Iterator<Entry<Key,Value>> {
     // possible race condition here, if table is renamed
     String tableName = Tables.getTableName(context, tableId);
     AccumuloConfiguration acuTableConf = new ConfigurationCopy(
-        conn.tableOperations().getProperties(tableName));
+        client.tableOperations().getProperties(tableName));
 
     Configuration conf = CachedConfiguration.getInstance();
 
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ReplicationOperationsImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ReplicationOperationsImpl.java
index 9aeca78..7802b71 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ReplicationOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ReplicationOperationsImpl.java
@@ -120,11 +120,11 @@ public class ReplicationOperationsImpl implements ReplicationOperations {
         client -> client.drainReplicationTable(tinfo, rpcCreds, tableName, wals));
   }
 
-  protected Table.ID getTableId(AccumuloClient conn, String tableName)
+  protected Table.ID getTableId(AccumuloClient client, String tableName)
       throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    TableOperations tops = conn.tableOperations();
+    TableOperations tops = client.tableOperations();
 
-    if (!conn.tableOperations().exists(tableName)) {
+    if (!client.tableOperations().exists(tableName)) {
       throw new TableNotFoundException(null, tableName, null);
     }
 
@@ -146,13 +146,13 @@ public class ReplicationOperationsImpl implements ReplicationOperations {
 
     log.debug("Collecting referenced files for replication of table {}", tableName);
 
-    AccumuloClient conn = context.getClient();
-    Table.ID tableId = getTableId(conn, tableName);
+    AccumuloClient client = context.getClient();
+    Table.ID tableId = getTableId(client, tableName);
 
     log.debug("Found id of {} for name {}", tableId, tableName);
 
     // Get the WALs currently referenced by the table
-    BatchScanner metaBs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4);
+    BatchScanner metaBs = client.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4);
     metaBs.setRanges(Collections.singleton(MetadataSchema.TabletsSection.getRange(tableId)));
     metaBs.fetchColumnFamily(LogColumnFamily.NAME);
     Set<String> wals = new HashSet<>();
@@ -166,7 +166,7 @@ public class ReplicationOperationsImpl implements ReplicationOperations {
     }
 
     // And the WALs that need to be replicated for this table
-    metaBs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4);
+    metaBs = client.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4);
     metaBs.setRanges(Collections.singleton(ReplicationSection.getRange()));
     metaBs.fetchColumnFamily(ReplicationSection.COLF);
     try {
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataScanner.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataScanner.java
index 7e28630..7d7df91 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataScanner.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataScanner.java
@@ -60,7 +60,7 @@ public class MetadataScanner implements Iterable<TabletMetadata>, AutoCloseable
   public interface SourceOptions {
     TableOptions from(ClientContext ctx);
 
-    TableOptions from(AccumuloClient conn);
+    TableOptions from(AccumuloClient client);
   }
 
   public interface TableOptions {
@@ -147,7 +147,7 @@ public class MetadataScanner implements Iterable<TabletMetadata>, AutoCloseable
 
     private List<Text> families = new ArrayList<>();
     private List<ColumnFQ> qualifiers = new ArrayList<>();
-    private AccumuloClient conn;
+    private AccumuloClient client;
     private String table = MetadataTable.NAME;
     private Range range;
     private EnumSet<FetchedColumns> fetchedCols = EnumSet.noneOf(FetchedColumns.class);
@@ -223,7 +223,7 @@ public class MetadataScanner implements Iterable<TabletMetadata>, AutoCloseable
     public MetadataScanner build()
         throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
 
-      Scanner scanner = new IsolatedScanner(conn.createScanner(table, Authorizations.EMPTY));
+      Scanner scanner = new IsolatedScanner(client.createScanner(table, Authorizations.EMPTY));
       scanner.setRange(range);
 
       if (checkConsistency && !fetchedCols.contains(FetchedColumns.PREV_ROW)) {
@@ -257,7 +257,7 @@ public class MetadataScanner implements Iterable<TabletMetadata>, AutoCloseable
     @Override
     public TableOptions from(ClientContext ctx) {
       try {
-        this.conn = ctx.getClient();
+        this.client = ctx.getClient();
       } catch (AccumuloException | AccumuloSecurityException e) {
         throw new RuntimeException(e);
       }
@@ -265,8 +265,8 @@ public class MetadataScanner implements Iterable<TabletMetadata>, AutoCloseable
     }
 
     @Override
-    public TableOptions from(AccumuloClient conn) {
-      this.conn = conn;
+    public TableOptions from(AccumuloClient client) {
+      this.client = client;
       return this;
     }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTable.java b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTable.java
index 2a35b36..01594f9 100644
--- a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTable.java
@@ -58,9 +58,9 @@ public class ReplicationTable {
   public static final Map<String,Set<Text>> LOCALITY_GROUPS = ImmutableMap.of(STATUS_LG_NAME,
       STATUS_LG_COLFAMS, WORK_LG_NAME, WORK_LG_COLFAMS);
 
-  public static Scanner getScanner(AccumuloClient conn) throws ReplicationTableOfflineException {
+  public static Scanner getScanner(AccumuloClient client) throws ReplicationTableOfflineException {
     try {
-      return conn.createScanner(NAME, Authorizations.EMPTY);
+      return client.createScanner(NAME, Authorizations.EMPTY);
     } catch (TableNotFoundException e) {
       throw new AssertionError(NAME + " should exist, but doesn't.");
     } catch (TableOfflineException e) {
@@ -68,10 +68,10 @@ public class ReplicationTable {
     }
   }
 
-  public static BatchWriter getBatchWriter(AccumuloClient conn)
+  public static BatchWriter getBatchWriter(AccumuloClient client)
       throws ReplicationTableOfflineException {
     try {
-      return conn.createBatchWriter(NAME, new BatchWriterConfig());
+      return client.createBatchWriter(NAME, new BatchWriterConfig());
     } catch (TableNotFoundException e) {
       throw new AssertionError(NAME + " should exist, but doesn't.");
     } catch (TableOfflineException e) {
@@ -79,10 +79,10 @@ public class ReplicationTable {
     }
   }
 
-  public static BatchScanner getBatchScanner(AccumuloClient conn, int queryThreads)
+  public static BatchScanner getBatchScanner(AccumuloClient client, int queryThreads)
       throws ReplicationTableOfflineException {
     try {
-      return conn.createBatchScanner(NAME, Authorizations.EMPTY, queryThreads);
+      return client.createBatchScanner(NAME, Authorizations.EMPTY, queryThreads);
     } catch (TableNotFoundException e) {
       throw new AssertionError(NAME + " should exist, but doesn't.");
     } catch (TableOfflineException e) {
@@ -90,15 +90,15 @@ public class ReplicationTable {
     }
   }
 
-  public static boolean isOnline(AccumuloClient conn) {
-    return TableState.ONLINE == Tables.getTableState(new ClientContext(conn.info()), ID);
+  public static boolean isOnline(AccumuloClient client) {
+    return TableState.ONLINE == Tables.getTableState(new ClientContext(client.info()), ID);
   }
 
-  public static void setOnline(AccumuloClient conn)
+  public static void setOnline(AccumuloClient client)
       throws AccumuloSecurityException, AccumuloException {
     try {
       log.info("Bringing replication table online");
-      conn.tableOperations().online(NAME, true);
+      client.tableOperations().online(NAME, true);
     } catch (TableNotFoundException e) {
       throw new AssertionError(NAME + " should exist, but doesn't.");
     }
diff --git a/core/src/main/java/org/apache/accumulo/core/util/Merge.java b/core/src/main/java/org/apache/accumulo/core/util/Merge.java
index 49f1d31..a0c771a 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/Merge.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/Merge.java
@@ -88,20 +88,20 @@ public class Merge {
     opts.parseArgs(Merge.class.getName(), args);
 
     try {
-      AccumuloClient conn = opts.getClient();
+      AccumuloClient client = opts.getClient();
 
-      if (!conn.tableOperations().exists(opts.getTableName())) {
+      if (!client.tableOperations().exists(opts.getTableName())) {
         System.err.println("table " + opts.getTableName() + " does not exist");
         return;
       }
       if (opts.goalSize == null || opts.goalSize < 1) {
         AccumuloConfiguration tableConfig = new ConfigurationCopy(
-            conn.tableOperations().getProperties(opts.getTableName()));
+            client.tableOperations().getProperties(opts.getTableName()));
         opts.goalSize = tableConfig.getAsBytes(Property.TABLE_SPLIT_THRESHOLD);
       }
 
       message("Merging tablets in table %s to %d bytes", opts.getTableName(), opts.goalSize);
-      mergomatic(conn, opts.getTableName(), opts.begin, opts.end, opts.goalSize, opts.force);
+      mergomatic(client, opts.getTableName(), opts.begin, opts.end, opts.goalSize, opts.force);
     } catch (Exception ex) {
       throw new MergeException(ex);
     }
@@ -122,7 +122,7 @@ public class Merge {
     long size;
   }
 
-  public void mergomatic(AccumuloClient conn, String table, Text start, Text end, long goalSize,
+  public void mergomatic(AccumuloClient client, String table, Text start, Text end, long goalSize,
       boolean force) throws MergeException {
     try {
       if (table.equals(MetadataTable.NAME)) {
@@ -131,23 +131,23 @@ public class Merge {
       List<Size> sizes = new ArrayList<>();
       long totalSize = 0;
       // Merge any until you get larger than the goal size, and then merge one less tablet
-      Iterator<Size> sizeIterator = getSizeIterator(conn, table, start, end);
+      Iterator<Size> sizeIterator = getSizeIterator(client, table, start, end);
       while (sizeIterator.hasNext()) {
         Size next = sizeIterator.next();
         totalSize += next.size;
         sizes.add(next);
         if (totalSize > goalSize) {
-          totalSize = mergeMany(conn, table, sizes, goalSize, force, false);
+          totalSize = mergeMany(client, table, sizes, goalSize, force, false);
         }
       }
       if (sizes.size() > 1)
-        mergeMany(conn, table, sizes, goalSize, force, true);
+        mergeMany(client, table, sizes, goalSize, force, true);
     } catch (Exception ex) {
       throw new MergeException(ex);
     }
   }
 
-  protected long mergeMany(AccumuloClient conn, String table, List<Size> sizes, long goalSize,
+  protected long mergeMany(AccumuloClient client, String table, List<Size> sizes, long goalSize,
       boolean force, boolean last) throws MergeException {
     // skip the big tablets, which will be the typical case
     while (!sizes.isEmpty()) {
@@ -171,13 +171,13 @@ public class Merge {
     }
 
     if (numToMerge > 1) {
-      mergeSome(conn, table, sizes, numToMerge);
+      mergeSome(client, table, sizes, numToMerge);
     } else {
       if (numToMerge == 1 && sizes.size() > 1) {
         // here we have the case of a merge candidate that is surrounded by candidates that would
         // split
         if (force) {
-          mergeSome(conn, table, sizes, 2);
+          mergeSome(client, table, sizes, 2);
         } else {
           sizes.remove(0);
         }
@@ -185,7 +185,7 @@ public class Merge {
     }
     if (numToMerge == 0 && sizes.size() > 1 && last) {
       // That's the last tablet, and we have a bunch to merge
-      mergeSome(conn, table, sizes, sizes.size());
+      mergeSome(client, table, sizes, sizes.size());
     }
     long result = 0;
     for (Size s : sizes) {
@@ -194,36 +194,36 @@ public class Merge {
     return result;
   }
 
-  protected void mergeSome(AccumuloClient conn, String table, List<Size> sizes, int numToMerge)
+  protected void mergeSome(AccumuloClient client, String table, List<Size> sizes, int numToMerge)
       throws MergeException {
-    merge(conn, table, sizes, numToMerge);
+    merge(client, table, sizes, numToMerge);
     for (int i = 0; i < numToMerge; i++) {
       sizes.remove(0);
     }
   }
 
-  protected void merge(AccumuloClient conn, String table, List<Size> sizes, int numToMerge)
+  protected void merge(AccumuloClient client, String table, List<Size> sizes, int numToMerge)
       throws MergeException {
     try {
       Text start = sizes.get(0).extent.getPrevEndRow();
       Text end = sizes.get(numToMerge - 1).extent.getEndRow();
       message("Merging %d tablets from (%s to %s]", numToMerge, start == null ? "-inf" : start,
           end == null ? "+inf" : end);
-      conn.tableOperations().merge(table, start, end);
+      client.tableOperations().merge(table, start, end);
     } catch (Exception ex) {
       throw new MergeException(ex);
     }
   }
 
-  protected Iterator<Size> getSizeIterator(AccumuloClient conn, String tablename, Text start,
+  protected Iterator<Size> getSizeIterator(AccumuloClient client, String tablename, Text start,
       Text end) throws MergeException {
     // open up metadata, walk through the tablets.
     Table.ID tableId;
     Scanner scanner;
     try {
-      ClientContext context = new ClientContext(conn.info());
+      ClientContext context = new ClientContext(client.info());
       tableId = Tables.getTableId(context, tablename);
-      scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+      scanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     } catch (Exception e) {
       throw new MergeException(e);
     }
diff --git a/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java b/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java
index bf98878..669f3e2 100644
--- a/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java
@@ -51,12 +51,12 @@ public class MetadataServicerTest {
     tableNameToIdMap.put(userTableName, userTableId.canonicalID());
 
     context = EasyMock.createMock(ClientContext.class);
-    AccumuloClient conn = EasyMock.createMock(AccumuloClient.class);
+    AccumuloClient client = EasyMock.createMock(AccumuloClient.class);
     TableOperations tableOps = EasyMock.createMock(TableOperations.class);
     EasyMock.expect(tableOps.tableIdMap()).andReturn(tableNameToIdMap).anyTimes();
-    EasyMock.expect(conn.tableOperations()).andReturn(tableOps).anyTimes();
-    EasyMock.expect(context.getClient()).andReturn(conn).anyTimes();
-    EasyMock.replay(context, conn, tableOps);
+    EasyMock.expect(client.tableOperations()).andReturn(tableOps).anyTimes();
+    EasyMock.expect(context.getClient()).andReturn(client).anyTimes();
+    EasyMock.replay(context, client, tableOps);
   }
 
   @Test
diff --git a/core/src/test/java/org/apache/accumulo/core/util/MergeTest.java b/core/src/test/java/org/apache/accumulo/core/util/MergeTest.java
index 060a3fe..11892a1 100644
--- a/core/src/test/java/org/apache/accumulo/core/util/MergeTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/util/MergeTest.java
@@ -54,7 +54,7 @@ public class MergeTest {
     protected void message(String format, Object... args) {}
 
     @Override
-    protected Iterator<Size> getSizeIterator(AccumuloClient conn, String tablename,
+    protected Iterator<Size> getSizeIterator(AccumuloClient client, String tablename,
         final Text start, final Text end) throws MergeException {
       final Iterator<Size> impl = tablets.iterator();
       return new Iterator<Size>() {
@@ -98,7 +98,7 @@ public class MergeTest {
     }
 
     @Override
-    protected void merge(AccumuloClient conn, String table, List<Size> sizes, int numToMerge)
+    protected void merge(AccumuloClient client, String table, List<Size> sizes, int numToMerge)
         throws MergeException {
       List<Size> merge = new ArrayList<>();
       for (int i = 0; i < numToMerge; i++) {
diff --git a/minicluster/src/main/java/org/apache/accumulo/cluster/AccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/cluster/AccumuloCluster.java
index 81107d1..ed47e1a 100644
--- a/minicluster/src/main/java/org/apache/accumulo/cluster/AccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/cluster/AccumuloCluster.java
@@ -22,7 +22,6 @@ import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.ClientInfo;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.server.ServerContext;
@@ -54,14 +53,6 @@ public interface AccumuloCluster {
   ServerContext getServerContext();
 
   /**
-   * Utility method to get a connector to the cluster.
-   *
-   * @deprecated since 2.0.0, replaced by {@link #getAccumuloClient(String, AuthenticationToken)}
-   */
-  Connector getConnector(String user, AuthenticationToken token)
-      throws AccumuloException, AccumuloSecurityException;
-
-  /**
    * Utility method to get a client connection to the cluster.
    *
    * @since 2.0
diff --git a/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneAccumuloCluster.java
index 453377c..4b7ef2e 100644
--- a/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneAccumuloCluster.java
@@ -31,7 +31,6 @@ import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.ClientInfo;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.impl.ClientConfConverter;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
@@ -137,13 +136,6 @@ public class StandaloneAccumuloCluster implements AccumuloCluster {
   }
 
   @Override
-  public Connector getConnector(String user, AuthenticationToken token)
-      throws AccumuloException, AccumuloSecurityException {
-    return Connector.from(Accumulo.newClient().forInstance(getInstanceName(), getZooKeepers())
-        .usingToken(user, token).build());
-  }
-
-  @Override
   public AccumuloClient getAccumuloClient(String user, AuthenticationToken token)
       throws AccumuloException, AccumuloSecurityException {
     return Accumulo.newClient().forInstance(getInstanceName(), getZooKeepers())
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
index a785a09..cfb337e 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
@@ -58,7 +58,6 @@ import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.ClientInfo;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.impl.AccumuloClientImpl;
 import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.MasterClient;
@@ -774,13 +773,6 @@ public class MiniAccumuloClusterImpl implements AccumuloCluster {
   }
 
   @Override
-  public Connector getConnector(String user, AuthenticationToken token)
-      throws AccumuloException, AccumuloSecurityException {
-    return Connector.from(new AccumuloClientImpl.AccumuloClientBuilderImpl()
-        .usingClientInfo(getClientInfo()).usingToken(user, token).build());
-  }
-
-  @Override
   public AccumuloClient getAccumuloClient(String user, AuthenticationToken token)
       throws AccumuloException, AccumuloSecurityException {
     return new AccumuloClientImpl.AccumuloClientBuilderImpl().usingClientInfo(getClientInfo())
diff --git a/minicluster/src/test/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImplTest.java b/minicluster/src/test/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImplTest.java
index 7676401..77b4c99 100644
--- a/minicluster/src/test/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImplTest.java
+++ b/minicluster/src/test/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImplTest.java
@@ -73,12 +73,12 @@ public class MiniAccumuloClusterImplTest {
     accumulo = new MiniAccumuloClusterImpl(config);
     accumulo.start();
     // create a table to ensure there are some entries in the !0 table
-    AccumuloClient conn = accumulo.getAccumuloClient("root", new PasswordToken("superSecret"));
-    TableOperations tableops = conn.tableOperations();
+    AccumuloClient client = accumulo.getAccumuloClient("root", new PasswordToken("superSecret"));
+    TableOperations tableops = client.tableOperations();
     tableops.create(TEST_TABLE);
     testTableID = tableops.tableIdMap().get(TEST_TABLE);
 
-    Scanner s = conn.createScanner(TEST_TABLE, Authorizations.EMPTY);
+    Scanner s = client.createScanner(TEST_TABLE, Authorizations.EMPTY);
     Iterators.size(s.iterator());
   }
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java b/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
index 5436739..228245c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
@@ -240,10 +240,10 @@ public class ServerContext extends ClientContext {
   @Override
   public synchronized AccumuloClient getClient()
       throws AccumuloException, AccumuloSecurityException {
-    if (conn == null) {
-      conn = new AccumuloClientImpl(this);
+    if (client == null) {
+      client = new AccumuloClientImpl(this);
     }
-    return conn;
+    return client;
   }
 
   public AccumuloClient getClient(String principal, AuthenticationToken token)
diff --git a/server/base/src/main/java/org/apache/accumulo/server/replication/PrintReplicationRecords.java b/server/base/src/main/java/org/apache/accumulo/server/replication/PrintReplicationRecords.java
index 4487975..4847eae 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/replication/PrintReplicationRecords.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/replication/PrintReplicationRecords.java
@@ -40,12 +40,12 @@ import com.google.protobuf.InvalidProtocolBufferException;
 public class PrintReplicationRecords implements Runnable {
   private static final Logger log = LoggerFactory.getLogger(PrintReplicationRecords.class);
 
-  private AccumuloClient conn;
+  private AccumuloClient client;
   private PrintStream out;
   private SimpleDateFormat sdf;
 
-  public PrintReplicationRecords(AccumuloClient conn, PrintStream out) {
-    this.conn = conn;
+  public PrintReplicationRecords(AccumuloClient client, PrintStream out) {
+    this.client = client;
     this.out = out;
     this.sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
   }
@@ -57,7 +57,7 @@ public class PrintReplicationRecords implements Runnable {
     out.println(sdf.format(new Date()) + " Replication entries from metadata table");
     out.println("------------------------------------------------------------------");
     try {
-      s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+      s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     } catch (TableNotFoundException e) {
       log.error("Metadata table does not exist");
       return;
@@ -79,7 +79,7 @@ public class PrintReplicationRecords implements Runnable {
     out.println("--------------------------------------------------------------------");
 
     try {
-      s = conn.createScanner(ReplicationTable.NAME, Authorizations.EMPTY);
+      s = client.createScanner(ReplicationTable.NAME, Authorizations.EMPTY);
     } catch (TableNotFoundException e) {
       log.error("Replication table does not exist");
       return;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java
index 8bf3399..aa6079a 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java
@@ -225,15 +225,15 @@ public class ReplicationUtil {
   /**
    * Fetches the absolute path of the file to be replicated.
    *
-   * @param conn
-   *          Accumulo Connector
+   * @param client
+   *          Accumulo Client
    * @param workQueuePath
    *          Root path for the Replication WorkQueue
    * @param queueKey
    *          The Replication work queue key
    * @return The absolute path for the file, or null if the key is no longer in ZooKeeper
    */
-  public String getAbsolutePath(AccumuloClient conn, String workQueuePath, String queueKey) {
+  public String getAbsolutePath(AccumuloClient client, String workQueuePath, String queueKey) {
     byte[] data = zooCache.get(workQueuePath + "/" + queueKey);
     if (null != data) {
       return new String(data, UTF_8);
@@ -245,22 +245,22 @@ public class ReplicationUtil {
   /**
    * Compute a progress string for the replication of the given WAL
    *
-   * @param conn
-   *          Accumulo Connector
+   * @param client
+   *          Accumulo Client
    * @param path
    *          Absolute path to a WAL, or null
    * @param target
    *          ReplicationTarget the WAL is being replicated to
    * @return A status message for a file being replicated
    */
-  public String getProgress(AccumuloClient conn, String path, ReplicationTarget target) {
+  public String getProgress(AccumuloClient client, String path, ReplicationTarget target) {
     // We could try to grep over the table, but without knowing the full file path, we
     // can't find the status quickly
     String status = "Unknown";
     if (null != path) {
       Scanner s;
       try {
-        s = ReplicationTable.getScanner(conn);
+        s = ReplicationTable.getScanner(client);
       } catch (ReplicationTableOfflineException e) {
         log.debug("Replication table no longer online", e);
         return status;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/replication/WorkAssigner.java b/server/base/src/main/java/org/apache/accumulo/server/replication/WorkAssigner.java
index 1f5fd0f..4676e36 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/replication/WorkAssigner.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/replication/WorkAssigner.java
@@ -32,7 +32,7 @@ public interface WorkAssigner {
   /**
    * Configure the WorkAssigner implementation
    */
-  void configure(AccumuloConfiguration conf, AccumuloClient conn);
+  void configure(AccumuloConfiguration conf, AccumuloClient client);
 
   /**
    * Assign work for replication
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java b/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
index feb9f5c..c401b86 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
@@ -309,13 +309,13 @@ public class Admin implements KeywordExecutable {
       @Override
       public void run() {
         try {
-          AccumuloClient conn = context.getClient();
-          Set<String> tables = conn.tableOperations().tableIdMap().keySet();
+          AccumuloClient client = context.getClient();
+          Set<String> tables = client.tableOperations().tableIdMap().keySet();
           for (String table : tables) {
             if (table.equals(MetadataTable.NAME))
               continue;
             try {
-              conn.tableOperations().flush(table, null, null, false);
+              client.tableOperations().flush(table, null, null, false);
               flushesStarted.incrementAndGet();
             } catch (TableNotFoundException e) {
               // ignore
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
index cc404ca..1d41963 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -750,7 +750,7 @@ public class MetadataTableUtil {
   }
 
   private static Iterable<TabletMetadata> createCloneScanner(String testTableName, Table.ID tableId,
-      AccumuloClient conn) throws TableNotFoundException {
+      AccumuloClient client) throws TableNotFoundException {
 
     String tableName;
     Range range;
@@ -767,7 +767,7 @@ public class MetadataTableUtil {
     }
 
     try {
-      return MetadataScanner.builder().from(conn).scanTable(tableName).overRange(range)
+      return MetadataScanner.builder().from(client).scanTable(tableName).overRange(range)
           .checkConsistency().saveKeyValues().fetchFiles().fetchLocation().fetchLast().fetchCloned()
           .fetchPrev().fetchTime().build();
     } catch (AccumuloException | AccumuloSecurityException e) {
@@ -777,10 +777,10 @@ public class MetadataTableUtil {
 
   @VisibleForTesting
   public static void initializeClone(String testTableName, Table.ID srcTableId, Table.ID tableId,
-      AccumuloClient conn, BatchWriter bw)
+      AccumuloClient client, BatchWriter bw)
       throws TableNotFoundException, MutationsRejectedException {
 
-    Iterator<TabletMetadata> ti = createCloneScanner(testTableName, srcTableId, conn).iterator();
+    Iterator<TabletMetadata> ti = createCloneScanner(testTableName, srcTableId, client).iterator();
 
     if (!ti.hasNext())
       throw new RuntimeException(" table deleted during clone?  srcTableId = " + srcTableId);
@@ -798,12 +798,12 @@ public class MetadataTableUtil {
 
   @VisibleForTesting
   public static int checkClone(String testTableName, Table.ID srcTableId, Table.ID tableId,
-      AccumuloClient conn, BatchWriter bw)
+      AccumuloClient client, BatchWriter bw)
       throws TableNotFoundException, MutationsRejectedException {
 
-    Iterator<TabletMetadata> srcIter = createCloneScanner(testTableName, srcTableId, conn)
+    Iterator<TabletMetadata> srcIter = createCloneScanner(testTableName, srcTableId, client)
         .iterator();
-    Iterator<TabletMetadata> cloneIter = createCloneScanner(testTableName, tableId, conn)
+    Iterator<TabletMetadata> cloneIter = createCloneScanner(testTableName, tableId, client)
         .iterator();
 
     if (!cloneIter.hasNext() || !srcIter.hasNext())
@@ -882,19 +882,19 @@ public class MetadataTableUtil {
   public static void cloneTable(ServerContext context, Table.ID srcTableId, Table.ID tableId,
       VolumeManager volumeManager) throws Exception {
 
-    AccumuloClient conn = context.getClient();
-    try (BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig())) {
+    AccumuloClient client = context.getClient();
+    try (BatchWriter bw = client.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig())) {
 
       while (true) {
 
         try {
-          initializeClone(null, srcTableId, tableId, conn, bw);
+          initializeClone(null, srcTableId, tableId, client, bw);
 
          // the following loop looks for changes in the file that occurred during the copy.. if files
           // were dereferenced then they could have been GCed
 
           while (true) {
-            int rewrites = checkClone(null, srcTableId, tableId, conn, bw);
+            int rewrites = checkClone(null, srcTableId, tableId, client, bw);
 
             if (rewrites == 0)
               break;
@@ -918,7 +918,7 @@ public class MetadataTableUtil {
       }
 
       // delete the clone markers and create directory entries
-      Scanner mscanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+      Scanner mscanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
       mscanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
       mscanner.fetchColumnFamily(ClonedColumnFamily.NAME);
 
@@ -946,12 +946,12 @@ public class MetadataTableUtil {
     update(context, zooLock, m, extent);
   }
 
-  public static void removeBulkLoadEntries(AccumuloClient conn, Table.ID tableId, long tid)
+  public static void removeBulkLoadEntries(AccumuloClient client, Table.ID tableId, long tid)
       throws Exception {
     try (
         Scanner mscanner = new IsolatedScanner(
-            conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
-        BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig())) {
+            client.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
+        BatchWriter bw = client.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig())) {
       mscanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
       mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
       byte[] tidAsBytes = Long.toString(tid).getBytes(UTF_8);
@@ -968,10 +968,10 @@ public class MetadataTableUtil {
     }
   }
 
-  public static List<FileRef> getBulkFilesLoaded(ServerContext context, AccumuloClient conn,
+  public static List<FileRef> getBulkFilesLoaded(ServerContext context, AccumuloClient client,
       KeyExtent extent, long tid) throws IOException {
     List<FileRef> result = new ArrayList<>();
-    try (Scanner mscanner = new IsolatedScanner(conn.createScanner(
+    try (Scanner mscanner = new IsolatedScanner(client.createScanner(
         extent.isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY))) {
       VolumeManager fs = context.getVolumeManager();
       mscanner.setRange(extent.toMetadataRange());
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
index 930083e..efb8721 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
@@ -84,14 +84,14 @@ public class ReplicationTableUtil {
   synchronized static Writer getWriter(ClientContext context) {
     Writer replicationTable = writers.get(context.getCredentials());
     if (replicationTable == null) {
-      AccumuloClient conn;
+      AccumuloClient client;
       try {
-        conn = context.getClient();
+        client = context.getClient();
       } catch (AccumuloException | AccumuloSecurityException e) {
         throw new RuntimeException(e);
       }
 
-      configureMetadataTable(conn, MetadataTable.NAME);
+      configureMetadataTable(client, MetadataTable.NAME);
 
       replicationTable = new Writer(context, MetadataTable.ID);
       writers.put(context.getCredentials(), replicationTable);
@@ -99,8 +99,8 @@ public class ReplicationTableUtil {
     return replicationTable;
   }
 
-  public synchronized static void configureMetadataTable(AccumuloClient conn, String tableName) {
-    TableOperations tops = conn.tableOperations();
+  public synchronized static void configureMetadataTable(AccumuloClient client, String tableName) {
+    TableOperations tops = client.tableOperations();
     Map<String,EnumSet<IteratorScope>> iterators = null;
     try {
       iterators = tops.listIterators(tableName);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java b/server/base/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java
index e538e25..d6a7f3c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java
@@ -147,12 +147,12 @@ public class TableDiskUsage {
   }
 
   public static void printDiskUsage(Collection<String> tableNames, VolumeManager fs,
-      AccumuloClient conn, boolean humanReadable) throws TableNotFoundException, IOException {
-    printDiskUsage(tableNames, fs, conn, line -> System.out.println(line), humanReadable);
+      AccumuloClient client, boolean humanReadable) throws TableNotFoundException, IOException {
+    printDiskUsage(tableNames, fs, client, line -> System.out.println(line), humanReadable);
   }
 
   public static Map<TreeSet<String>,Long> getDiskUsage(Set<Table.ID> tableIds, VolumeManager fs,
-      AccumuloClient conn) throws IOException {
+      AccumuloClient client) throws IOException {
     TableDiskUsage tdu = new TableDiskUsage();
 
     // Add each tableID
@@ -167,7 +167,7 @@ public class TableDiskUsage {
     for (Table.ID tableId : tableIds) {
       Scanner mdScanner;
       try {
-        mdScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+        mdScanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
       } catch (TableNotFoundException e) {
         throw new RuntimeException(e);
       }
@@ -216,7 +216,7 @@ public class TableDiskUsage {
       }
     }
 
-    ClientContext context = new ClientContext(conn.info());
+    ClientContext context = new ClientContext(client.info());
     Map<Table.ID,String> reverseTableIdMap = Tables.getIdToNameMap(context);
 
     TreeMap<TreeSet<String>,Long> usage = new TreeMap<>((o1, o2) -> {
@@ -267,11 +267,11 @@ public class TableDiskUsage {
   }
 
   public static void printDiskUsage(Collection<String> tableNames, VolumeManager fs,
-      AccumuloClient conn, Printer printer, boolean humanReadable)
+      AccumuloClient client, Printer printer, boolean humanReadable)
       throws TableNotFoundException, IOException {
 
     HashSet<Table.ID> tableIds = new HashSet<>();
-    ClientContext context = new ClientContext(conn.info());
+    ClientContext context = new ClientContext(client.info());
 
     // Get table IDs for all tables requested to be 'du'
     for (String tableName : tableNames) {
@@ -282,7 +282,7 @@ public class TableDiskUsage {
       tableIds.add(tableId);
     }
 
-    Map<TreeSet<String>,Long> usage = getDiskUsage(tableIds, fs, conn);
+    Map<TreeSet<String>,Long> usage = getDiskUsage(tableIds, fs, client);
 
     String valueFormat = humanReadable ? "%9s" : "%,24d";
     for (Entry<TreeSet<String>,Long> entry : usage.entrySet()) {
@@ -299,9 +299,9 @@ public class TableDiskUsage {
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     opts.parseArgs(TableDiskUsage.class.getName(), args);
-    AccumuloClient conn = opts.getClient();
+    AccumuloClient client = opts.getClient();
     VolumeManager fs = opts.getServerContext().getVolumeManager();
-    org.apache.accumulo.server.util.TableDiskUsage.printDiskUsage(opts.tables, fs, conn, false);
+    org.apache.accumulo.server.util.TableDiskUsage.printDiskUsage(opts.tables, fs, client, false);
   }
 
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java b/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
index 8fbe610..8d69883 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
@@ -73,9 +73,9 @@ public class VerifyTabletAssignments {
     Opts opts = new Opts();
     opts.parseArgs(VerifyTabletAssignments.class.getName(), args);
 
-    AccumuloClient conn = opts.getClient();
-    ClientContext context = new ClientContext(conn.info());
-    for (String table : conn.tableOperations().list())
+    AccumuloClient client = opts.getClient();
+    ClientContext context = new ClientContext(client.info());
+    for (String table : client.tableOperations().list())
       checkTable(context, opts, table, null);
 
   }
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/ReplicationTableUtilTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/ReplicationTableUtilTest.java
index 7fb39d5..3693362 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/ReplicationTableUtilTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/ReplicationTableUtilTest.java
@@ -134,7 +134,7 @@ public class ReplicationTableUtilTest {
 
   @Test
   public void setsCombinerOnMetadataCorrectly() throws Exception {
-    AccumuloClient conn = createMock(AccumuloClient.class);
+    AccumuloClient client = createMock(AccumuloClient.class);
     TableOperations tops = createMock(TableOperations.class);
 
     String myMetadataTable = "mymetadata";
@@ -143,7 +143,7 @@ public class ReplicationTableUtilTest {
     IteratorSetting combiner = new IteratorSetting(9, "replcombiner", StatusCombiner.class);
     Combiner.setColumns(combiner, Collections.singletonList(new Column(ReplicationSection.COLF)));
 
-    expect(conn.tableOperations()).andReturn(tops);
+    expect(client.tableOperations()).andReturn(tops);
     expect(tops.listIterators(myMetadataTable)).andReturn(iterators);
     tops.attachIterator(myMetadataTable, combiner);
     expectLastCall().once();
@@ -153,10 +153,10 @@ public class ReplicationTableUtilTest {
         ReplicationTableUtil.STATUS_FORMATTER_CLASS_NAME);
     expectLastCall().once();
 
-    replay(conn, tops);
+    replay(client, tops);
 
-    ReplicationTableUtil.configureMetadataTable(conn, myMetadataTable);
+    ReplicationTableUtil.configureMetadataTable(client, myMetadataTable);
 
-    verify(conn, tops);
+    verify(client, tops);
   }
 }
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
index 650f24c..0458c28 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
@@ -350,11 +350,11 @@ public class GarbageCollectWriteAheadLogs {
 
   protected int removeReplicationEntries(Map<UUID,TServerInstance> candidates)
       throws IOException, KeeperException, InterruptedException {
-    AccumuloClient conn;
+    AccumuloClient client;
     try {
-      conn = context.getClient();
+      client = context.getClient();
       try {
-        final Scanner s = ReplicationTable.getScanner(conn);
+        final Scanner s = ReplicationTable.getScanner(client);
         StatusSection.limit(s);
         for (Entry<Key,Value> entry : s) {
           UUID id = path2uuid(new Path(entry.getKey().getRow().toString()));
@@ -365,7 +365,7 @@ public class GarbageCollectWriteAheadLogs {
         return candidates.size();
       }
 
-      final Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+      final Scanner scanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
       scanner.fetchColumnFamily(MetadataSchema.ReplicationSection.COLF);
       scanner.setRange(MetadataSchema.ReplicationSection.getRange());
       for (Entry<Key,Value> entry : scanner) {
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
index 52b7b13..bb99852 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
@@ -482,9 +482,9 @@ public class SimpleGarbageCollector implements Iface {
     @Override
     public Iterator<Entry<String,Status>> getReplicationNeededIterator()
         throws AccumuloException, AccumuloSecurityException {
-      AccumuloClient conn = getClient();
+      AccumuloClient client = getClient();
       try {
-        Scanner s = ReplicationTable.getScanner(conn);
+        Scanner s = ReplicationTable.getScanner(client);
         StatusSection.limit(s);
         return Iterators.transform(s.iterator(), input -> {
           String file = input.getKey().getRow().toString();
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java b/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
index 86c78a9..8aa4f9f 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
@@ -86,15 +86,15 @@ public class CloseWriteAheadLogReferences implements Runnable {
     // what the version they bundle uses.
     Stopwatch sw = Stopwatch.createUnstarted();
 
-    AccumuloClient conn;
+    AccumuloClient client;
     try {
-      conn = context.getClient();
+      client = context.getClient();
     } catch (Exception e) {
-      log.error("Could not create connector", e);
+      log.error("Could not create client", e);
       throw new RuntimeException(e);
     }
 
-    if (!ReplicationTable.isOnline(conn)) {
+    if (!ReplicationTable.isOnline(client)) {
       log.debug("Replication table isn't online, not attempting to clean up wals");
       return;
     }
@@ -116,7 +116,7 @@ public class CloseWriteAheadLogReferences implements Runnable {
     long recordsClosed = 0;
     try {
       sw.start();
-      recordsClosed = updateReplicationEntries(conn, closed);
+      recordsClosed = updateReplicationEntries(client, closed);
     } finally {
       sw.stop();
       updateReplicationSpan.stop();
@@ -152,18 +152,18 @@ public class CloseWriteAheadLogReferences implements Runnable {
    * Given the set of WALs which have references in the metadata table, close any status messages
    * with reference that WAL.
    *
-   * @param conn
-   *          Connector
+   * @param client
+   *          Accumulo client
    * @param closedWals
    *          {@link Set} of paths to WALs that marked as closed or unreferenced in zookeeper
    */
-  protected long updateReplicationEntries(AccumuloClient conn, Set<String> closedWals) {
+  protected long updateReplicationEntries(AccumuloClient client, Set<String> closedWals) {
     BatchScanner bs = null;
     BatchWriter bw = null;
     long recordsClosed = 0;
     try {
-      bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-      bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4);
+      bw = client.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+      bs = client.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4);
       bs.setRanges(Collections.singleton(Range.prefix(ReplicationSection.getRowPrefix())));
       bs.fetchColumnFamily(ReplicationSection.COLF);
 
diff --git a/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java b/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java
index 39b3ccf..be45084 100644
--- a/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java
+++ b/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java
@@ -152,7 +152,7 @@ public class GarbageCollectWriteAheadLogsTest {
     VolumeManager fs = EasyMock.createMock(VolumeManager.class);
     WalStateManager marker = EasyMock.createMock(WalStateManager.class);
     LiveTServerSet tserverSet = EasyMock.createMock(LiveTServerSet.class);
-    AccumuloClient conn = EasyMock.createMock(AccumuloClient.class);
+    AccumuloClient client = EasyMock.createMock(AccumuloClient.class);
     Scanner mscanner = EasyMock.createMock(Scanner.class);
     Scanner rscanner = EasyMock.createMock(Scanner.class);
 
@@ -160,15 +160,15 @@ public class GarbageCollectWriteAheadLogsTest {
     EasyMock.expect(tserverSet.getCurrentServers()).andReturn(Collections.singleton(server1));
     EasyMock.expect(marker.getAllMarkers()).andReturn(markers2).once();
     EasyMock.expect(marker.state(server2, id)).andReturn(new Pair<>(WalState.OPEN, path));
-    EasyMock.expect(context.getClient()).andReturn(conn);
+    EasyMock.expect(context.getClient()).andReturn(client);
 
-    EasyMock.expect(conn.createScanner(ReplicationTable.NAME, Authorizations.EMPTY))
+    EasyMock.expect(client.createScanner(ReplicationTable.NAME, Authorizations.EMPTY))
         .andReturn(rscanner);
     rscanner.fetchColumnFamily(ReplicationSchema.StatusSection.NAME);
     EasyMock.expectLastCall().once();
     EasyMock.expect(rscanner.iterator()).andReturn(emptyKV);
 
-    EasyMock.expect(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY))
+    EasyMock.expect(client.createScanner(MetadataTable.NAME, Authorizations.EMPTY))
         .andReturn(mscanner);
     mscanner.fetchColumnFamily(MetadataSchema.ReplicationSection.COLF);
     EasyMock.expectLastCall().once();
@@ -180,7 +180,7 @@ public class GarbageCollectWriteAheadLogsTest {
     EasyMock.expectLastCall().once();
     marker.forget(server2);
     EasyMock.expectLastCall().once();
-    EasyMock.replay(context, fs, marker, tserverSet, conn, rscanner, mscanner);
+    EasyMock.replay(context, fs, marker, tserverSet, client, rscanner, mscanner);
     GarbageCollectWriteAheadLogs gc = new GarbageCollectWriteAheadLogs(context, fs, false,
         tserverSet, marker, tabletOnServer1List) {
       @Override
@@ -189,7 +189,7 @@ public class GarbageCollectWriteAheadLogsTest {
       }
     };
     gc.collect(status);
-    EasyMock.verify(context, fs, marker, tserverSet, conn, rscanner, mscanner);
+    EasyMock.verify(context, fs, marker, tserverSet, client, rscanner, mscanner);
   }
 
   @Test
@@ -198,7 +198,7 @@ public class GarbageCollectWriteAheadLogsTest {
     VolumeManager fs = EasyMock.createMock(VolumeManager.class);
     WalStateManager marker = EasyMock.createMock(WalStateManager.class);
     LiveTServerSet tserverSet = EasyMock.createMock(LiveTServerSet.class);
-    AccumuloClient conn = EasyMock.createMock(AccumuloClient.class);
+    AccumuloClient client = EasyMock.createMock(AccumuloClient.class);
     Scanner mscanner = EasyMock.createMock(Scanner.class);
     Scanner rscanner = EasyMock.createMock(Scanner.class);
 
@@ -206,22 +206,22 @@ public class GarbageCollectWriteAheadLogsTest {
     EasyMock.expect(tserverSet.getCurrentServers()).andReturn(Collections.singleton(server1));
     EasyMock.expect(marker.getAllMarkers()).andReturn(markers2).once();
     EasyMock.expect(marker.state(server2, id)).andReturn(new Pair<>(WalState.OPEN, path));
-    EasyMock.expect(context.getClient()).andReturn(conn);
+    EasyMock.expect(context.getClient()).andReturn(client);
 
-    EasyMock.expect(conn.createScanner(ReplicationTable.NAME, Authorizations.EMPTY))
+    EasyMock.expect(client.createScanner(ReplicationTable.NAME, Authorizations.EMPTY))
         .andReturn(rscanner);
     rscanner.fetchColumnFamily(ReplicationSchema.StatusSection.NAME);
     EasyMock.expectLastCall().once();
     EasyMock.expect(rscanner.iterator()).andReturn(emptyKV);
 
-    EasyMock.expect(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY))
+    EasyMock.expect(client.createScanner(MetadataTable.NAME, Authorizations.EMPTY))
         .andReturn(mscanner);
     mscanner.fetchColumnFamily(MetadataSchema.ReplicationSection.COLF);
     EasyMock.expectLastCall().once();
     mscanner.setRange(MetadataSchema.ReplicationSection.getRange());
     EasyMock.expectLastCall().once();
     EasyMock.expect(mscanner.iterator()).andReturn(emptyKV);
-    EasyMock.replay(context, fs, marker, tserverSet, conn, rscanner, mscanner);
+    EasyMock.replay(context, fs, marker, tserverSet, client, rscanner, mscanner);
     GarbageCollectWriteAheadLogs gc = new GarbageCollectWriteAheadLogs(context, fs, false,
         tserverSet, marker, tabletOnServer2List) {
       @Override
@@ -230,7 +230,7 @@ public class GarbageCollectWriteAheadLogsTest {
       }
     };
     gc.collect(status);
-    EasyMock.verify(context, fs, marker, tserverSet, conn, rscanner, mscanner);
+    EasyMock.verify(context, fs, marker, tserverSet, client, rscanner, mscanner);
   }
 
   @Test
@@ -239,7 +239,7 @@ public class GarbageCollectWriteAheadLogsTest {
     VolumeManager fs = EasyMock.createMock(VolumeManager.class);
     WalStateManager marker = EasyMock.createMock(WalStateManager.class);
     LiveTServerSet tserverSet = EasyMock.createMock(LiveTServerSet.class);
-    AccumuloClient conn = EasyMock.createMock(AccumuloClient.class);
+    AccumuloClient client = EasyMock.createMock(AccumuloClient.class);
     Scanner mscanner = EasyMock.createMock(Scanner.class);
     Scanner rscanner = EasyMock.createMock(Scanner.class);
     String row = MetadataSchema.ReplicationSection.getRowPrefix() + path;
@@ -253,22 +253,22 @@ public class GarbageCollectWriteAheadLogsTest {
     EasyMock.expect(tserverSet.getCurrentServers()).andReturn(Collections.singleton(server1));
     EasyMock.expect(marker.getAllMarkers()).andReturn(markers).once();
     EasyMock.expect(marker.state(server1, id)).andReturn(new Pair<>(WalState.UNREFERENCED, path));
-    EasyMock.expect(context.getClient()).andReturn(conn);
+    EasyMock.expect(context.getClient()).andReturn(client);
 
-    EasyMock.expect(conn.createScanner(ReplicationTable.NAME, Authorizations.EMPTY))
+    EasyMock.expect(client.createScanner(ReplicationTable.NAME, Authorizations.EMPTY))
         .andReturn(rscanner);
     rscanner.fetchColumnFamily(ReplicationSchema.StatusSection.NAME);
     EasyMock.expectLastCall().once();
     EasyMock.expect(rscanner.iterator()).andReturn(emptyKV);
 
-    EasyMock.expect(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY))
+    EasyMock.expect(client.createScanner(MetadataTable.NAME, Authorizations.EMPTY))
         .andReturn(mscanner);
     mscanner.fetchColumnFamily(MetadataSchema.ReplicationSection.COLF);
     EasyMock.expectLastCall().once();
     mscanner.setRange(MetadataSchema.ReplicationSection.getRange());
     EasyMock.expectLastCall().once();
     EasyMock.expect(mscanner.iterator()).andReturn(replicationWork.entrySet().iterator());
-    EasyMock.replay(context, fs, marker, tserverSet, conn, rscanner, mscanner);
+    EasyMock.replay(context, fs, marker, tserverSet, client, rscanner, mscanner);
     GarbageCollectWriteAheadLogs gc = new GarbageCollectWriteAheadLogs(context, fs, false,
         tserverSet, marker, tabletOnServer1List) {
       @Override
@@ -277,6 +277,6 @@ public class GarbageCollectWriteAheadLogsTest {
       }
     };
     gc.collect(status);
-    EasyMock.verify(context, fs, marker, tserverSet, conn, rscanner, mscanner);
+    EasyMock.verify(context, fs, marker, tserverSet, client, rscanner, mscanner);
   }
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java b/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
index 4c61fee..ba5eb0f 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
@@ -182,14 +182,14 @@ public class MasterClientServiceHandler extends FateServiceHandler
       serversToFlush.clear();
 
       try {
-        AccumuloClient conn = master.getClient();
+        AccumuloClient client = master.getClient();
         Scanner scanner;
         if (tableId.equals(MetadataTable.ID)) {
-          scanner = new IsolatedScanner(conn.createScanner(RootTable.NAME, Authorizations.EMPTY));
+          scanner = new IsolatedScanner(client.createScanner(RootTable.NAME, Authorizations.EMPTY));
           scanner.setRange(MetadataSchema.TabletsSection.getRange());
         } else {
           scanner = new IsolatedScanner(
-              conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
+              client.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
           Range range = new KeyExtent(tableId, null, ByteBufferUtil.toText(startRow))
               .toMetadataRange();
           scanner.setRange(range.clip(MetadataSchema.TabletsSection.getRange()));
@@ -552,11 +552,11 @@ public class MasterClientServiceHandler extends FateServiceHandler
   @Override
   public boolean drainReplicationTable(TInfo tfino, TCredentials credentials, String tableName,
       Set<String> logsToWatch) throws TException {
-    AccumuloClient conn;
+    AccumuloClient client;
     try {
-      conn = master.getClient();
+      client = master.getClient();
     } catch (AccumuloException | AccumuloSecurityException e) {
-      throw new RuntimeException("Failed to obtain connector", e);
+      throw new RuntimeException("Failed to obtain client", e);
     }
 
     final Text tableId = new Text(getTableId(master.getContext(), tableName).getUtf8());
@@ -567,7 +567,7 @@ public class MasterClientServiceHandler extends FateServiceHandler
     final Set<Range> range = Collections.singleton(new Range(ReplicationSection.getRange()));
     BatchScanner bs;
     try {
-      bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4);
+      bs = client.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4);
     } catch (TableNotFoundException e) {
       throw new RuntimeException("Could not read metadata table", e);
     }
@@ -584,7 +584,7 @@ public class MasterClientServiceHandler extends FateServiceHandler
 
     drainLog.trace("reading from replication table");
     try {
-      bs = conn.createBatchScanner(ReplicationTable.NAME, Authorizations.EMPTY, 4);
+      bs = client.createBatchScanner(ReplicationTable.NAME, Authorizations.EMPTY, 4);
     } catch (TableNotFoundException e) {
       throw new RuntimeException("Replication table was not found", e);
     }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
index ad4c8f6..4bf6e12 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
@@ -342,9 +342,9 @@ abstract class TabletGroupWatcher extends Daemon {
                 }
                 break;
               case HOSTED:
-                TServerConnection conn = this.master.tserverSet.getConnection(server);
-                if (conn != null) {
-                  conn.unloadTablet(this.master.masterLock, tls.extent, goal.howUnload(),
+                TServerConnection client = this.master.tserverSet.getConnection(server);
+                if (client != null) {
+                  client.unloadTablet(this.master.masterLock, tls.extent, goal.howUnload(),
                       master.getSteadyTime());
                   unloaded++;
                   totalUnloaded++;
@@ -598,7 +598,7 @@ abstract class TabletGroupWatcher extends Daemon {
       Master.log.debug("Found following tablet {}", followingTablet);
     }
     try {
-      AccumuloClient conn = this.master.getClient();
+      AccumuloClient client = this.master.getClient();
       Text start = extent.getPrevEndRow();
       if (start == null) {
         start = new Text();
@@ -606,7 +606,7 @@ abstract class TabletGroupWatcher extends Daemon {
       Master.log.debug("Making file deletion entries for {}", extent);
       Range deleteRange = new Range(KeyExtent.getMetadataEntry(extent.getTableId(), start), false,
           KeyExtent.getMetadataEntry(extent.getTableId(), extent.getEndRow()), true);
-      Scanner scanner = conn.createScanner(targetSystemTable, Authorizations.EMPTY);
+      Scanner scanner = client.createScanner(targetSystemTable, Authorizations.EMPTY);
       scanner.setRange(deleteRange);
       TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
       TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(scanner);
@@ -645,16 +645,16 @@ abstract class TabletGroupWatcher extends Daemon {
         }
       }
       MetadataTableUtil.addDeleteEntries(extent, datafiles, master.getContext());
-      BatchWriter bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
+      BatchWriter bw = client.createBatchWriter(targetSystemTable, new BatchWriterConfig());
       try {
-        deleteTablets(info, deleteRange, bw, conn);
+        deleteTablets(info, deleteRange, bw, client);
       } finally {
         bw.close();
       }
 
       if (followingTablet != null) {
         Master.log.debug("Updating prevRow of {} to {}", followingTablet, extent.getPrevEndRow());
-        bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
+        bw = client.createBatchWriter(targetSystemTable, new BatchWriterConfig());
         try {
           Mutation m = new Mutation(followingTablet.getMetadataEntry());
           TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m,
@@ -704,10 +704,10 @@ abstract class TabletGroupWatcher extends Daemon {
     BatchWriter bw = null;
     try {
       long fileCount = 0;
-      AccumuloClient conn = this.master.getClient();
+      AccumuloClient client = this.master.getClient();
       // Make file entries in highest tablet
-      bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
-      Scanner scanner = conn.createScanner(targetSystemTable, Authorizations.EMPTY);
+      bw = client.createBatchWriter(targetSystemTable, new BatchWriterConfig());
+      Scanner scanner = client.createScanner(targetSystemTable, Authorizations.EMPTY);
       scanner.setRange(scanRange);
       TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
       TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(scanner);
@@ -735,7 +735,7 @@ abstract class TabletGroupWatcher extends Daemon {
 
       // read the logical time from the last tablet in the merge range, it is not included in
       // the loop above
-      scanner = conn.createScanner(targetSystemTable, Authorizations.EMPTY);
+      scanner = client.createScanner(targetSystemTable, Authorizations.EMPTY);
       scanner.setRange(new Range(stopRow));
       TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(scanner);
       for (Entry<Key,Value> entry : scanner) {
@@ -766,7 +766,7 @@ abstract class TabletGroupWatcher extends Daemon {
       bw.addMutation(updatePrevRow);
       bw.flush();
 
-      deleteTablets(info, scanRange, bw, conn);
+      deleteTablets(info, scanRange, bw, client);
 
       // Clean-up the last chopped marker
       m = new Mutation(stopRow);
@@ -786,7 +786,7 @@ abstract class TabletGroupWatcher extends Daemon {
     }
   }
 
-  private void deleteTablets(MergeInfo info, Range scanRange, BatchWriter bw, AccumuloClient conn)
+  private void deleteTablets(MergeInfo info, Range scanRange, BatchWriter bw, AccumuloClient client)
       throws TableNotFoundException, MutationsRejectedException {
     Scanner scanner;
     Mutation m;
@@ -794,7 +794,7 @@ abstract class TabletGroupWatcher extends Daemon {
    // group all deletes for a tablet into one mutation, this makes tablets
    // either disappear entirely or not at all... this is important for the case
    // where the process terminates in the loop below...
-    scanner = conn.createScanner(info.getExtent().isMeta() ? RootTable.NAME : MetadataTable.NAME,
+    scanner = client.createScanner(info.getExtent().isMeta() ? RootTable.NAME : MetadataTable.NAME,
         Authorizations.EMPTY);
     Master.log.debug("Deleting range {}", scanRange);
     scanner.setRange(scanRange);
@@ -820,8 +820,8 @@ abstract class TabletGroupWatcher extends Daemon {
 
   private KeyExtent getHighTablet(KeyExtent range) throws AccumuloException {
     try {
-      AccumuloClient conn = this.master.getClient();
-      Scanner scanner = conn.createScanner(range.isMeta() ? RootTable.NAME : MetadataTable.NAME,
+      AccumuloClient client = this.master.getClient();
+      Scanner scanner = client.createScanner(range.isMeta() ? RootTable.NAME : MetadataTable.NAME,
           Authorizations.EMPTY);
       TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
       KeyExtent start = new KeyExtent(range.getTableId(), range.getEndRow(), null);
@@ -925,9 +925,9 @@ abstract class TabletGroupWatcher extends Daemon {
     }
     assignments.addAll(assigned);
     for (Assignment a : assignments) {
-      TServerConnection conn = this.master.tserverSet.getConnection(a.server);
-      if (conn != null) {
-        conn.assignTablet(this.master.masterLock, a.tablet);
+      TServerConnection client = this.master.tserverSet.getConnection(a.server);
+      if (client != null) {
+        client.assignTablet(this.master.masterLock, a.tablet);
       } else {
         Master.log.warn("Could not connect to server {}", a.server);
       }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssigner.java b/server/master/src/main/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssigner.java
index fbba53e..ed8c7eb 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssigner.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssigner.java
@@ -61,7 +61,7 @@ public abstract class DistributedWorkQueueWorkAssigner implements WorkAssigner {
     return StatusUtil.isWorkRequired(status);
   }
 
-  protected AccumuloClient conn;
+  protected AccumuloClient client;
   protected AccumuloConfiguration conf;
   protected DistributedWorkQueue workQueue;
   protected int maxQueueSize;
@@ -71,11 +71,11 @@ public abstract class DistributedWorkQueueWorkAssigner implements WorkAssigner {
    * Getters/setters for testing purposes
    */
   protected AccumuloClient getClient() {
-    return conn;
+    return client;
   }
 
-  protected void setConnector(AccumuloClient conn) {
-    this.conn = conn;
+  protected void setClient(AccumuloClient client) {
+    this.client = client;
   }
 
   protected AccumuloConfiguration getConf() {
@@ -115,13 +115,13 @@ public abstract class DistributedWorkQueueWorkAssigner implements WorkAssigner {
    */
   protected void initializeWorkQueue(AccumuloConfiguration conf) {
     workQueue = new DistributedWorkQueue(
-        ZooUtil.getRoot(conn.getInstanceID()) + ReplicationConstants.ZOO_WORK_QUEUE, conf);
+        ZooUtil.getRoot(client.getInstanceID()) + ReplicationConstants.ZOO_WORK_QUEUE, conf);
   }
 
   @Override
-  public void configure(AccumuloConfiguration conf, AccumuloClient conn) {
+  public void configure(AccumuloConfiguration conf, AccumuloClient client) {
     this.conf = conf;
-    this.conn = conn;
+    this.client = client;
   }
 
   @Override
@@ -154,7 +154,7 @@ public abstract class DistributedWorkQueueWorkAssigner implements WorkAssigner {
     // Create a scanner over the replication table's order entries
     Scanner s;
     try {
-      s = ReplicationTable.getScanner(conn);
+      s = ReplicationTable.getScanner(client);
     } catch (ReplicationTableOfflineException e) {
       // no work to do; replication is off
       return;
@@ -181,7 +181,7 @@ public abstract class DistributedWorkQueueWorkAssigner implements WorkAssigner {
 
       Scanner workScanner;
       try {
-        workScanner = ReplicationTable.getScanner(conn);
+        workScanner = ReplicationTable.getScanner(client);
       } catch (ReplicationTableOfflineException e) {
         log.warn("Replication table is offline. Will retry...");
         sleepUninterruptibly(5, TimeUnit.SECONDS);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/FinishedWorkUpdater.java b/server/master/src/main/java/org/apache/accumulo/master/replication/FinishedWorkUpdater.java
index 4aaa66c..bbfcc79 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/FinishedWorkUpdater.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/FinishedWorkUpdater.java
@@ -55,17 +55,17 @@ import com.google.protobuf.InvalidProtocolBufferException;
 public class FinishedWorkUpdater implements Runnable {
   private static final Logger log = LoggerFactory.getLogger(FinishedWorkUpdater.class);
 
-  private final AccumuloClient conn;
+  private final AccumuloClient client;
 
-  public FinishedWorkUpdater(AccumuloClient conn) {
-    this.conn = conn;
+  public FinishedWorkUpdater(AccumuloClient client) {
+    this.client = client;
   }
 
   @Override
   public void run() {
     log.debug("Looking for finished replication work");
 
-    if (!ReplicationTable.isOnline(conn)) {
+    if (!ReplicationTable.isOnline(client)) {
       log.debug("Replication table is not yet online, will retry");
       return;
     }
@@ -73,8 +73,8 @@ public class FinishedWorkUpdater implements Runnable {
     BatchScanner bs;
     BatchWriter replBw;
     try {
-      bs = ReplicationTable.getBatchScanner(conn, 4);
-      replBw = ReplicationTable.getBatchWriter(conn);
+      bs = ReplicationTable.getBatchScanner(client, 4);
+      replBw = ReplicationTable.getBatchWriter(client);
     } catch (ReplicationTableOfflineException e) {
       log.debug("Table is no longer online, will retry");
       return;
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/RemoveCompleteReplicationRecords.java b/server/master/src/main/java/org/apache/accumulo/master/replication/RemoveCompleteReplicationRecords.java
index efd615f..f53473c 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/RemoveCompleteReplicationRecords.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/RemoveCompleteReplicationRecords.java
@@ -57,10 +57,10 @@ import com.google.protobuf.InvalidProtocolBufferException;
 public class RemoveCompleteReplicationRecords implements Runnable {
   private static final Logger log = LoggerFactory.getLogger(RemoveCompleteReplicationRecords.class);
 
-  private AccumuloClient conn;
+  private AccumuloClient client;
 
-  public RemoveCompleteReplicationRecords(AccumuloClient conn) {
-    this.conn = conn;
+  public RemoveCompleteReplicationRecords(AccumuloClient client) {
+    this.client = client;
   }
 
   @Override
@@ -68,8 +68,8 @@ public class RemoveCompleteReplicationRecords implements Runnable {
     BatchScanner bs;
     BatchWriter bw;
     try {
-      bs = ReplicationTable.getBatchScanner(conn, 4);
-      bw = ReplicationTable.getBatchWriter(conn);
+      bs = ReplicationTable.getBatchScanner(client, 4);
+      bw = ReplicationTable.getBatchWriter(client);
 
       if (bs == null || bw == null)
         throw new AssertionError("Inconceivable; an exception should have been"
@@ -90,7 +90,7 @@ public class RemoveCompleteReplicationRecords implements Runnable {
     long recordsRemoved = 0;
     try {
       sw.start();
-      recordsRemoved = removeCompleteRecords(conn, bs, bw);
+      recordsRemoved = removeCompleteRecords(client, bs, bw);
     } finally {
       if (null != bs) {
         bs.close();
@@ -115,15 +115,15 @@ public class RemoveCompleteReplicationRecords implements Runnable {
    * given {@code bw}, when that {@link Status} is fully replicated and closed, as defined by
    * {@link StatusUtil#isSafeForRemoval(org.apache.accumulo.server.replication.proto.Replication.Status)}.
    *
-   * @param conn
-   *          A Connector
+   * @param client
+   *          Accumulo client
    * @param bs
    *          A BatchScanner to read replication status records from
    * @param bw
    *          A BatchWriter to write deletes to
    * @return Number of records removed
    */
-  protected long removeCompleteRecords(AccumuloClient conn, BatchScanner bs, BatchWriter bw) {
+  protected long removeCompleteRecords(AccumuloClient client, BatchScanner bs, BatchWriter bw) {
     Text row = new Text(), colf = new Text(), colq = new Text();
     long recordsRemoved = 0;
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java b/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java
index e080708..5215371 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java
@@ -42,7 +42,7 @@ public class ReplicationDriver extends Daemon {
   private StatusMaker statusMaker;
   private FinishedWorkUpdater finishedWorkUpdater;
   private RemoveCompleteReplicationRecords rcrr;
-  private AccumuloClient conn;
+  private AccumuloClient client;
 
   public ReplicationDriver(Master master) {
     super("Replication Driver");
@@ -65,18 +65,18 @@ public class ReplicationDriver extends Daemon {
     while (master.stillMaster()) {
       if (null == workMaker) {
         try {
-          conn = master.getClient();
+          client = master.getClient();
         } catch (AccumuloException | AccumuloSecurityException e) {
-          // couldn't get a connector, try again in a "short" amount of time
-          log.warn("Error trying to get connector to process replication records", e);
+          // couldn't get a client, try again in a "short" amount of time
+          log.warn("Error trying to get client to process replication records", e);
           UtilWaitThread.sleep(2000);
           continue;
         }
 
-        statusMaker = new StatusMaker(conn, master.getFileSystem());
-        workMaker = new WorkMaker(master.getContext(), conn);
-        finishedWorkUpdater = new FinishedWorkUpdater(conn);
-        rcrr = new RemoveCompleteReplicationRecords(conn);
+        statusMaker = new StatusMaker(client, master.getFileSystem());
+        workMaker = new WorkMaker(master.getContext(), client);
+        finishedWorkUpdater = new FinishedWorkUpdater(client);
+        rcrr = new RemoveCompleteReplicationRecords(client);
       }
 
       Trace.on("masterReplicationDriver", sampler);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java b/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java
index 7865779..26b35d9 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java
@@ -60,8 +60,8 @@ public class SequentialWorkAssigner extends DistributedWorkQueueWorkAssigner {
 
   public SequentialWorkAssigner() {}
 
-  public SequentialWorkAssigner(AccumuloConfiguration conf, AccumuloClient conn) {
-    configure(conf, conn);
+  public SequentialWorkAssigner(AccumuloConfiguration conf, AccumuloClient client) {
+    configure(conf, client);
   }
 
   @Override
@@ -123,7 +123,7 @@ public class SequentialWorkAssigner extends DistributedWorkQueueWorkAssigner {
   protected void cleanupFinishedWork() {
     final Iterator<Entry<String,Map<Table.ID,String>>> queuedWork = queuedWorkByPeerName.entrySet()
         .iterator();
-    final String instanceId = conn.getInstanceID();
+    final String instanceId = client.getInstanceID();
 
     int elementsRemoved = 0;
     // Check the status of all the work we've queued up
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/StatusMaker.java b/server/master/src/main/java/org/apache/accumulo/master/replication/StatusMaker.java
index 3122995..059d689 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/StatusMaker.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/StatusMaker.java
@@ -58,14 +58,14 @@ import com.google.protobuf.InvalidProtocolBufferException;
 public class StatusMaker {
   private static final Logger log = LoggerFactory.getLogger(StatusMaker.class);
 
-  private final AccumuloClient conn;
+  private final AccumuloClient client;
   private final VolumeManager fs;
 
   private BatchWriter replicationWriter, metadataWriter;
   private String sourceTableName = MetadataTable.NAME;
 
-  public StatusMaker(AccumuloClient conn, VolumeManager fs) {
-    this.conn = conn;
+  public StatusMaker(AccumuloClient client, VolumeManager fs) {
+    this.client = client;
     this.fs = fs;
   }
 
@@ -87,7 +87,7 @@ public class StatusMaker {
       // Read from a source table (typically accumulo.metadata)
       final Scanner s;
       try {
-        s = conn.createScanner(sourceTableName, Authorizations.EMPTY);
+        s = client.createScanner(sourceTableName, Authorizations.EMPTY);
       } catch (TableNotFoundException e) {
         throw new RuntimeException(e);
       }
@@ -102,8 +102,8 @@ public class StatusMaker {
         if (null == replicationWriter) {
           // Ensures table is online
           try {
-            ReplicationTable.setOnline(conn);
-            replicationWriter = ReplicationTable.getBatchWriter(conn);
+            ReplicationTable.setOnline(client);
+            replicationWriter = ReplicationTable.getBatchWriter(client);
           } catch (ReplicationTableOfflineException | AccumuloSecurityException
               | AccumuloException e) {
             log.warn("Replication table did not come online");
@@ -263,7 +263,7 @@ public class StatusMaker {
     log.debug("Deleting {} from metadata table as it's no longer needed", k.toStringNoTruncate());
     if (null == metadataWriter) {
       try {
-        metadataWriter = conn.createBatchWriter(sourceTableName, new BatchWriterConfig());
+        metadataWriter = client.createBatchWriter(sourceTableName, new BatchWriterConfig());
       } catch (TableNotFoundException e) {
         throw new RuntimeException("Metadata table doesn't exist");
       }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/UnorderedWorkAssigner.java b/server/master/src/main/java/org/apache/accumulo/master/replication/UnorderedWorkAssigner.java
index 56611a6..c44e008 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/UnorderedWorkAssigner.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/UnorderedWorkAssigner.java
@@ -54,8 +54,8 @@ public class UnorderedWorkAssigner extends DistributedWorkQueueWorkAssigner {
 
   public UnorderedWorkAssigner() {}
 
-  public UnorderedWorkAssigner(AccumuloConfiguration conf, AccumuloClient conn) {
-    configure(conf, conn);
+  public UnorderedWorkAssigner(AccumuloConfiguration conf, AccumuloClient client) {
+    configure(conf, client);
   }
 
   @Override
@@ -137,7 +137,7 @@ public class UnorderedWorkAssigner extends DistributedWorkQueueWorkAssigner {
   @Override
   protected void cleanupFinishedWork() {
     final Iterator<String> work = queuedWork.iterator();
-    final String instanceId = conn.getInstanceID();
+    final String instanceId = client.getInstanceID();
     while (work.hasNext()) {
       String filename = work.next();
       // Null equates to the work was finished
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkDriver.java b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkDriver.java
index 79c36f6..e9e592f 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkDriver.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkDriver.java
@@ -38,7 +38,7 @@ public class WorkDriver extends Daemon {
   private static final Logger log = LoggerFactory.getLogger(WorkDriver.class);
 
   private Master master;
-  private AccumuloClient conn;
+  private AccumuloClient client;
   private AccumuloConfiguration conf;
 
   private WorkAssigner assigner;
@@ -47,7 +47,7 @@ public class WorkDriver extends Daemon {
   public WorkDriver(Master master) throws AccumuloException, AccumuloSecurityException {
     super();
     this.master = master;
-    this.conn = master.getClient();
+    this.client = master.getClient();
     this.conf = master.getConfiguration();
     configureWorkAssigner();
   }
@@ -67,7 +67,7 @@ public class WorkDriver extends Daemon {
         throw new RuntimeException(e);
       }
 
-      this.assigner.configure(conf, conn);
+      this.assigner.configure(conf, client);
       this.assignerImplName = assigner.getClass().getName();
       this.setName(assigner.getName());
     }
@@ -77,11 +77,11 @@ public class WorkDriver extends Daemon {
    * Getters/setters for testing purposes
    */
   protected AccumuloClient getClient() {
-    return conn;
+    return client;
   }
 
-  protected void setConnector(AccumuloClient conn) {
-    this.conn = conn;
+  protected void setClient(AccumuloClient client) {
+    this.client = client;
   }
 
   protected AccumuloConfiguration getConf() {
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
index b2a521f..2c3e66f 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
@@ -57,17 +57,17 @@ public class WorkMaker {
   private static final Logger log = LoggerFactory.getLogger(WorkMaker.class);
 
   private final ServerContext context;
-  private AccumuloClient conn;
+  private AccumuloClient client;
 
   private BatchWriter writer;
 
-  public WorkMaker(ServerContext context, AccumuloClient conn) {
+  public WorkMaker(ServerContext context, AccumuloClient client) {
     this.context = context;
-    this.conn = conn;
+    this.client = client;
   }
 
   public void run() {
-    if (!ReplicationTable.isOnline(conn)) {
+    if (!ReplicationTable.isOnline(client)) {
       log.debug("Replication table is not yet online");
       return;
     }
@@ -76,9 +76,9 @@ public class WorkMaker {
     try {
       final Scanner s;
       try {
-        s = ReplicationTable.getScanner(conn);
+        s = ReplicationTable.getScanner(client);
         if (null == writer) {
-          setBatchWriter(ReplicationTable.getBatchWriter(conn));
+          setBatchWriter(ReplicationTable.getBatchWriter(client));
         }
       } catch (ReplicationTableOfflineException e) {
         log.warn("Replication table was online, but not anymore");
diff --git a/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java b/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
index ae79a93..d659c41 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
@@ -261,12 +261,12 @@ public class MergeStats {
     ClientOpts opts = new ClientOpts();
     opts.parseArgs(MergeStats.class.getName(), args);
 
-    AccumuloClient conn = opts.getClient();
-    Map<String,String> tableIdMap = conn.tableOperations().tableIdMap();
+    AccumuloClient client = opts.getClient();
+    Map<String,String> tableIdMap = client.tableOperations().tableIdMap();
     ZooReaderWriter zooReaderWriter = opts.getServerContext().getZooReaderWriter();
     for (Entry<String,String> entry : tableIdMap.entrySet()) {
       final String table = entry.getKey(), tableId = entry.getValue();
-      String path = ZooUtil.getRoot(conn.getInstanceID()) + Constants.ZTABLES + "/" + tableId
+      String path = ZooUtil.getRoot(client.getInstanceID()) + Constants.ZTABLES + "/" + tableId
           + "/merge";
       MergeInfo info = new MergeInfo();
       if (zooReaderWriter.exists(path)) {
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
index 91c0348..ccf895f 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
@@ -121,8 +121,9 @@ class CleanUp extends MasterRepo {
 
     try {
       // look for other tables that references this table's files
-      AccumuloClient conn = master.getClient();
-      try (BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8)) {
+      AccumuloClient client = master.getClient();
+      try (BatchScanner bs = client.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY,
+          8)) {
         Range allTables = MetadataSchema.TabletsSection.getRange();
         Range tableRange = MetadataSchema.TabletsSection.getRange(tableId);
         Range beforeTable = new Range(allTables.getStartKey(), true, tableRange.getStartKey(),
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
index 62679d5..81a91c8 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
@@ -85,15 +85,15 @@ class CompactionDriver extends MasterRepo {
     }
 
     MapCounter<TServerInstance> serversToFlush = new MapCounter<>();
-    AccumuloClient conn = master.getClient();
+    AccumuloClient client = master.getClient();
 
     Scanner scanner;
 
     if (tableId.equals(MetadataTable.ID)) {
-      scanner = new IsolatedScanner(conn.createScanner(RootTable.NAME, Authorizations.EMPTY));
+      scanner = new IsolatedScanner(client.createScanner(RootTable.NAME, Authorizations.EMPTY));
       scanner.setRange(MetadataSchema.TabletsSection.getRange());
     } else {
-      scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
+      scanner = new IsolatedScanner(client.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
       Range range = new KeyExtent(tableId, null, startRow == null ? null : new Text(startRow))
           .toMetadataRange();
       scanner.setRange(range);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
index aef42cf..c618fca 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
@@ -90,11 +90,11 @@ class WriteExportFiles extends MasterRepo {
     if (reserved > 0)
       return reserved;
 
-    AccumuloClient conn = master.getClient();
+    AccumuloClient client = master.getClient();
 
     checkOffline(master.getContext());
 
-    Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    Scanner metaScanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     metaScanner.setRange(new KeyExtent(tableInfo.tableID, null, null).toMetadataRange());
 
     // scan for locations
@@ -254,11 +254,11 @@ class WriteExportFiles extends MasterRepo {
   private static void exportConfig(ServerContext context, Table.ID tableID, ZipOutputStream zipOut,
       DataOutputStream dataOut)
       throws AccumuloException, AccumuloSecurityException, TableNotFoundException, IOException {
-    AccumuloClient conn = context.getClient();
+    AccumuloClient client = context.getClient();
 
     DefaultConfiguration defaultConfig = DefaultConfiguration.getInstance();
-    Map<String,String> siteConfig = conn.instanceOperations().getSiteConfiguration();
-    Map<String,String> systemConfig = conn.instanceOperations().getSystemConfiguration();
+    Map<String,String> siteConfig = client.instanceOperations().getSiteConfiguration();
+    Map<String,String> systemConfig = client.instanceOperations().getSystemConfiguration();
 
     TableConfiguration tableConfig = context.getServerConfFactory().getTableConfiguration(tableID);
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CleanUpBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CleanUpBulkImport.java
index 9c04556..7d6c4d3 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CleanUpBulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CleanUpBulkImport.java
@@ -57,8 +57,8 @@ public class CleanUpBulkImport extends MasterRepo {
         "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
     MetadataTableUtil.addDeleteEntry(master.getContext(), tableId, bulkDir.toString());
     log.debug("removing the metadata table markers for loaded files");
-    AccumuloClient conn = master.getClient();
-    MetadataTableUtil.removeBulkLoadEntries(conn, tableId, tid);
+    AccumuloClient client = master.getClient();
+    MetadataTableUtil.removeBulkLoadEntries(client, tableId, tid);
     log.debug("releasing HDFS reservations for " + source + " and " + error);
     Utils.unreserveHdfsDirectory(master, source, tid);
     Utils.unreserveHdfsDirectory(master, error, tid);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CopyFailed.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CopyFailed.java
index c2cee48..c39b33e 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CopyFailed.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CopyFailed.java
@@ -114,9 +114,9 @@ class CopyFailed extends MasterRepo {
      */
 
     // determine which failed files were loaded
-    AccumuloClient conn = master.getClient();
+    AccumuloClient client = master.getClient();
     try (Scanner mscanner = new IsolatedScanner(
-        conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY))) {
+        client.createScanner(MetadataTable.NAME, Authorizations.EMPTY))) {
       mscanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
       mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CleanUpBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CleanUpBulkImport.java
index c063b80..3050575 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CleanUpBulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CleanUpBulkImport.java
@@ -52,8 +52,8 @@ public class CleanUpBulkImport extends MasterRepo {
     MetadataTableUtil.addDeleteEntry(master.getContext(), info.tableId, bulkDir.toString());
     if (info.tableState == TableState.ONLINE) {
       log.debug("removing the metadata table markers for loaded files");
-      AccumuloClient conn = master.getClient();
-      MetadataTableUtil.removeBulkLoadEntries(conn, info.tableId, tid);
+      AccumuloClient client = master.getClient();
+      MetadataTableUtil.removeBulkLoadEntries(client, info.tableId, tid);
     }
     Utils.unreserveHdfsDirectory(master, info.sourceDir, tid);
     Utils.getReadLock(master, info.tableId, tid).unlock();
diff --git a/server/master/src/test/java/org/apache/accumulo/master/replication/SequentialWorkAssignerTest.java b/server/master/src/test/java/org/apache/accumulo/master/replication/SequentialWorkAssignerTest.java
index 366bf61..b64b97a 100644
--- a/server/master/src/test/java/org/apache/accumulo/master/replication/SequentialWorkAssignerTest.java
+++ b/server/master/src/test/java/org/apache/accumulo/master/replication/SequentialWorkAssignerTest.java
@@ -39,14 +39,14 @@ import org.junit.Test;
 
 public class SequentialWorkAssignerTest {
 
-  private AccumuloClient conn;
+  private AccumuloClient client;
   private SequentialWorkAssigner assigner;
 
   @Before
   public void init() throws Exception {
     AccumuloConfiguration conf = createMock(AccumuloConfiguration.class);
-    conn = createMock(AccumuloClient.class);
-    assigner = new SequentialWorkAssigner(conf, conn);
+    client = createMock(AccumuloClient.class);
+    assigner = new SequentialWorkAssigner(conf, client);
   }
 
   @Test
@@ -66,12 +66,12 @@ public class SequentialWorkAssignerTest {
 
     queuedWork.put("cluster1", cluster1Work);
 
-    assigner.setConnector(conn);
+    assigner.setClient(client);
     assigner.setZooCache(zooCache);
     assigner.setWorkQueue(workQueue);
     assigner.setQueuedWork(queuedWork);
 
-    expect(conn.getInstanceID()).andReturn("instance");
+    expect(client.getInstanceID()).andReturn("instance");
 
     // file1 replicated
     expect(zooCache.get(ZooUtil.getRoot("instance") + ReplicationConstants.ZOO_WORK_QUEUE + "/"
@@ -85,11 +85,11 @@ public class SequentialWorkAssignerTest {
                     new ReplicationTarget("cluster1", "2", Table.ID.of("2")))))
                         .andReturn(new byte[0]);
 
-    replay(workQueue, zooCache, conn);
+    replay(workQueue, zooCache, client);
 
     assigner.cleanupFinishedWork();
 
-    verify(workQueue, zooCache, conn);
+    verify(workQueue, zooCache, client);
 
     assertEquals(1, cluster1Work.size());
     assertEquals(
diff --git a/server/master/src/test/java/org/apache/accumulo/master/replication/UnorderedWorkAssignerTest.java b/server/master/src/test/java/org/apache/accumulo/master/replication/UnorderedWorkAssignerTest.java
index 6249190..0c2f6e7 100644
--- a/server/master/src/test/java/org/apache/accumulo/master/replication/UnorderedWorkAssignerTest.java
+++ b/server/master/src/test/java/org/apache/accumulo/master/replication/UnorderedWorkAssignerTest.java
@@ -46,14 +46,14 @@ import org.junit.Test;
 
 public class UnorderedWorkAssignerTest {
 
-  private AccumuloClient conn;
+  private AccumuloClient client;
   private UnorderedWorkAssigner assigner;
 
   @Before
   public void init() throws Exception {
     AccumuloConfiguration conf = createMock(AccumuloConfiguration.class);
-    conn = createMock(AccumuloClient.class);
-    assigner = new UnorderedWorkAssigner(conf, conn);
+    client = createMock(AccumuloClient.class);
+    assigner = new UnorderedWorkAssigner(conf, client);
   }
 
   @Test
@@ -112,17 +112,17 @@ public class UnorderedWorkAssignerTest {
     ZooCache cache = createMock(ZooCache.class);
     assigner.setZooCache(cache);
 
-    expect(conn.getInstanceID()).andReturn("id");
+    expect(client.getInstanceID()).andReturn("id");
     expect(cache.get(Constants.ZROOT + "/id" + ReplicationConstants.ZOO_WORK_QUEUE + "/wal1"))
         .andReturn(null);
     expect(cache.get(Constants.ZROOT + "/id" + ReplicationConstants.ZOO_WORK_QUEUE + "/wal2"))
         .andReturn(null);
 
-    replay(cache, conn);
+    replay(cache, client);
 
     assigner.cleanupFinishedWork();
 
-    verify(cache, conn);
+    verify(cache, client);
     assertTrue("Queued work was not emptied", queuedWork.isEmpty());
   }
 
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/replication/ReplicationResource.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/replication/ReplicationResource.java
index afa82f8..4d754d6 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/replication/ReplicationResource.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/replication/ReplicationResource.java
@@ -74,11 +74,11 @@ public class ReplicationResource {
   @GET
   public List<ReplicationInformation> getReplicationInformation()
       throws AccumuloException, AccumuloSecurityException {
-    final AccumuloClient conn = Monitor.getContext().getClient();
+    final AccumuloClient client = Monitor.getContext().getClient();
 
-    final TableOperations tops = conn.tableOperations();
+    final TableOperations tops = client.tableOperations();
 
-    final Map<String,String> properties = conn.instanceOperations().getSystemConfiguration();
+    final Map<String,String> properties = client.instanceOperations().getSystemConfiguration();
     final Map<String,String> peers = new HashMap<>();
     final String definedPeersPrefix = Property.REPLICATION_PEERS.getKey();
     final ReplicaSystemFactory replicaSystemFactory = new ReplicaSystemFactory();
@@ -147,7 +147,7 @@ public class ReplicationResource {
     // Read over the queued work
     BatchScanner bs;
     try {
-      bs = conn.createBatchScanner(ReplicationTable.NAME, Authorizations.EMPTY, 4);
+      bs = client.createBatchScanner(ReplicationTable.NAME, Authorizations.EMPTY, 4);
     } catch (TableOfflineException | TableNotFoundException e) {
       log.error("Could not read replication table", e);
       return Collections.emptyList();
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/trace/TracesResource.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/trace/TracesResource.java
index 31a0d6f..e5c8197 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/trace/TracesResource.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/trace/TracesResource.java
@@ -358,11 +358,12 @@ public class TracesResource {
   private Scanner getScanner(String table, String principal, AuthenticationToken at)
       throws AccumuloException, AccumuloSecurityException {
     try {
-      AccumuloClient conn = Monitor.getContext().getClient(principal, at);
-      if (!conn.tableOperations().exists(table)) {
+      AccumuloClient client = Monitor.getContext().getClient(principal, at);
+      if (!client.tableOperations().exists(table)) {
         return null;
       }
-      return conn.createScanner(table, conn.securityOperations().getUserAuthorizations(principal));
+      return client.createScanner(table,
+          client.securityOperations().getUserAuthorizations(principal));
     } catch (AccumuloSecurityException | TableNotFoundException ex) {
       return null;
     }
diff --git a/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceDump.java b/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceDump.java
index 83f41bb..33cb3d5 100644
--- a/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceDump.java
+++ b/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceDump.java
@@ -82,8 +82,8 @@ public class TraceDump {
     PrintStream out = System.out;
     long endTime = System.currentTimeMillis();
     long startTime = endTime - opts.length;
-    AccumuloClient conn = opts.getClient();
-    Scanner scanner = conn.createScanner(opts.getTableName(), opts.auths);
+    AccumuloClient client = opts.getClient();
+    Scanner scanner = client.createScanner(opts.getTableName(), opts.auths);
     scanner.setBatchSize(scanOpts.scanBatchSize);
     Range range = new Range(new Text("start:" + Long.toHexString(startTime)),
         new Text("start:" + Long.toHexString(endTime)));
@@ -104,11 +104,11 @@ public class TraceDump {
 
   private static int dumpTrace(Opts opts, ScannerOpts scanOpts) throws Exception {
     final PrintStream out = System.out;
-    AccumuloClient conn = opts.getClient();
+    AccumuloClient client = opts.getClient();
 
     int count = 0;
     for (String traceId : opts.traceIds) {
-      Scanner scanner = conn.createScanner(opts.getTableName(), opts.auths);
+      Scanner scanner = client.createScanner(opts.getTableName(), opts.auths);
       scanner.setBatchSize(scanOpts.scanBatchSize);
       Range range = new Range(new Text(traceId.toString()));
       scanner.setRange(range);
diff --git a/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceTableStats.java b/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceTableStats.java
index 5ae99d4..525de09 100644
--- a/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceTableStats.java
+++ b/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceTableStats.java
@@ -74,8 +74,8 @@ public class TraceTableStats {
 
   public void count(Opts opts)
       throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
-    AccumuloClient conn = opts.getClient();
-    Scanner scanner = conn.createScanner(opts.getTableName(), Authorizations.EMPTY);
+    AccumuloClient client = opts.getClient();
+    Scanner scanner = client.createScanner(opts.getTableName(), Authorizations.EMPTY);
     scanner.setRange(new Range(null, true, "idx:", false));
     Map<String,SpanTypeCount> counts = new TreeMap<>();
     final SpanTypeCount hdfs = new SpanTypeCount();
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java
index 69b7dea..5206580 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java
@@ -110,7 +110,7 @@ public class ReplicationServicerHandler implements Iface {
     } catch (AccumuloException | AccumuloSecurityException e) {
       log.error("Could not get connection", e);
       throw new RemoteReplicationException(RemoteReplicationErrorCode.CANNOT_AUTHENTICATE,
-          "Cannot get connector as " + tabletServer.getContext().getCredentials().getPrincipal());
+          "Cannot get connection as " + tabletServer.getContext().getCredentials().getPrincipal());
     }
 
     log.debug("Replicated {} mutations to {}", entriesReplicated, tableName);
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/BatchWriterReplicationReplayerTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/BatchWriterReplicationReplayerTest.java
index 3b335a6..8d8910c 100644
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/BatchWriterReplicationReplayerTest.java
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/BatchWriterReplicationReplayerTest.java
@@ -52,24 +52,24 @@ import com.google.common.collect.Lists;
 public class BatchWriterReplicationReplayerTest {
 
   private ClientContext context;
-  private AccumuloClient conn;
+  private AccumuloClient client;
   private AccumuloConfiguration conf;
   private BatchWriter bw;
 
   @Before
   public void setUpContext() throws AccumuloException, AccumuloSecurityException {
-    conn = createMock(AccumuloClient.class);
+    client = createMock(AccumuloClient.class);
     conf = createMock(AccumuloConfiguration.class);
     bw = createMock(BatchWriter.class);
     context = createMock(ClientContext.class);
     expect(context.getConfiguration()).andReturn(conf).anyTimes();
-    expect(context.getClient()).andReturn(conn).anyTimes();
+    expect(context.getClient()).andReturn(client).anyTimes();
     replay(context);
   }
 
   @After
   public void verifyMock() {
-    verify(context, conn, conf, bw);
+    verify(context, client, conf, bw);
   }
 
   @Test
@@ -124,7 +124,7 @@ public class BatchWriterReplicationReplayerTest {
 
     expect(conf.getAsBytes(Property.TSERV_REPLICATION_BW_REPLAYER_MEMORY))
         .andReturn(bwCfg.getMaxMemory());
-    expect(conn.createBatchWriter(tableName, bwCfg)).andReturn(bw);
+    expect(client.createBatchWriter(tableName, bwCfg)).andReturn(bw);
 
     bw.addMutations(Lists.newArrayList(expectedMutation));
     expectLastCall().once();
@@ -132,7 +132,7 @@ public class BatchWriterReplicationReplayerTest {
     bw.close();
     expectLastCall().once();
 
-    replay(conn, conf, bw);
+    replay(client, conf, bw);
 
     replayer.replicateLog(context, tableName, edits);
   }
@@ -196,7 +196,7 @@ public class BatchWriterReplicationReplayerTest {
 
     expect(conf.getAsBytes(Property.TSERV_REPLICATION_BW_REPLAYER_MEMORY))
         .andReturn(bwCfg.getMaxMemory());
-    expect(conn.createBatchWriter(tableName, bwCfg)).andReturn(bw);
+    expect(client.createBatchWriter(tableName, bwCfg)).andReturn(bw);
 
     bw.addMutations(Lists.newArrayList(expectedMutation));
     expectLastCall().once();
@@ -204,7 +204,7 @@ public class BatchWriterReplicationReplayerTest {
     bw.close();
     expectLastCall().once();
 
-    replay(conn, conf, bw);
+    replay(client, conf, bw);
 
     replayer.replicateLog(context, tableName, edits);
   }
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/SummariesCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/SummariesCommand.java
index 7094c5e..f963f7c 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/SummariesCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/SummariesCommand.java
@@ -52,8 +52,8 @@ public class SummariesCommand extends TableOperation {
   @Override
   protected void doTableOp(final Shell shellState, final String tableName)
       throws AccumuloException, AccumuloSecurityException, TableNotFoundException, IOException {
-    AccumuloClient conn = shellState.getAccumuloClient();
-    SummaryRetriever retriever = conn.tableOperations().summaries(tableName)
+    AccumuloClient client = shellState.getAccumuloClient();
+    SummaryRetriever retriever = client.tableOperations().summaries(tableName)
         .withMatchingConfiguration(selectionRegex);
     if (startRow != null) {
       retriever.startRow(startRow);
diff --git a/shell/src/test/java/org/apache/accumulo/shell/commands/DeleteAuthsCommandTest.java b/shell/src/test/java/org/apache/accumulo/shell/commands/DeleteAuthsCommandTest.java
index 300e2df..f82dc27 100644
--- a/shell/src/test/java/org/apache/accumulo/shell/commands/DeleteAuthsCommandTest.java
+++ b/shell/src/test/java/org/apache/accumulo/shell/commands/DeleteAuthsCommandTest.java
@@ -41,89 +41,89 @@ public class DeleteAuthsCommandTest {
 
   @Test
   public void deleteExistingAuth() throws Exception {
-    AccumuloClient conn = EasyMock.createMock(AccumuloClient.class);
+    AccumuloClient client = EasyMock.createMock(AccumuloClient.class);
     CommandLine cli = EasyMock.createMock(CommandLine.class);
     Shell shellState = EasyMock.createMock(Shell.class);
     ConsoleReader reader = EasyMock.createMock(ConsoleReader.class);
     SecurityOperations secOps = EasyMock.createMock(SecurityOperations.class);
 
-    EasyMock.expect(shellState.getAccumuloClient()).andReturn(conn);
+    EasyMock.expect(shellState.getAccumuloClient()).andReturn(client);
 
     // We're the root user
-    EasyMock.expect(conn.whoami()).andReturn("root");
+    EasyMock.expect(client.whoami()).andReturn("root");
     EasyMock.expect(cli.getOptionValue("u", "root")).andReturn("foo");
     EasyMock.expect(cli.getOptionValue("s")).andReturn("abc");
 
-    EasyMock.expect(conn.securityOperations()).andReturn(secOps);
-    EasyMock.expect(conn.securityOperations()).andReturn(secOps);
+    EasyMock.expect(client.securityOperations()).andReturn(secOps);
+    EasyMock.expect(client.securityOperations()).andReturn(secOps);
     EasyMock.expect(secOps.getUserAuthorizations("foo"))
         .andReturn(new Authorizations("abc", "123"));
     secOps.changeUserAuthorizations("foo", new Authorizations("123"));
     EasyMock.expectLastCall();
 
-    EasyMock.replay(conn, cli, shellState, reader, secOps);
+    EasyMock.replay(client, cli, shellState, reader, secOps);
 
     cmd.execute("deleteauths -u foo -s abc", cli, shellState);
 
-    EasyMock.verify(conn, cli, shellState, reader, secOps);
+    EasyMock.verify(client, cli, shellState, reader, secOps);
   }
 
   @Test
   public void deleteNonExistingAuth() throws Exception {
-    AccumuloClient conn = EasyMock.createMock(AccumuloClient.class);
+    AccumuloClient client = EasyMock.createMock(AccumuloClient.class);
     CommandLine cli = EasyMock.createMock(CommandLine.class);
     Shell shellState = EasyMock.createMock(Shell.class);
     ConsoleReader reader = EasyMock.createMock(ConsoleReader.class);
     SecurityOperations secOps = EasyMock.createMock(SecurityOperations.class);
 
-    EasyMock.expect(shellState.getAccumuloClient()).andReturn(conn);
+    EasyMock.expect(shellState.getAccumuloClient()).andReturn(client);
 
     // We're the root user
-    EasyMock.expect(conn.whoami()).andReturn("root");
+    EasyMock.expect(client.whoami()).andReturn("root");
     EasyMock.expect(cli.getOptionValue("u", "root")).andReturn("foo");
     EasyMock.expect(cli.getOptionValue("s")).andReturn("def");
 
-    EasyMock.expect(conn.securityOperations()).andReturn(secOps);
-    EasyMock.expect(conn.securityOperations()).andReturn(secOps);
+    EasyMock.expect(client.securityOperations()).andReturn(secOps);
+    EasyMock.expect(client.securityOperations()).andReturn(secOps);
     EasyMock.expect(secOps.getUserAuthorizations("foo"))
         .andReturn(new Authorizations("abc", "123"));
     secOps.changeUserAuthorizations("foo", new Authorizations("abc", "123"));
     EasyMock.expectLastCall();
 
-    EasyMock.replay(conn, cli, shellState, reader, secOps);
+    EasyMock.replay(client, cli, shellState, reader, secOps);
 
     cmd.execute("deleteauths -u foo -s def", cli, shellState);
 
-    EasyMock.verify(conn, cli, shellState, reader, secOps);
+    EasyMock.verify(client, cli, shellState, reader, secOps);
   }
 
   @Test
   public void deleteAllAuth() throws Exception {
-    AccumuloClient conn = EasyMock.createMock(AccumuloClient.class);
+    AccumuloClient client = EasyMock.createMock(AccumuloClient.class);
     CommandLine cli = EasyMock.createMock(CommandLine.class);
     Shell shellState = EasyMock.createMock(Shell.class);
     ConsoleReader reader = EasyMock.createMock(ConsoleReader.class);
     SecurityOperations secOps = EasyMock.createMock(SecurityOperations.class);
 
-    EasyMock.expect(shellState.getAccumuloClient()).andReturn(conn);
+    EasyMock.expect(shellState.getAccumuloClient()).andReturn(client);
 
     // We're the root user
-    EasyMock.expect(conn.whoami()).andReturn("root");
+    EasyMock.expect(client.whoami()).andReturn("root");
     EasyMock.expect(cli.getOptionValue("u", "root")).andReturn("foo");
     EasyMock.expect(cli.getOptionValue("s")).andReturn("abc,123");
 
-    EasyMock.expect(conn.securityOperations()).andReturn(secOps);
-    EasyMock.expect(conn.securityOperations()).andReturn(secOps);
+    EasyMock.expect(client.securityOperations()).andReturn(secOps);
+    EasyMock.expect(client.securityOperations()).andReturn(secOps);
     EasyMock.expect(secOps.getUserAuthorizations("foo"))
         .andReturn(new Authorizations("abc", "123"));
     secOps.changeUserAuthorizations("foo", new Authorizations());
     EasyMock.expectLastCall();
 
-    EasyMock.replay(conn, cli, shellState, reader, secOps);
+    EasyMock.replay(client, cli, shellState, reader, secOps);
 
     cmd.execute("deleteauths -u foo -s abc,123", cli, shellState);
 
-    EasyMock.verify(conn, cli, shellState, reader, secOps);
+    EasyMock.verify(client, cli, shellState, reader, secOps);
   }
 
 }
diff --git a/shell/src/test/java/org/apache/accumulo/shell/commands/DropUserCommandTest.java b/shell/src/test/java/org/apache/accumulo/shell/commands/DropUserCommandTest.java
index 2fc0c57..1d4aa79 100644
--- a/shell/src/test/java/org/apache/accumulo/shell/commands/DropUserCommandTest.java
+++ b/shell/src/test/java/org/apache/accumulo/shell/commands/DropUserCommandTest.java
@@ -40,19 +40,19 @@ public class DropUserCommandTest {
 
   @Test
   public void dropUserWithoutForcePrompts() throws Exception {
-    AccumuloClient conn = EasyMock.createMock(AccumuloClient.class);
+    AccumuloClient client = EasyMock.createMock(AccumuloClient.class);
     CommandLine cli = EasyMock.createMock(CommandLine.class);
     Shell shellState = EasyMock.createMock(Shell.class);
     ConsoleReader reader = EasyMock.createMock(ConsoleReader.class);
     SecurityOperations secOps = EasyMock.createMock(SecurityOperations.class);
 
-    EasyMock.expect(shellState.getAccumuloClient()).andReturn(conn);
+    EasyMock.expect(shellState.getAccumuloClient()).andReturn(client);
 
     // The user we want to remove
     EasyMock.expect(cli.getArgs()).andReturn(new String[] {"user"});
 
     // We're the root user
-    EasyMock.expect(conn.whoami()).andReturn("root");
+    EasyMock.expect(client.whoami()).andReturn("root");
 
     // Force option was not provided
     EasyMock.expect(cli.hasOption("f")).andReturn(false);
@@ -63,17 +63,17 @@ public class DropUserCommandTest {
     // Fake a "yes" response
     EasyMock.expect(shellState.getReader()).andReturn(reader);
     EasyMock.expect(reader.readLine(EasyMock.anyObject(String.class))).andReturn("yes");
-    EasyMock.expect(shellState.getAccumuloClient()).andReturn(conn);
+    EasyMock.expect(shellState.getAccumuloClient()).andReturn(client);
 
-    EasyMock.expect(conn.securityOperations()).andReturn(secOps);
+    EasyMock.expect(client.securityOperations()).andReturn(secOps);
     secOps.dropLocalUser("user");
     EasyMock.expectLastCall();
 
-    EasyMock.replay(conn, cli, shellState, reader, secOps);
+    EasyMock.replay(client, cli, shellState, reader, secOps);
 
     cmd.execute("dropuser foo -f", cli, shellState);
 
-    EasyMock.verify(conn, cli, shellState, reader, secOps);
+    EasyMock.verify(client, cli, shellState, reader, secOps);
   }
 
 }
diff --git a/shell/src/test/java/org/apache/accumulo/shell/commands/SetIterCommandTest.java b/shell/src/test/java/org/apache/accumulo/shell/commands/SetIterCommandTest.java
index 2cf7a6c..b4fdc97 100644
--- a/shell/src/test/java/org/apache/accumulo/shell/commands/SetIterCommandTest.java
+++ b/shell/src/test/java/org/apache/accumulo/shell/commands/SetIterCommandTest.java
@@ -48,7 +48,7 @@ public class SetIterCommandTest {
 
   @Test
   public void addColumnAgeOffFilter() throws Exception {
-    AccumuloClient conn = EasyMock.createMock(AccumuloClient.class);
+    AccumuloClient client = EasyMock.createMock(AccumuloClient.class);
     CommandLine cli = EasyMock.createMock(CommandLine.class);
     Shell shellState = EasyMock.createMock(Shell.class);
     ConsoleReader reader = EasyMock.createMock(ConsoleReader.class);
@@ -98,33 +98,33 @@ public class SetIterCommandTest {
     // Shell asking for another unnamed option; we pass in an empty string to signal that we are
     // done adding options
     EasyMock.expect(reader.readLine(EasyMock.anyObject(String.class))).andReturn("");
-    EasyMock.expect(shellState.getAccumuloClient()).andReturn(conn);
+    EasyMock.expect(shellState.getAccumuloClient()).andReturn(client);
 
     // Table exists
-    EasyMock.expect(conn.tableOperations()).andReturn(tableOperations);
+    EasyMock.expect(client.tableOperations()).andReturn(tableOperations);
     EasyMock.expect(tableOperations.exists("foo")).andReturn(true);
 
     // Testing class load
-    EasyMock.expect(shellState.getAccumuloClient()).andReturn(conn);
-    EasyMock.expect(conn.tableOperations()).andReturn(tableOperations);
+    EasyMock.expect(shellState.getAccumuloClient()).andReturn(client);
+    EasyMock.expect(client.tableOperations()).andReturn(tableOperations);
     EasyMock.expect(tableOperations.testClassLoad("foo",
         "org.apache.accumulo.core.iterators.user.ColumnAgeOffFilter",
         SortedKeyValueIterator.class.getName())).andReturn(true);
 
     // Attach iterator
-    EasyMock.expect(shellState.getAccumuloClient()).andReturn(conn);
-    EasyMock.expect(conn.tableOperations()).andReturn(tableOperations);
+    EasyMock.expect(shellState.getAccumuloClient()).andReturn(client);
+    EasyMock.expect(client.tableOperations()).andReturn(tableOperations);
     tableOperations.attachIterator(EasyMock.eq("foo"), EasyMock.anyObject(IteratorSetting.class),
         EasyMock.eq(EnumSet.allOf(IteratorScope.class)));
     EasyMock.expectLastCall().once();
 
-    EasyMock.replay(conn, cli, shellState, reader, tableOperations);
+    EasyMock.replay(client, cli, shellState, reader, tableOperations);
 
     cmd.execute(
         "setiter -all -p 21 -t foo"
             + " -class org.apache.accumulo.core.iterators.user.ColumnAgeOffFilter",
         cli, shellState);
 
-    EasyMock.verify(conn, cli, shellState, reader, tableOperations);
+    EasyMock.verify(client, cli, shellState, reader, tableOperations);
   }
 }
diff --git a/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
index 7807fc8..4aaf67b 100644
--- a/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
+++ b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
@@ -29,7 +29,6 @@ import org.apache.accumulo.cluster.ClusterUsers;
 import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.ClientInfo;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.admin.SecurityOperations;
 import org.apache.accumulo.core.client.admin.TableOperations;
 import org.apache.accumulo.core.client.impl.ClientContext;
@@ -180,29 +179,29 @@ public abstract class AccumuloClusterHarness extends AccumuloITBase
           UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(),
               systemUser.getKeytab().getAbsolutePath());
 
-          // Open a connector as the system user (ensures the user will exist for us to assign
+          // Create client as the system user (ensures the user will exist for us to assign
           // permissions to)
           UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(),
               systemUser.getKeytab().getAbsolutePath());
-          AccumuloClient conn = cluster.getAccumuloClient(systemUser.getPrincipal(),
+          AccumuloClient client = cluster.getAccumuloClient(systemUser.getPrincipal(),
               new KerberosToken());
 
           // Then, log back in as the "root" user and do the grant
           UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
               rootUser.getKeytab().getAbsolutePath());
-          conn = getAccumuloClient();
+          client = getAccumuloClient();
 
           // Create the trace table
-          conn.tableOperations().create(traceTable);
+          client.tableOperations().create(traceTable);
 
           // Trace user (which is the same kerberos principal as the system user, but using a normal
           // KerberosToken) needs
           // to have the ability to read, write and alter the trace table
-          conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
+          client.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
               TablePermission.READ);
-          conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
+          client.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
               TablePermission.WRITE);
-          conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
+          client.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
               TablePermission.ALTER_TABLE);
         }
         break;
@@ -347,7 +346,7 @@ public abstract class AccumuloClusterHarness extends AccumuloITBase
     try {
       String princ = getAdminPrincipal();
       AuthenticationToken token = getAdminToken();
-      log.debug("Creating client connection as {} with {}", princ, token);
+      log.debug("Creating client as {} with {}", princ, token);
       return cluster.getAccumuloClient(princ, token);
     } catch (Exception e) {
       log.error("Could not connect to Accumulo", e);
@@ -357,21 +356,6 @@ public abstract class AccumuloClusterHarness extends AccumuloITBase
     }
   }
 
-  public Connector getConnector() {
-    try {
-      log.warn("No longer used since version 2.0, please call getAccumuloClient() instead.");
-      String princ = getAdminPrincipal();
-      AuthenticationToken token = getAdminToken();
-      log.debug("Creating connector as {} with {}", princ, token);
-      return cluster.getConnector(princ, token);
-    } catch (Exception e) {
-      log.error("Could not connect to Accumulo", e);
-      fail("Could not connect to Accumulo: " + e.getMessage());
-
-      throw new RuntimeException("Could not connect to Accumulo", e);
-    }
-  }
-
   // TODO Really don't want this here. Will ultimately need to abstract configuration method away
   // from MAConfig
   // and change over to something more generic
diff --git a/test/src/main/java/org/apache/accumulo/harness/SharedMiniClusterBase.java b/test/src/main/java/org/apache/accumulo/harness/SharedMiniClusterBase.java
index fac39d1..2073580 100644
--- a/test/src/main/java/org/apache/accumulo/harness/SharedMiniClusterBase.java
+++ b/test/src/main/java/org/apache/accumulo/harness/SharedMiniClusterBase.java
@@ -113,29 +113,29 @@ public abstract class SharedMiniClusterBase extends AccumuloITBase implements Cl
       final String traceTable = Property.TRACE_TABLE.getDefaultValue();
       final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();
       // Login as the trace user
-      // Open a connector as the system user (ensures the user will exist for us to assign
+      // Open a client as the system user (ensures the user will exist for us to assign
       // permissions to)
       UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(),
           systemUser.getKeytab().getAbsolutePath());
-      AccumuloClient conn = cluster.getAccumuloClient(systemUser.getPrincipal(),
+      AccumuloClient client = cluster.getAccumuloClient(systemUser.getPrincipal(),
           new KerberosToken());
 
       // Then, log back in as the "root" user and do the grant
       UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
           rootUser.getKeytab().getAbsolutePath());
-      conn = cluster.getAccumuloClient(principal, token);
+      client = cluster.getAccumuloClient(principal, token);
 
       // Create the trace table
-      conn.tableOperations().create(traceTable);
+      client.tableOperations().create(traceTable);
 
       // Trace user (which is the same kerberos principal as the system user, but using a normal
       // KerberosToken) needs
       // to have the ability to read, write and alter the trace table
-      conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
+      client.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
           TablePermission.READ);
-      conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
+      client.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
           TablePermission.WRITE);
-      conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
+      client.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
           TablePermission.ALTER_TABLE);
     }
   }
diff --git a/test/src/main/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java b/test/src/main/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java
index 6d596d9..5a5d8b6 100644
--- a/test/src/main/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java
@@ -61,8 +61,8 @@ public class ArbitraryTablePropertiesIT extends SharedMiniClusterBase {
 
     // make a table
     final String tableName = getUniqueNames(1)[0];
-    final AccumuloClient conn = getClient();
-    conn.tableOperations().create(tableName);
+    final AccumuloClient client = getClient();
+    client.tableOperations().create(tableName);
 
     // Set variables for the property name to use and the initial value
     String propertyName = "table.custom.description";
@@ -71,11 +71,11 @@ public class ArbitraryTablePropertiesIT extends SharedMiniClusterBase {
     // Make sure the property name is valid
     assertTrue(Property.isValidPropertyKey(propertyName));
     // Set the property to the desired value
-    conn.tableOperations().setProperty(tableName, propertyName, description1);
+    client.tableOperations().setProperty(tableName, propertyName, description1);
 
     // Loop through properties to make sure the new property is added to the list
     int count = 0;
-    for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
+    for (Entry<String,String> property : client.tableOperations().getProperties(tableName)) {
       if (property.getKey().equals(propertyName) && property.getValue().equals(description1))
         count++;
     }
@@ -83,22 +83,22 @@ public class ArbitraryTablePropertiesIT extends SharedMiniClusterBase {
 
     // Set the property as something different
     String description2 = "set second";
-    conn.tableOperations().setProperty(tableName, propertyName, description2);
+    client.tableOperations().setProperty(tableName, propertyName, description2);
 
     // / Loop through properties to make sure the new property is added to the list
     count = 0;
-    for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
+    for (Entry<String,String> property : client.tableOperations().getProperties(tableName)) {
       if (property.getKey().equals(propertyName) && property.getValue().equals(description2))
         count++;
     }
     assertEquals(count, 1);
 
     // Remove the property and make sure there is no longer a value associated with it
-    conn.tableOperations().removeProperty(tableName, propertyName);
+    client.tableOperations().removeProperty(tableName, propertyName);
 
     // / Loop through properties to make sure the new property is added to the list
     count = 0;
-    for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
+    for (Entry<String,String> property : client.tableOperations().getProperties(tableName)) {
       if (property.getKey().equals(propertyName))
         count++;
     }
@@ -133,13 +133,13 @@ public class ArbitraryTablePropertiesIT extends SharedMiniClusterBase {
     assertTrue(Property.isValidPropertyKey(propertyName));
 
     // Getting a fresh token will ensure we're logged in as this user (if necessary)
-    AccumuloClient testConn = c.changeUser(testUser, user.getToken());
+    AccumuloClient testclient = c.changeUser(testUser, user.getToken());
     // Set the property to the desired value
-    testConn.tableOperations().setProperty(tableName, propertyName, description1);
+    testclient.tableOperations().setProperty(tableName, propertyName, description1);
 
     // Loop through properties to make sure the new property is added to the list
     int count = 0;
-    for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
+    for (Entry<String,String> property : testclient.tableOperations().getProperties(tableName)) {
       if (property.getKey().equals(propertyName) && property.getValue().equals(description1))
         count++;
     }
@@ -147,22 +147,22 @@ public class ArbitraryTablePropertiesIT extends SharedMiniClusterBase {
 
     // Set the property as something different
     String description2 = "set second";
-    testConn.tableOperations().setProperty(tableName, propertyName, description2);
+    testclient.tableOperations().setProperty(tableName, propertyName, description2);
 
     // / Loop through properties to make sure the new property is added to the list
     count = 0;
-    for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
+    for (Entry<String,String> property : testclient.tableOperations().getProperties(tableName)) {
       if (property.getKey().equals(propertyName) && property.getValue().equals(description2))
         count++;
     }
     assertEquals(count, 1);
 
     // Remove the property and make sure there is no longer a value associated with it
-    testConn.tableOperations().removeProperty(tableName, propertyName);
+    testclient.tableOperations().removeProperty(tableName, propertyName);
 
     // / Loop through properties to make sure the new property is added to the list
     count = 0;
-    for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
+    for (Entry<String,String> property : testclient.tableOperations().getProperties(tableName)) {
       if (property.getKey().equals(propertyName))
         count++;
     }
@@ -197,18 +197,18 @@ public class ArbitraryTablePropertiesIT extends SharedMiniClusterBase {
     assertTrue(Property.isValidPropertyKey(propertyName));
 
     // Getting a fresh token will ensure we're logged in as this user (if necessary)
-    AccumuloClient testConn = c.changeUser(testUser, user.getToken());
+    AccumuloClient testclient = c.changeUser(testUser, user.getToken());
 
     // Try to set the property to the desired value.
     // If able to set it, the test fails, since permission was never granted
     try {
-      testConn.tableOperations().setProperty(tableName, propertyName, description1);
+      testclient.tableOperations().setProperty(tableName, propertyName, description1);
       fail("Was able to set property without permissions");
     } catch (AccumuloSecurityException e) {}
 
     // Loop through properties to make sure the new property is not added to the list
     int count = 0;
-    for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
+    for (Entry<String,String> property : testclient.tableOperations().getProperties(tableName)) {
       if (property.getKey().equals(propertyName))
         count++;
     }
diff --git a/test/src/main/java/org/apache/accumulo/test/AuditMessageIT.java b/test/src/main/java/org/apache/accumulo/test/AuditMessageIT.java
index a3c2cad..a71defe 100644
--- a/test/src/main/java/org/apache/accumulo/test/AuditMessageIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/AuditMessageIT.java
@@ -88,7 +88,7 @@ public class AuditMessageIT extends ConfigurableMacBase {
   // Must be static to survive Junit re-initialising the class every time.
   private static String lastAuditTimestamp;
   private AccumuloClient auditAccumuloClient;
-  private AccumuloClient conn;
+  private AccumuloClient client;
 
   private static long findAuditMessage(ArrayList<String> input, String pattern) {
     return input.stream().filter(s -> s.matches(".*" + pattern + ".*")).count();
@@ -153,19 +153,19 @@ public class AuditMessageIT extends ConfigurableMacBase {
     return result;
   }
 
-  private void grantEverySystemPriv(AccumuloClient conn, String user)
+  private void grantEverySystemPriv(AccumuloClient client, String user)
       throws AccumuloSecurityException, AccumuloException {
     SystemPermission[] arrayOfP = {SystemPermission.SYSTEM, SystemPermission.ALTER_TABLE,
         SystemPermission.ALTER_USER, SystemPermission.CREATE_TABLE, SystemPermission.CREATE_USER,
         SystemPermission.DROP_TABLE, SystemPermission.DROP_USER};
     for (SystemPermission p : arrayOfP) {
-      conn.securityOperations().grantSystemPermission(user, p);
+      client.securityOperations().grantSystemPermission(user, p);
     }
   }
 
   @Before
   public void resetInstance() throws Exception {
-    conn = getClient();
+    client = getClient();
 
     removeUsersAndTables();
 
@@ -176,12 +176,12 @@ public class AuditMessageIT extends ConfigurableMacBase {
   @After
   public void removeUsersAndTables() throws Exception {
     for (String user : Arrays.asList(AUDIT_USER_1, AUDIT_USER_2)) {
-      if (conn.securityOperations().listLocalUsers().contains(user)) {
-        conn.securityOperations().dropLocalUser(user);
+      if (client.securityOperations().listLocalUsers().contains(user)) {
+        client.securityOperations().dropLocalUser(user);
       }
     }
 
-    TableOperations tops = conn.tableOperations();
+    TableOperations tops = client.tableOperations();
     for (String table : Arrays.asList(THIRD_TEST_TABLE_NAME, NEW_TEST_TABLE_NAME,
         OLD_TEST_TABLE_NAME)) {
       if (tops.exists(table)) {
@@ -194,9 +194,9 @@ public class AuditMessageIT extends ConfigurableMacBase {
   public void testTableOperationsAudits() throws AccumuloException, AccumuloSecurityException,
       TableExistsException, TableNotFoundException, IOException, InterruptedException {
 
-    conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
-    conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
-    conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.CREATE_TABLE);
+    client.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
+    client.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
+    client.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.CREATE_TABLE);
 
     // Connect as Audit User and do a bunch of stuff.
     // Testing activity begins here
@@ -233,10 +233,10 @@ public class AuditMessageIT extends ConfigurableMacBase {
   public void testUserOperationsAudits() throws AccumuloSecurityException, AccumuloException,
       TableExistsException, InterruptedException, IOException {
 
-    conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
-    conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
-    conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.CREATE_USER);
-    grantEverySystemPriv(conn, AUDIT_USER_1);
+    client.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
+    client.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
+    client.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.CREATE_USER);
+    grantEverySystemPriv(client, AUDIT_USER_1);
 
     // Connect as Audit User and do a bunch of stuff.
     // Start testing activities here
@@ -245,12 +245,12 @@ public class AuditMessageIT extends ConfigurableMacBase {
         new PasswordToken(PASSWORD));
 
     // It seems only root can grant stuff.
-    conn.securityOperations().grantSystemPermission(AUDIT_USER_2, SystemPermission.ALTER_TABLE);
-    conn.securityOperations().revokeSystemPermission(AUDIT_USER_2, SystemPermission.ALTER_TABLE);
+    client.securityOperations().grantSystemPermission(AUDIT_USER_2, SystemPermission.ALTER_TABLE);
+    client.securityOperations().revokeSystemPermission(AUDIT_USER_2, SystemPermission.ALTER_TABLE);
     auditAccumuloClient.tableOperations().create(NEW_TEST_TABLE_NAME);
-    conn.securityOperations().grantTablePermission(AUDIT_USER_2, NEW_TEST_TABLE_NAME,
+    client.securityOperations().grantTablePermission(AUDIT_USER_2, NEW_TEST_TABLE_NAME,
         TablePermission.READ);
-    conn.securityOperations().revokeTablePermission(AUDIT_USER_2, NEW_TEST_TABLE_NAME,
+    client.securityOperations().revokeTablePermission(AUDIT_USER_2, NEW_TEST_TABLE_NAME,
         TablePermission.READ);
     auditAccumuloClient.securityOperations().changeLocalUserPassword(AUDIT_USER_2,
         new PasswordToken("anything"));
@@ -287,10 +287,10 @@ public class AuditMessageIT extends ConfigurableMacBase {
       throws AccumuloSecurityException, AccumuloException, TableExistsException,
       TableNotFoundException, IOException, InterruptedException {
 
-    conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
-    conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
-    conn.securityOperations().changeUserAuthorizations(AUDIT_USER_1, auths);
-    grantEverySystemPriv(conn, AUDIT_USER_1);
+    client.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
+    client.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
+    client.securityOperations().changeUserAuthorizations(AUDIT_USER_1, auths);
+    grantEverySystemPriv(client, AUDIT_USER_1);
 
     // Connect as Audit User and do a bunch of stuff.
     // Start testing activities here
@@ -375,10 +375,10 @@ public class AuditMessageIT extends ConfigurableMacBase {
   public void testDataOperationsAudits() throws AccumuloSecurityException, AccumuloException,
       TableExistsException, TableNotFoundException, IOException, InterruptedException {
 
-    conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
-    conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
-    conn.securityOperations().changeUserAuthorizations(AUDIT_USER_1, auths);
-    grantEverySystemPriv(conn, AUDIT_USER_1);
+    client.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
+    client.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
+    client.securityOperations().changeUserAuthorizations(AUDIT_USER_1, auths);
+    grantEverySystemPriv(client, AUDIT_USER_1);
 
     // Connect as Audit User and do a bunch of stuff.
     // Start testing activities here
@@ -434,8 +434,8 @@ public class AuditMessageIT extends ConfigurableMacBase {
       TableExistsException, TableNotFoundException, IOException, InterruptedException {
 
     // Create our user with no privs
-    conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
-    conn.tableOperations().create(OLD_TEST_TABLE_NAME);
+    client.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
+    client.tableOperations().create(OLD_TEST_TABLE_NAME);
     auditAccumuloClient = getCluster().getAccumuloClient(AUDIT_USER_1, new PasswordToken(PASSWORD));
 
     // Start testing activities
@@ -508,13 +508,14 @@ public class AuditMessageIT extends ConfigurableMacBase {
     // We don't want the thrown exceptions to stop our tests, and we are not testing that the
     // Exceptions are thrown.
     try {
-      conn.securityOperations().dropLocalUser(AUDIT_USER_2);
+      client.securityOperations().dropLocalUser(AUDIT_USER_2);
     } catch (AccumuloSecurityException ex) {}
     try {
-      conn.securityOperations().revokeSystemPermission(AUDIT_USER_2, SystemPermission.ALTER_TABLE);
+      client.securityOperations().revokeSystemPermission(AUDIT_USER_2,
+          SystemPermission.ALTER_TABLE);
     } catch (AccumuloSecurityException ex) {}
     try {
-      conn.securityOperations().createLocalUser("root", new PasswordToken("super secret"));
+      client.securityOperations().createLocalUser("root", new PasswordToken("super secret"));
     } catch (AccumuloSecurityException ex) {}
     ArrayList<String> auditMessages = getAuditMessages("testFailedAudits");
     // ... that will do for now.
diff --git a/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java b/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
index 47a02ed..25b7a01 100644
--- a/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
@@ -95,11 +95,11 @@ public class BadDeleteMarkersCreatedIT extends AccumuloClusterHarness {
 
     getCluster().getClusterControl().stopAllServers(ServerType.GARBAGE_COLLECTOR);
 
-    AccumuloClient conn = getAccumuloClient();
-    ClientInfo info = conn.info();
+    AccumuloClient client = getAccumuloClient();
+    ClientInfo info = client.info();
     ZooCache zcache = new ZooCache(info.getZooKeepers(), info.getZooKeepersSessionTimeOut());
     zcache.clear();
-    String path = ZooUtil.getRoot(conn.getInstanceID()) + Constants.ZGC_LOCK;
+    String path = ZooUtil.getRoot(client.getInstanceID()) + Constants.ZGC_LOCK;
     byte[] gcLockData;
     do {
       gcLockData = ZooLock.getLockData(zcache, path, null);
diff --git a/test/src/main/java/org/apache/accumulo/test/BalanceFasterIT.java b/test/src/main/java/org/apache/accumulo/test/BalanceFasterIT.java
index 92897e2..3dac3ec 100644
--- a/test/src/main/java/org/apache/accumulo/test/BalanceFasterIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/BalanceFasterIT.java
@@ -65,18 +65,18 @@ public class BalanceFasterIT extends ConfigurableMacBase {
   public void test() throws Exception {
     // create a table, add a bunch of splits
     String tableName = getUniqueNames(1)[0];
-    AccumuloClient conn = getClient();
-    conn.tableOperations().create(tableName);
+    AccumuloClient client = getClient();
+    client.tableOperations().create(tableName);
     SortedSet<Text> splits = new TreeSet<>();
     for (int i = 0; i < 1000; i++) {
       splits.add(new Text("" + i));
     }
-    conn.tableOperations().addSplits(tableName, splits);
+    client.tableOperations().addSplits(tableName, splits);
     // give a short wait for balancing
     sleepUninterruptibly(10, TimeUnit.SECONDS);
     // find out where the tablets are
     Iterator<Integer> i;
-    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+    try (Scanner s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
       s.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
       s.setRange(MetadataSchema.TabletsSection.getRange());
       Map<String,Integer> counts = new HashMap<>();
diff --git a/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java b/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java
index a4437a5..81b5325 100644
--- a/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java
@@ -68,22 +68,22 @@ public class CleanWalIT extends AccumuloClusterHarness {
 
   @Before
   public void offlineTraceTable() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
-    String traceTable = conn.instanceOperations().getSystemConfiguration()
+    AccumuloClient client = getAccumuloClient();
+    String traceTable = client.instanceOperations().getSystemConfiguration()
         .get(Property.TRACE_TABLE.getKey());
-    if (conn.tableOperations().exists(traceTable)) {
-      conn.tableOperations().offline(traceTable, true);
+    if (client.tableOperations().exists(traceTable)) {
+      client.tableOperations().offline(traceTable, true);
     }
   }
 
   @After
   public void onlineTraceTable() throws Exception {
     if (null != cluster) {
-      AccumuloClient conn = getAccumuloClient();
-      String traceTable = conn.instanceOperations().getSystemConfiguration()
+      AccumuloClient client = getAccumuloClient();
+      String traceTable = client.instanceOperations().getSystemConfiguration()
           .get(Property.TRACE_TABLE.getKey());
-      if (conn.tableOperations().exists(traceTable)) {
-        conn.tableOperations().online(traceTable, true);
+      if (client.tableOperations().exists(traceTable)) {
+        client.tableOperations().online(traceTable, true);
       }
     }
   }
@@ -91,10 +91,10 @@ public class CleanWalIT extends AccumuloClusterHarness {
   // test for ACCUMULO-1830
   @Test
   public void test() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    client.tableOperations().create(tableName);
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
     Mutation m = new Mutation("row");
     m.put("cf", "cq", "value");
     bw.addMutation(m);
@@ -105,35 +105,35 @@ public class CleanWalIT extends AccumuloClusterHarness {
     getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
 
     for (String table : new String[] {MetadataTable.NAME, RootTable.NAME})
-      conn.tableOperations().flush(table, null, null, true);
+      client.tableOperations().flush(table, null, null, true);
     log.debug("Checking entries for {}", tableName);
-    assertEquals(1, count(tableName, conn));
+    assertEquals(1, count(tableName, client));
     for (String table : new String[] {MetadataTable.NAME, RootTable.NAME}) {
       log.debug("Checking logs for {}", table);
-      assertEquals("Found logs for " + table, 0, countLogs(table, conn));
+      assertEquals("Found logs for " + table, 0, countLogs(table, client));
     }
 
-    bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    bw = client.createBatchWriter(tableName, new BatchWriterConfig());
     m = new Mutation("row");
     m.putDelete("cf", "cq");
     bw.addMutation(m);
     bw.close();
-    assertEquals(0, count(tableName, conn));
-    conn.tableOperations().flush(tableName, null, null, true);
-    conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
-    conn.tableOperations().flush(RootTable.NAME, null, null, true);
+    assertEquals(0, count(tableName, client));
+    client.tableOperations().flush(tableName, null, null, true);
+    client.tableOperations().flush(MetadataTable.NAME, null, null, true);
+    client.tableOperations().flush(RootTable.NAME, null, null, true);
     try {
       getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
       sleepUninterruptibly(3, TimeUnit.SECONDS);
     } finally {
       getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
     }
-    assertEquals(0, count(tableName, conn));
+    assertEquals(0, count(tableName, client));
   }
 
-  private int countLogs(String tableName, AccumuloClient conn) throws TableNotFoundException {
+  private int countLogs(String tableName, AccumuloClient client) throws TableNotFoundException {
     int count = 0;
-    try (Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
       scanner.fetchColumnFamily(MetadataSchema.TabletsSection.LogColumnFamily.NAME);
       scanner.setRange(MetadataSchema.TabletsSection.getRange());
       for (Entry<Key,Value> entry : scanner) {
@@ -144,8 +144,8 @@ public class CleanWalIT extends AccumuloClusterHarness {
     return count;
   }
 
-  int count(String tableName, AccumuloClient conn) throws Exception {
-    try (Scanner s = conn.createScanner(tableName, Authorizations.EMPTY)) {
+  int count(String tableName, AccumuloClient client) throws Exception {
+    try (Scanner s = client.createScanner(tableName, Authorizations.EMPTY)) {
       return Iterators.size(s.iterator());
     }
   }
diff --git a/test/src/main/java/org/apache/accumulo/test/ClientSideIteratorIT.java b/test/src/main/java/org/apache/accumulo/test/ClientSideIteratorIT.java
index 6b383b3..413a9cd 100644
--- a/test/src/main/java/org/apache/accumulo/test/ClientSideIteratorIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ClientSideIteratorIT.java
@@ -71,19 +71,19 @@ public class ClientSideIteratorIT extends AccumuloClusterHarness {
     assertEquals(i, results.size());
   }
 
-  private AccumuloClient conn;
+  private AccumuloClient client;
   private String tableName;
 
   @Before
   public void setupInstance() throws Exception {
-    conn = getAccumuloClient();
+    client = getAccumuloClient();
     tableName = getUniqueNames(1)[0];
   }
 
   @Test
   public void testIntersect() throws Exception {
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    client.tableOperations().create(tableName);
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
     Mutation m = new Mutation("part1");
     m.put("bar", "doc1", "value");
     m.put("bar", "doc2", "value");
@@ -102,7 +102,7 @@ public class ClientSideIteratorIT extends AccumuloClusterHarness {
 
     final IteratorSetting si = new IteratorSetting(10, tableName, IntersectingIterator.class);
     try (ClientSideIteratorScanner csis = new ClientSideIteratorScanner(
-        conn.createScanner(tableName, new Authorizations()))) {
+        client.createScanner(tableName, new Authorizations()))) {
       IntersectingIterator.setColumnFamilies(si, new Text[] {new Text("bar"), new Text("foo")});
       csis.addScanIterator(si);
       checkResults(csis, resultSet3, PartialKey.ROW_COLFAM_COLQUAL);
@@ -111,11 +111,11 @@ public class ClientSideIteratorIT extends AccumuloClusterHarness {
 
   @Test
   public void testVersioning() throws Exception {
-    conn.tableOperations().create(tableName);
-    conn.tableOperations().removeProperty(tableName, "table.iterator.scan.vers");
-    conn.tableOperations().removeProperty(tableName, "table.iterator.majc.vers");
-    conn.tableOperations().removeProperty(tableName, "table.iterator.minc.vers");
-    final BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    client.tableOperations().create(tableName);
+    client.tableOperations().removeProperty(tableName, "table.iterator.scan.vers");
+    client.tableOperations().removeProperty(tableName, "table.iterator.majc.vers");
+    client.tableOperations().removeProperty(tableName, "table.iterator.minc.vers");
+    final BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
     Mutation m = new Mutation("row1");
     m.put("colf", "colq", 1L, "value");
     m.put("colf", "colq", 2L, "value");
@@ -127,7 +127,7 @@ public class ClientSideIteratorIT extends AccumuloClusterHarness {
     bw.addMutation(m);
     bw.flush();
 
-    try (Scanner scanner = conn.createScanner(tableName, new Authorizations());
+    try (Scanner scanner = client.createScanner(tableName, new Authorizations());
         ClientSideIteratorScanner csis = new ClientSideIteratorScanner(scanner)) {
 
       final IteratorSetting si = new IteratorSetting(10, "localvers", VersioningIterator.class);
diff --git a/test/src/main/java/org/apache/accumulo/test/CloneIT.java b/test/src/main/java/org/apache/accumulo/test/CloneIT.java
index 06b824b..d845cb3 100644
--- a/test/src/main/java/org/apache/accumulo/test/CloneIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CloneIT.java
@@ -46,9 +46,9 @@ public class CloneIT extends AccumuloClusterHarness {
 
   @Test
   public void testNoFiles() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
     KeyExtent ke = new KeyExtent(Table.ID.of("0"), null, null);
     Mutation mut = ke.getPrevRowUpdateMutation();
@@ -57,17 +57,18 @@ public class CloneIT extends AccumuloClusterHarness {
     TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut,
         new Value("/default_tablet".getBytes()));
 
-    BatchWriter bw1 = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw1 = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     bw1.addMutation(mut);
 
     bw1.close();
 
-    BatchWriter bw2 = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw2 = client.createBatchWriter(tableName, new BatchWriterConfig());
 
-    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client, bw2);
 
-    int rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    int rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client,
+        bw2);
 
     assertEquals(0, rc);
 
@@ -77,9 +78,9 @@ public class CloneIT extends AccumuloClusterHarness {
 
   @Test
   public void testFilesChange() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
     KeyExtent ke = new KeyExtent(Table.ID.of("0"), null, null);
     Mutation mut = ke.getPrevRowUpdateMutation();
@@ -90,15 +91,15 @@ public class CloneIT extends AccumuloClusterHarness {
     mut.put(DataFileColumnFamily.NAME.toString(), "/default_tablet/0_0.rf",
         new DataFileValue(1, 200).encodeAsString());
 
-    BatchWriter bw1 = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw1 = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     bw1.addMutation(mut);
 
     bw1.flush();
 
-    BatchWriter bw2 = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw2 = client.createBatchWriter(tableName, new BatchWriterConfig());
 
-    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client, bw2);
 
     Mutation mut2 = new Mutation(ke.getMetadataEntry());
     mut2.putDelete(DataFileColumnFamily.NAME.toString(), "/default_tablet/0_0.rf");
@@ -108,17 +109,18 @@ public class CloneIT extends AccumuloClusterHarness {
     bw1.addMutation(mut2);
     bw1.flush();
 
-    int rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    int rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client,
+        bw2);
 
     assertEquals(1, rc);
 
-    rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client, bw2);
 
     assertEquals(0, rc);
 
     HashSet<String> files = new HashSet<>();
 
-    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
       scanner.setRange(new KeyExtent(Table.ID.of("1"), null, null).toMetadataRange());
       for (Entry<Key,Value> entry : scanner) {
         if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME))
@@ -132,33 +134,34 @@ public class CloneIT extends AccumuloClusterHarness {
   // test split where files of children are the same
   @Test
   public void testSplit1() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    BatchWriter bw1 = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw1 = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     bw1.addMutation(createTablet("0", null, null, "/default_tablet", "/default_tablet/0_0.rf"));
 
     bw1.flush();
 
-    BatchWriter bw2 = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw2 = client.createBatchWriter(tableName, new BatchWriterConfig());
 
-    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client, bw2);
 
     bw1.addMutation(createTablet("0", "m", null, "/default_tablet", "/default_tablet/0_0.rf"));
     bw1.addMutation(createTablet("0", null, "m", "/t-1", "/default_tablet/0_0.rf"));
 
     bw1.flush();
 
-    int rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    int rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client,
+        bw2);
 
     assertEquals(0, rc);
 
     HashSet<String> files = new HashSet<>();
     int count = 0;
 
-    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
       scanner.setRange(new KeyExtent(Table.ID.of("1"), null, null).toMetadataRange());
       for (Entry<Key,Value> entry : scanner) {
         if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
@@ -175,19 +178,19 @@ public class CloneIT extends AccumuloClusterHarness {
   // test split where files of children differ... like majc and split occurred
   @Test
   public void testSplit2() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    BatchWriter bw1 = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw1 = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     bw1.addMutation(createTablet("0", null, null, "/default_tablet", "/default_tablet/0_0.rf"));
 
     bw1.flush();
 
-    BatchWriter bw2 = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw2 = client.createBatchWriter(tableName, new BatchWriterConfig());
 
-    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client, bw2);
 
     bw1.addMutation(createTablet("0", "m", null, "/default_tablet", "/default_tablet/1_0.rf"));
     Mutation mut3 = createTablet("0", null, "m", "/t-1", "/default_tablet/1_0.rf");
@@ -196,18 +199,19 @@ public class CloneIT extends AccumuloClusterHarness {
 
     bw1.flush();
 
-    int rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    int rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client,
+        bw2);
 
     assertEquals(1, rc);
 
-    rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client, bw2);
 
     assertEquals(0, rc);
 
     HashSet<String> files = new HashSet<>();
     int count = 0;
 
-    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
       scanner.setRange(new KeyExtent(Table.ID.of("1"), null, null).toMetadataRange());
       for (Entry<Key,Value> entry : scanner) {
         if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
@@ -251,20 +255,20 @@ public class CloneIT extends AccumuloClusterHarness {
   // test two tablets splitting into four
   @Test
   public void testSplit3() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    BatchWriter bw1 = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw1 = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     bw1.addMutation(createTablet("0", "m", null, "/d1", "/d1/file1"));
     bw1.addMutation(createTablet("0", null, "m", "/d2", "/d2/file2"));
 
     bw1.flush();
 
-    BatchWriter bw2 = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw2 = client.createBatchWriter(tableName, new BatchWriterConfig());
 
-    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client, bw2);
 
     bw1.addMutation(createTablet("0", "f", null, "/d1", "/d1/file3"));
     bw1.addMutation(createTablet("0", "m", "f", "/d3", "/d1/file1"));
@@ -273,14 +277,15 @@ public class CloneIT extends AccumuloClusterHarness {
 
     bw1.flush();
 
-    int rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    int rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client,
+        bw2);
 
     assertEquals(0, rc);
 
     HashSet<String> files = new HashSet<>();
     int count = 0;
 
-    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
       scanner.setRange(new KeyExtent(Table.ID.of("1"), null, null).toMetadataRange());
       for (Entry<Key,Value> entry : scanner) {
         if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
@@ -298,20 +303,20 @@ public class CloneIT extends AccumuloClusterHarness {
   // test cloned marker
   @Test
   public void testClonedMarker() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    BatchWriter bw1 = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw1 = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     bw1.addMutation(createTablet("0", "m", null, "/d1", "/d1/file1"));
     bw1.addMutation(createTablet("0", null, "m", "/d2", "/d2/file2"));
 
     bw1.flush();
 
-    BatchWriter bw2 = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw2 = client.createBatchWriter(tableName, new BatchWriterConfig());
 
-    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client, bw2);
 
     bw1.addMutation(deleteTablet("0", "m", null, "/d1", "/d1/file1"));
     bw1.addMutation(deleteTablet("0", null, "m", "/d2", "/d2/file2"));
@@ -325,7 +330,8 @@ public class CloneIT extends AccumuloClusterHarness {
 
     bw1.flush();
 
-    int rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    int rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client,
+        bw2);
 
     assertEquals(1, rc);
 
@@ -337,14 +343,14 @@ public class CloneIT extends AccumuloClusterHarness {
 
     bw1.flush();
 
-    rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client, bw2);
 
     assertEquals(0, rc);
 
     HashSet<String> files = new HashSet<>();
     int count = 0;
 
-    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
       scanner.setRange(new KeyExtent(Table.ID.of("1"), null, null).toMetadataRange());
       for (Entry<Key,Value> entry : scanner) {
         if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
@@ -363,20 +369,20 @@ public class CloneIT extends AccumuloClusterHarness {
   // test two tablets splitting into four
   @Test
   public void testMerge() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    BatchWriter bw1 = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw1 = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     bw1.addMutation(createTablet("0", "m", null, "/d1", "/d1/file1"));
     bw1.addMutation(createTablet("0", null, "m", "/d2", "/d2/file2"));
 
     bw1.flush();
 
-    BatchWriter bw2 = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw2 = client.createBatchWriter(tableName, new BatchWriterConfig());
 
-    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client, bw2);
 
     bw1.addMutation(deleteTablet("0", "m", null, "/d1", "/d1/file1"));
     Mutation mut = createTablet("0", null, null, "/d2", "/d2/file2");
@@ -387,7 +393,7 @@ public class CloneIT extends AccumuloClusterHarness {
     bw1.flush();
 
     try {
-      MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
+      MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), client, bw2);
       fail();
     } catch (TabletDeletedException tde) {}
   }
diff --git a/test/src/main/java/org/apache/accumulo/test/CompactionRateLimitingIT.java b/test/src/main/java/org/apache/accumulo/test/CompactionRateLimitingIT.java
index 6095bfa..14b15c7 100644
--- a/test/src/main/java/org/apache/accumulo/test/CompactionRateLimitingIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CompactionRateLimitingIT.java
@@ -47,9 +47,10 @@ public class CompactionRateLimitingIT extends ConfigurableMacBase {
   public void majorCompactionsAreRateLimited() throws Exception {
     long bytesWritten = 0;
     String tableName = getUniqueNames(1)[0];
-    AccumuloClient conn = getCluster().getAccumuloClient("root", new PasswordToken(ROOT_PASSWORD));
-    conn.tableOperations().create(tableName);
-    try (BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
+    AccumuloClient client = getCluster().getAccumuloClient("root",
+        new PasswordToken(ROOT_PASSWORD));
+    client.tableOperations().create(tableName);
+    try (BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig())) {
       Random r = new SecureRandom();
       while (bytesWritten < BYTES_TO_WRITE) {
         byte[] rowKey = new byte[32];
@@ -69,10 +70,10 @@ public class CompactionRateLimitingIT extends ConfigurableMacBase {
       }
     }
 
-    conn.tableOperations().flush(tableName, null, null, true);
+    client.tableOperations().flush(tableName, null, null, true);
 
     long compactionStart = System.currentTimeMillis();
-    conn.tableOperations().compact(tableName, null, null, false, true);
+    client.tableOperations().compact(tableName, null, null, false, true);
     long duration = System.currentTimeMillis() - compactionStart;
     // The rate will be "bursty", try to account for that by taking 80% of the expected rate (allow
     // for 20% under the maximum expected duration)
diff --git a/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java b/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
index 161189f..b889a3c 100644
--- a/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
@@ -135,26 +135,26 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
 
   @Before
   public void deleteUsers() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
-    Set<String> users = conn.securityOperations().listLocalUsers();
+    AccumuloClient client = getAccumuloClient();
+    Set<String> users = client.securityOperations().listLocalUsers();
     ClusterUser user = getUser(0);
     if (users.contains(user.getPrincipal())) {
-      conn.securityOperations().dropLocalUser(user.getPrincipal());
+      client.securityOperations().dropLocalUser(user.getPrincipal());
     }
   }
 
   @Test
   public void testBasic() throws Exception {
 
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
 
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
     try (
-        ConditionalWriter cw = conn.createConditionalWriter(tableName,
+        ConditionalWriter cw = client.createConditionalWriter(tableName,
             new ConditionalWriterConfig());
-        Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
+        Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
 
       // mutation conditional on column tx:seq not existing
       ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq"));
@@ -237,7 +237,7 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
   @Test
   public void testFields() throws Exception {
 
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
 
     String user = null;
@@ -246,23 +246,23 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
     user = user1.getPrincipal();
     if (saslEnabled()) {
       // The token is pointless for kerberos
-      conn.securityOperations().createLocalUser(user, null);
+      client.securityOperations().createLocalUser(user, null);
     } else {
-      conn.securityOperations().createLocalUser(user, new PasswordToken(user1.getPassword()));
+      client.securityOperations().createLocalUser(user, new PasswordToken(user1.getPassword()));
     }
 
     Authorizations auths = new Authorizations("A", "B");
 
-    conn.securityOperations().changeUserAuthorizations(user, auths);
-    conn.securityOperations().grantSystemPermission(user, SystemPermission.CREATE_TABLE);
+    client.securityOperations().changeUserAuthorizations(user, auths);
+    client.securityOperations().grantSystemPermission(user, SystemPermission.CREATE_TABLE);
 
-    conn = conn.changeUser(user, user1.getToken());
-    conn.tableOperations().create(tableName);
+    client = client.changeUser(user, user1.getToken());
+    client.tableOperations().create(tableName);
 
     try (
-        ConditionalWriter cw = conn.createConditionalWriter(tableName,
+        ConditionalWriter cw = client.createConditionalWriter(tableName,
             new ConditionalWriterConfig().setAuthorizations(auths));
-        Scanner scanner = conn.createScanner(tableName, auths)) {
+        Scanner scanner = client.createScanner(tableName, auths)) {
 
       ColumnVisibility cva = new ColumnVisibility("A");
       ColumnVisibility cvb = new ColumnVisibility("B");
@@ -342,14 +342,14 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
   public void testBadColVis() throws Exception {
     // test when a user sets a col vis in a condition that can never be seen
 
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
 
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
     Authorizations auths = new Authorizations("A", "B");
 
-    conn.securityOperations().changeUserAuthorizations(getAdminPrincipal(), auths);
+    client.securityOperations().changeUserAuthorizations(getAdminPrincipal(), auths);
 
     Authorizations filteredAuths = new Authorizations("A");
 
@@ -357,7 +357,7 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
     ColumnVisibility cvb = new ColumnVisibility("B");
     ColumnVisibility cvc = new ColumnVisibility("C");
 
-    try (ConditionalWriter cw = conn.createConditionalWriter(tableName,
+    try (ConditionalWriter cw = client.createConditionalWriter(tableName,
         new ConditionalWriterConfig().setAuthorizations(filteredAuths))) {
 
       // User has authorization, but didn't include it in the writer
@@ -429,7 +429,7 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
     // test passing auths that exceed users configured auths
 
     Authorizations exceedingAuths = new Authorizations("A", "B", "D");
-    try (ConditionalWriter cw2 = conn.createConditionalWriter(tableName,
+    try (ConditionalWriter cw2 = client.createConditionalWriter(tableName,
         new ConditionalWriterConfig().setAuthorizations(exceedingAuths))) {
 
       ConditionalMutation cm8 = new ConditionalMutation("99006",
@@ -453,18 +453,18 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
   public void testConstraints() throws Exception {
     // ensure constraint violations are properly reported
 
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
 
-    conn.tableOperations().create(tableName);
-    conn.tableOperations().addConstraint(tableName, AlphaNumKeyConstraint.class.getName());
-    conn.tableOperations().clone(tableName, tableName + "_clone", true, new HashMap<>(),
+    client.tableOperations().create(tableName);
+    client.tableOperations().addConstraint(tableName, AlphaNumKeyConstraint.class.getName());
+    client.tableOperations().clone(tableName, tableName + "_clone", true, new HashMap<>(),
         new HashSet<>());
 
     try (
-        ConditionalWriter cw = conn.createConditionalWriter(tableName + "_clone",
+        ConditionalWriter cw = client.createConditionalWriter(tableName + "_clone",
             new ConditionalWriterConfig());
-        Scanner scanner = conn.createScanner(tableName + "_clone", new Authorizations())) {
+        Scanner scanner = client.createScanner(tableName + "_clone", new Authorizations())) {
 
       ConditionalMutation cm0 = new ConditionalMutation("99006+", new Condition("tx", "seq"));
       cm0.put("tx", "seq", "1");
@@ -485,12 +485,13 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
   @Test
   public void testIterators() throws Exception {
 
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
 
-    conn.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators());
+    client.tableOperations().create(tableName,
+        new NewTableConfiguration().withoutDefaultIterators());
 
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     Mutation m = new Mutation("ACCUMULO-1000");
     m.put("count", "comments", "1");
@@ -523,7 +524,7 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
     IteratorSetting iterConfig3 = new IteratorSetting(5, VersioningIterator.class);
     VersioningIterator.setMaxVersions(iterConfig3, 1);
 
-    try (Scanner scanner = conn.createScanner(tableName, new Authorizations())) {
+    try (Scanner scanner = client.createScanner(tableName, new Authorizations())) {
       scanner.addScanIterator(iterConfig);
       scanner.setRange(new Range("ACCUMULO-1000"));
       scanner.fetchColumn(new Text("count"), new Text("comments"));
@@ -531,7 +532,7 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
       Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
       assertEquals("3", entry.getValue().toString());
 
-      try (ConditionalWriter cw = conn.createConditionalWriter(tableName,
+      try (ConditionalWriter cw = client.createConditionalWriter(tableName,
           new ConditionalWriterConfig())) {
 
         ConditionalMutation cm0 = new ConditionalMutation("ACCUMULO-1000",
@@ -633,7 +634,7 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
   public void testTableAndConditionIterators() throws Exception {
 
     // test w/ table that has iterators configured
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
 
     IteratorSetting aiConfig1 = new IteratorSetting(30, "AI1", AddingIterator.class);
@@ -643,9 +644,9 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
     IteratorSetting aiConfig3 = new IteratorSetting(40, "AI2", AddingIterator.class);
     aiConfig3.addOption("amount", "5");
 
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     Mutation m = new Mutation("ACCUMULO-1000");
     m.put("count", "comments", "6");
@@ -661,14 +662,14 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
 
     bw.close();
 
-    conn.tableOperations().attachIterator(tableName, aiConfig1, EnumSet.of(IteratorScope.scan));
-    conn.tableOperations().offline(tableName, true);
-    conn.tableOperations().online(tableName, true);
+    client.tableOperations().attachIterator(tableName, aiConfig1, EnumSet.of(IteratorScope.scan));
+    client.tableOperations().offline(tableName, true);
+    client.tableOperations().online(tableName, true);
 
     try (
-        ConditionalWriter cw = conn.createConditionalWriter(tableName,
+        ConditionalWriter cw = client.createConditionalWriter(tableName,
             new ConditionalWriterConfig());
-        Scanner scanner = conn.createScanner(tableName, new Authorizations())) {
+        Scanner scanner = client.createScanner(tableName, new Authorizations())) {
 
       ConditionalMutation cm6 = new ConditionalMutation("ACCUMULO-1000",
           new Condition("count", "comments").setValue("8"));
@@ -731,12 +732,12 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
   @Test
   public void testBatch() throws Exception {
 
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
 
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    conn.securityOperations().changeUserAuthorizations(getAdminPrincipal(),
+    client.securityOperations().changeUserAuthorizations(getAdminPrincipal(),
         new Authorizations("A", "B"));
 
     ColumnVisibility cvab = new ColumnVisibility("A|B");
@@ -765,9 +766,9 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
     mutations.add(cm2);
 
     try (
-        ConditionalWriter cw = conn.createConditionalWriter(tableName,
+        ConditionalWriter cw = client.createConditionalWriter(tableName,
             new ConditionalWriterConfig().setAuthorizations(new Authorizations("A")));
-        Scanner scanner = conn.createScanner(tableName, new Authorizations("A"))) {
+        Scanner scanner = client.createScanner(tableName, new Authorizations("A"))) {
       Iterator<Result> results = cw.write(mutations.iterator());
       int count = 0;
       while (results.hasNext()) {
@@ -789,7 +790,7 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
       TreeSet<Text> splits = new TreeSet<>();
       splits.add(new Text("7"));
       splits.add(new Text("3"));
-      conn.tableOperations().addSplits(tableName, splits);
+      client.tableOperations().addSplits(tableName, splits);
 
       mutations.clear();
 
@@ -848,11 +849,11 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
   @Test
   public void testBigBatch() throws Exception {
 
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
 
-    conn.tableOperations().create(tableName);
-    conn.tableOperations().addSplits(tableName, nss("2", "4", "6"));
+    client.tableOperations().create(tableName);
+    client.tableOperations().addSplits(tableName, nss("2", "4", "6"));
 
     sleepUninterruptibly(2, TimeUnit.SECONDS);
 
@@ -877,7 +878,7 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
       cml.add(cm);
     }
 
-    try (ConditionalWriter cw = conn.createConditionalWriter(tableName,
+    try (ConditionalWriter cw = client.createConditionalWriter(tableName,
         new ConditionalWriterConfig())) {
 
       Iterator<Result> results = cw.write(cml.iterator());
@@ -922,15 +923,15 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
   @Test
   public void testBatchErrors() throws Exception {
 
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
 
-    conn.tableOperations().create(tableName);
-    conn.tableOperations().addConstraint(tableName, AlphaNumKeyConstraint.class.getName());
-    conn.tableOperations().clone(tableName, tableName + "_clone", true, new HashMap<>(),
+    client.tableOperations().create(tableName);
+    client.tableOperations().addConstraint(tableName, AlphaNumKeyConstraint.class.getName());
+    client.tableOperations().clone(tableName, tableName + "_clone", true, new HashMap<>(),
         new HashSet<>());
 
-    conn.securityOperations().changeUserAuthorizations(getAdminPrincipal(),
+    client.securityOperations().changeUserAuthorizations(getAdminPrincipal(),
         new Authorizations("A", "B"));
 
     ColumnVisibility cvaob = new ColumnVisibility("A|B");
@@ -938,10 +939,10 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
 
     switch ((new SecureRandom()).nextInt(3)) {
       case 1:
-        conn.tableOperations().addSplits(tableName, nss("6"));
+        client.tableOperations().addSplits(tableName, nss("6"));
         break;
       case 2:
-        conn.tableOperations().addSplits(tableName, nss("2", "95"));
+        client.tableOperations().addSplits(tableName, nss("2", "95"));
         break;
     }
 
@@ -976,9 +977,9 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
     mutations.add(cm3);
 
     try (
-        ConditionalWriter cw = conn.createConditionalWriter(tableName,
+        ConditionalWriter cw = client.createConditionalWriter(tableName,
             new ConditionalWriterConfig().setAuthorizations(new Authorizations("A")));
-        Scanner scanner = conn.createScanner(tableName, new Authorizations("A"))) {
+        Scanner scanner = client.createScanner(tableName, new Authorizations("A"))) {
       Iterator<Result> results = cw.write(mutations.iterator());
       HashSet<String> rows = new HashSet<>();
       while (results.hasNext()) {
@@ -1008,12 +1009,12 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
   public void testSameRow() throws Exception {
     // test multiple mutations for same row in same batch
 
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
 
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    try (ConditionalWriter cw = conn.createConditionalWriter(tableName,
+    try (ConditionalWriter cw = client.createConditionalWriter(tableName,
         new ConditionalWriterConfig())) {
 
       ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
@@ -1137,14 +1138,14 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
     String tableName;
     ArrayList<ByteSequence> rows;
     ConditionalWriter cw;
-    AccumuloClient conn;
+    AccumuloClient client;
     AtomicBoolean failed;
 
-    public MutatorTask(String tableName, AccumuloClient conn, ArrayList<ByteSequence> rows,
+    public MutatorTask(String tableName, AccumuloClient client, ArrayList<ByteSequence> rows,
         ConditionalWriter cw, AtomicBoolean failed) {
       this.tableName = tableName;
       this.rows = rows;
-      this.conn = conn;
+      this.client = client;
       this.cw = cw;
       this.failed = failed;
     }
@@ -1152,7 +1153,7 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
     @Override
     public void run() {
       try (Scanner scanner = new IsolatedScanner(
-          conn.createScanner(tableName, Authorizations.EMPTY))) {
+          client.createScanner(tableName, Authorizations.EMPTY))) {
         Random rand = new SecureRandom();
 
         for (int i = 0; i < 20; i++) {
@@ -1195,22 +1196,22 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
     // test multiple threads using a single conditional writer
 
     String tableName = getUniqueNames(1)[0];
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
 
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
     Random rand = new SecureRandom();
 
     switch (rand.nextInt(3)) {
       case 1:
-        conn.tableOperations().addSplits(tableName, nss("4"));
+        client.tableOperations().addSplits(tableName, nss("4"));
         break;
       case 2:
-        conn.tableOperations().addSplits(tableName, nss("3", "5"));
+        client.tableOperations().addSplits(tableName, nss("3", "5"));
         break;
     }
 
-    try (ConditionalWriter cw = conn.createConditionalWriter(tableName,
+    try (ConditionalWriter cw = client.createConditionalWriter(tableName,
         new ConditionalWriterConfig())) {
 
       ArrayList<ByteSequence> rows = new ArrayList<>();
@@ -1242,7 +1243,7 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
 
       ExecutorService tp = Executors.newFixedThreadPool(5);
       for (int i = 0; i < 5; i++) {
-        tp.submit(new MutatorTask(tableName, conn, rows, cw, failed));
+        tp.submit(new MutatorTask(tableName, client, rows, cw, failed));
       }
 
       tp.shutdown();
@@ -1254,7 +1255,7 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
       assertFalse("A MutatorTask failed with an exception", failed.get());
     }
 
-    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
       RowIterator rowIter = new RowIterator(scanner);
 
       while (rowIter.hasNext()) {
@@ -1275,34 +1276,34 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
   @Test
   public void testSecurity() throws Exception {
     // test against table user does not have read and/or write permissions for
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String user = null;
 
     // Create a new user
     ClusterUser user1 = getUser(0);
     user = user1.getPrincipal();
     if (saslEnabled()) {
-      conn.securityOperations().createLocalUser(user, null);
+      client.securityOperations().createLocalUser(user, null);
     } else {
-      conn.securityOperations().createLocalUser(user, new PasswordToken(user1.getPassword()));
+      client.securityOperations().createLocalUser(user, new PasswordToken(user1.getPassword()));
     }
 
     String[] tables = getUniqueNames(3);
     String table1 = tables[0], table2 = tables[1], table3 = tables[2];
 
     // Create three tables
-    conn.tableOperations().create(table1);
-    conn.tableOperations().create(table2);
-    conn.tableOperations().create(table3);
+    client.tableOperations().create(table1);
+    client.tableOperations().create(table2);
+    client.tableOperations().create(table3);
 
     // Grant R on table1, W on table2, R/W on table3
-    conn.securityOperations().grantTablePermission(user, table1, TablePermission.READ);
-    conn.securityOperations().grantTablePermission(user, table2, TablePermission.WRITE);
-    conn.securityOperations().grantTablePermission(user, table3, TablePermission.READ);
-    conn.securityOperations().grantTablePermission(user, table3, TablePermission.WRITE);
+    client.securityOperations().grantTablePermission(user, table1, TablePermission.READ);
+    client.securityOperations().grantTablePermission(user, table2, TablePermission.WRITE);
+    client.securityOperations().grantTablePermission(user, table3, TablePermission.READ);
+    client.securityOperations().grantTablePermission(user, table3, TablePermission.WRITE);
 
     // Login as the user
-    AccumuloClient conn2 = conn.changeUser(user, user1.getToken());
+    AccumuloClient conn2 = client.changeUser(user, user1.getToken());
 
     ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
     cm1.put("tx", "seq", "1");
@@ -1341,16 +1342,16 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
 
   @Test
   public void testTimeout() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
 
     String table = getUniqueNames(1)[0];
 
-    conn.tableOperations().create(table);
+    client.tableOperations().create(table);
 
     try (
-        ConditionalWriter cw = conn.createConditionalWriter(table,
+        ConditionalWriter cw = client.createConditionalWriter(table,
             new ConditionalWriterConfig().setTimeout(3, TimeUnit.SECONDS));
-        Scanner scanner = conn.createScanner(table, Authorizations.EMPTY)) {
+        Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
 
       ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
       cm1.put("tx", "seq", "1");
@@ -1393,19 +1394,19 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
   @Test
   public void testDeleteTable() throws Exception {
     String table = getUniqueNames(1)[0];
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
 
     try {
-      conn.createConditionalWriter(table, new ConditionalWriterConfig());
+      client.createConditionalWriter(table, new ConditionalWriterConfig());
       fail("Creating conditional writer for table that doesn't exist should fail");
     } catch (TableNotFoundException e) {}
 
-    conn.tableOperations().create(table);
+    client.tableOperations().create(table);
 
-    try (
-        ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig())) {
+    try (ConditionalWriter cw = client.createConditionalWriter(table,
+        new ConditionalWriterConfig())) {
 
-      conn.tableOperations().delete(table);
+      client.tableOperations().delete(table);
 
       ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
       cm1.put("tx", "seq", "1");
@@ -1426,14 +1427,14 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
   @Test
   public void testOffline() throws Exception {
     String table = getUniqueNames(1)[0];
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
 
-    conn.tableOperations().create(table);
+    client.tableOperations().create(table);
 
-    try (
-        ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig())) {
+    try (ConditionalWriter cw = client.createConditionalWriter(table,
+        new ConditionalWriterConfig())) {
 
-      conn.tableOperations().offline(table, true);
+      client.tableOperations().offline(table, true);
 
       ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
       cm1.put("tx", "seq", "1");
@@ -1450,7 +1451,7 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
       }
 
       try {
-        conn.createConditionalWriter(table, new ConditionalWriterConfig());
+        client.createConditionalWriter(table, new ConditionalWriterConfig());
         fail("Expected exception creating conditional writer to offline table");
       } catch (TableOfflineException e) {}
     }
@@ -1459,12 +1460,12 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
   @Test
   public void testError() throws Exception {
     String table = getUniqueNames(1)[0];
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
 
-    conn.tableOperations().create(table);
+    client.tableOperations().create(table);
 
-    try (
-        ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig())) {
+    try (ConditionalWriter cw = client.createConditionalWriter(table,
+        new ConditionalWriterConfig())) {
 
       IteratorSetting iterSetting = new IteratorSetting(5, BadIterator.class);
 
@@ -1489,12 +1490,12 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
   public void testNoConditions() throws AccumuloException, AccumuloSecurityException,
       TableExistsException, TableNotFoundException {
     String table = getUniqueNames(1)[0];
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
 
-    conn.tableOperations().create(table);
+    client.tableOperations().create(table);
 
-    try (
-        ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig())) {
+    try (ConditionalWriter cw = client.createConditionalWriter(table,
+        new ConditionalWriterConfig())) {
 
       ConditionalMutation cm1 = new ConditionalMutation("r1");
       cm1.put("tx", "seq", "1");
@@ -1509,23 +1510,23 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
     // Need to add a getClientConfig() to AccumuloCluster
     Assume.assumeTrue(getClusterType() == ClusterType.MINI);
     Process tracer = null;
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     AccumuloCluster cluster = getCluster();
     MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
-    if (!conn.tableOperations().exists("trace")) {
+    if (!client.tableOperations().exists("trace")) {
       tracer = mac.exec(TraceServer.class);
-      while (!conn.tableOperations().exists("trace")) {
+      while (!client.tableOperations().exists("trace")) {
         sleepUninterruptibly(1, TimeUnit.SECONDS);
       }
     }
 
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
     DistributedTrace.enable("localhost", "testTrace", mac.getClientInfo().getProperties());
     sleepUninterruptibly(1, TimeUnit.SECONDS);
     Span root = Trace.on("traceTest");
-    try (ConditionalWriter cw = conn.createConditionalWriter(tableName,
+    try (ConditionalWriter cw = client.createConditionalWriter(tableName,
         new ConditionalWriterConfig())) {
 
      // mutation conditional on column tx:seq not existing
@@ -1537,7 +1538,7 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
       root.stop();
     }
 
-    try (Scanner scanner = conn.createScanner("trace", Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner("trace", Authorizations.EMPTY)) {
       scanner.setRange(new Range(new Text(Long.toHexString(root.traceId()))));
       loop: while (true) {
         final StringBuilder finalBuffer = new StringBuilder();
diff --git a/test/src/main/java/org/apache/accumulo/test/ConfigurableMajorCompactionIT.java b/test/src/main/java/org/apache/accumulo/test/ConfigurableMajorCompactionIT.java
index 4e17a70..1664135 100644
--- a/test/src/main/java/org/apache/accumulo/test/ConfigurableMajorCompactionIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ConfigurableMajorCompactionIT.java
@@ -81,41 +81,41 @@ public class ConfigurableMajorCompactionIT extends ConfigurableMacBase {
 
   @Test
   public void test() throws Exception {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
-    conn.tableOperations().setProperty(tableName, Property.TABLE_COMPACTION_STRATEGY.getKey(),
+    client.tableOperations().create(tableName);
+    client.tableOperations().setProperty(tableName, Property.TABLE_COMPACTION_STRATEGY.getKey(),
         TestCompactionStrategy.class.getName());
-    writeFile(conn, tableName);
-    writeFile(conn, tableName);
-    writeFile(conn, tableName);
-    writeFile(conn, tableName);
+    writeFile(client, tableName);
+    writeFile(client, tableName);
+    writeFile(client, tableName);
+    writeFile(client, tableName);
     UtilWaitThread.sleep(2 * 1000);
-    assertEquals(4, countFiles(conn));
-    writeFile(conn, tableName);
-    int count = countFiles(conn);
+    assertEquals(4, countFiles(client));
+    writeFile(client, tableName);
+    int count = countFiles(client);
     assertTrue(count == 1 || count == 5);
     while (count != 1) {
       UtilWaitThread.sleep(250);
-      count = countFiles(conn);
+      count = countFiles(client);
     }
   }
 
-  private int countFiles(AccumuloClient conn) throws Exception {
-    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+  private int countFiles(AccumuloClient client) throws Exception {
+    try (Scanner s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
       s.setRange(MetadataSchema.TabletsSection.getRange());
       s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
       return Iterators.size(s.iterator());
     }
   }
 
-  private void writeFile(AccumuloClient conn, String tableName) throws Exception {
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+  private void writeFile(AccumuloClient client, String tableName) throws Exception {
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
     Mutation m = new Mutation("row");
     m.put("cf", "cq", "value");
     bw.addMutation(m);
     bw.close();
-    conn.tableOperations().flush(tableName, null, null, true);
+    client.tableOperations().flush(tableName, null, null, true);
   }
 
 }
diff --git a/test/src/main/java/org/apache/accumulo/test/ExistingMacIT.java b/test/src/main/java/org/apache/accumulo/test/ExistingMacIT.java
index 606c61d..ddad2f3 100644
--- a/test/src/main/java/org/apache/accumulo/test/ExistingMacIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ExistingMacIT.java
@@ -87,11 +87,12 @@ public class ExistingMacIT extends ConfigurableMacBase {
   @Test
   public void testExistingInstance() throws Exception {
 
-    AccumuloClient conn = getCluster().getAccumuloClient("root", new PasswordToken(ROOT_PASSWORD));
+    AccumuloClient client = getCluster().getAccumuloClient("root",
+        new PasswordToken(ROOT_PASSWORD));
 
-    conn.tableOperations().create("table1");
+    client.tableOperations().create("table1");
 
-    BatchWriter bw = conn.createBatchWriter("table1", new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter("table1", new BatchWriterConfig());
 
     Mutation m1 = new Mutation("00081");
     m1.put("math", "sqroot", "9");
@@ -100,10 +101,10 @@ public class ExistingMacIT extends ConfigurableMacBase {
     bw.addMutation(m1);
     bw.close();
 
-    conn.tableOperations().flush("table1", null, null, true);
+    client.tableOperations().flush("table1", null, null, true);
     // TODO use constants
-    conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
-    conn.tableOperations().flush(RootTable.NAME, null, null, true);
+    client.tableOperations().flush(MetadataTable.NAME, null, null, true);
+    client.tableOperations().flush(RootTable.NAME, null, null, true);
 
     Set<Entry<ServerType,Collection<ProcessReference>>> procs = getCluster().getProcesses()
         .entrySet();
@@ -119,7 +120,7 @@ public class ExistingMacIT extends ConfigurableMacBase {
         getCluster().getConfig().getSiteConfig().get(Property.INSTANCE_ZK_TIMEOUT.getKey()));
     IZooReaderWriter zrw = new ZooReaderWriterFactory().getZooReaderWriter(
         getCluster().getZooKeepers(), (int) zkTimeout, defaultConfig.get(Property.INSTANCE_SECRET));
-    final String zInstanceRoot = Constants.ZROOT + "/" + conn.getInstanceID();
+    final String zInstanceRoot = Constants.ZROOT + "/" + client.getInstanceID();
     while (!AccumuloStatus.isAccumuloOffline(zrw, zInstanceRoot)) {
       log.debug("Accumulo services still have their ZK locks held");
       Thread.sleep(1000);
@@ -141,9 +142,9 @@ public class ExistingMacIT extends ConfigurableMacBase {
     MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
     accumulo2.start();
 
-    conn = accumulo2.getAccumuloClient("root", new PasswordToken(ROOT_PASSWORD));
+    client = accumulo2.getAccumuloClient("root", new PasswordToken(ROOT_PASSWORD));
 
-    try (Scanner scanner = conn.createScanner("table1", Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner("table1", Authorizations.EMPTY)) {
       int sum = 0;
       for (Entry<Key,Value> entry : scanner) {
         sum += Integer.parseInt(entry.getValue().toString());
@@ -157,10 +158,10 @@ public class ExistingMacIT extends ConfigurableMacBase {
   @Test
   public void testExistingRunningInstance() throws Exception {
     final String table = getUniqueNames(1)[0];
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     // Ensure that a master and tserver are up so the existing instance check won't fail.
-    conn.tableOperations().create(table);
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    client.tableOperations().create(table);
+    BatchWriter bw = client.createBatchWriter(table, new BatchWriterConfig());
     Mutation m = new Mutation("foo");
     m.put("cf", "cq", "value");
     bw.addMutation(m);
diff --git a/test/src/main/java/org/apache/accumulo/test/FindMaxIT.java b/test/src/main/java/org/apache/accumulo/test/FindMaxIT.java
index 9c3d271..f366d80 100644
--- a/test/src/main/java/org/apache/accumulo/test/FindMaxIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/FindMaxIT.java
@@ -50,12 +50,12 @@ public class FindMaxIT extends AccumuloClusterHarness {
 
   @Test
   public void test1() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
 
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     bw.addMutation(nm(new byte[] {0}));
     bw.addMutation(nm(new byte[] {0, 0}));
@@ -73,7 +73,7 @@ public class FindMaxIT extends AccumuloClusterHarness {
 
     bw.close();
 
-    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
 
       ArrayList<Text> rows = new ArrayList<>();
 
@@ -82,41 +82,41 @@ public class FindMaxIT extends AccumuloClusterHarness {
       }
 
       for (int i = rows.size() - 1; i > 0; i--) {
-        Text max = conn.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, null, true,
+        Text max = client.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, null, true,
             rows.get(i), false);
         assertEquals(rows.get(i - 1), max);
 
-        max = conn.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, rows.get(i - 1),
+        max = client.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, rows.get(i - 1),
             true, rows.get(i), false);
         assertEquals(rows.get(i - 1), max);
 
-        max = conn.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, rows.get(i - 1),
+        max = client.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, rows.get(i - 1),
             false, rows.get(i), false);
         assertNull(max);
 
-        max = conn.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, null, true,
+        max = client.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, null, true,
             rows.get(i), true);
         assertEquals(rows.get(i), max);
 
-        max = conn.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, rows.get(i), true,
+        max = client.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, rows.get(i), true,
             rows.get(i), true);
         assertEquals(rows.get(i), max);
 
-        max = conn.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, rows.get(i - 1),
+        max = client.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, rows.get(i - 1),
             false, rows.get(i), true);
         assertEquals(rows.get(i), max);
 
       }
 
-      Text max = conn.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, null, true, null,
-          true);
+      Text max = client.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, null, true,
+          null, true);
       assertEquals(rows.get(rows.size() - 1), max);
 
-      max = conn.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, null, true,
+      max = client.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, null, true,
           new Text(new byte[] {0}), false);
       assertNull(max);
 
-      max = conn.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, null, true,
+      max = client.tableOperations().getMaxRow(tableName, Authorizations.EMPTY, null, true,
           new Text(new byte[] {0}), true);
       assertEquals(rows.get(0), max);
     }
diff --git a/test/src/main/java/org/apache/accumulo/test/IMMLGBenchmark.java b/test/src/main/java/org/apache/accumulo/test/IMMLGBenchmark.java
index a3752ba..d249691 100644
--- a/test/src/main/java/org/apache/accumulo/test/IMMLGBenchmark.java
+++ b/test/src/main/java/org/apache/accumulo/test/IMMLGBenchmark.java
@@ -51,7 +51,7 @@ import com.google.common.collect.Iterators;
 public class IMMLGBenchmark {
   public static void main(String[] args) throws Exception {
 
-    AccumuloClient conn = Accumulo.newClient().forInstance("test16", "localhost")
+    AccumuloClient client = Accumulo.newClient().forInstance("test16", "localhost")
         .usingPassword("root", "secret").build();
 
     int numlg = Integer.parseInt(args[0]);
@@ -65,7 +65,7 @@ public class IMMLGBenchmark {
     Map<String,Stat> stats = new TreeMap<>();
 
     for (int i = 0; i < 5; i++) {
-      runTest(conn, numlg, cfset, i > 1 ? stats : null);
+      runTest(client, numlg, cfset, i > 1 ? stats : null);
       System.out.println();
     }
 
@@ -75,27 +75,27 @@ public class IMMLGBenchmark {
 
   }
 
-  private static void runTest(AccumuloClient conn, int numlg, ArrayList<byte[]> cfset,
+  private static void runTest(AccumuloClient client, int numlg, ArrayList<byte[]> cfset,
       Map<String,Stat> stats) throws Exception {
     String table = "immlgb";
 
     try {
-      conn.tableOperations().delete(table);
+      client.tableOperations().delete(table);
     } catch (TableNotFoundException tnfe) {}
 
-    conn.tableOperations().create(table);
-    conn.tableOperations().setProperty(table, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(),
+    client.tableOperations().create(table);
+    client.tableOperations().setProperty(table, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(),
         "snappy");
 
-    setupLocalityGroups(conn, numlg, cfset, table);
+    setupLocalityGroups(client, numlg, cfset, table);
 
-    addStat(stats, "write", write(conn, cfset, table));
-    addStat(stats, "scan cf", scan(conn, cfset, table, false));
-    addStat(stats, "scan cf:cq", scan(conn, cfset, table, true));
+    addStat(stats, "write", write(client, cfset, table));
+    addStat(stats, "scan cf", scan(client, cfset, table, false));
+    addStat(stats, "scan cf:cq", scan(client, cfset, table, true));
     // TODO time reading all data
 
     long t1 = System.currentTimeMillis();
-    conn.tableOperations().flush(table, null, null, true);
+    client.tableOperations().flush(table, null, null, true);
     long t2 = System.currentTimeMillis();
 
     addStat(stats, "flush", t2 - t1);
@@ -115,9 +115,9 @@ public class IMMLGBenchmark {
     stat.addStat(wt);
   }
 
-  private static long scan(AccumuloClient conn, ArrayList<byte[]> cfset, String table, boolean cq)
+  private static long scan(AccumuloClient client, ArrayList<byte[]> cfset, String table, boolean cq)
       throws TableNotFoundException {
-    try (Scanner scanner = conn.createScanner(table, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
 
       if (!cq)
         scanner.fetchColumnFamily(new Text(cfset.get(15)));
@@ -134,13 +134,13 @@ public class IMMLGBenchmark {
     }
   }
 
-  private static long write(AccumuloClient conn, ArrayList<byte[]> cfset, String table)
+  private static long write(AccumuloClient client, ArrayList<byte[]> cfset, String table)
       throws TableNotFoundException, MutationsRejectedException {
     Random rand = new SecureRandom();
 
     byte val[] = new byte[50];
 
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(table, new BatchWriterConfig());
 
     long t1 = System.currentTimeMillis();
 
@@ -164,7 +164,7 @@ public class IMMLGBenchmark {
     return t2 - t1;
   }
 
-  private static void setupLocalityGroups(AccumuloClient conn, int numlg, ArrayList<byte[]> cfset,
+  private static void setupLocalityGroups(AccumuloClient client, int numlg, ArrayList<byte[]> cfset,
       String table) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     if (numlg > 1) {
       int numCF = cfset.size() / numlg;
@@ -181,10 +181,10 @@ public class IMMLGBenchmark {
         groups.put("lg" + (gNum++), groupCols);
       }
 
-      conn.tableOperations().setLocalityGroups(table, groups);
-      conn.tableOperations().offline(table);
+      client.tableOperations().setLocalityGroups(table, groups);
+      client.tableOperations().offline(table);
       sleepUninterruptibly(1, TimeUnit.SECONDS);
-      conn.tableOperations().online(table);
+      client.tableOperations().online(table);
     }
   }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/ImportExportIT.java b/test/src/main/java/org/apache/accumulo/test/ImportExportIT.java
index d198583..0977159 100644
--- a/test/src/main/java/org/apache/accumulo/test/ImportExportIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ImportExportIT.java
@@ -68,13 +68,13 @@ public class ImportExportIT extends AccumuloClusterHarness {
 
   @Test
   public void testExportImportThenScan() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
 
     String[] tableNames = getUniqueNames(2);
     String srcTable = tableNames[0], destTable = tableNames[1];
-    conn.tableOperations().create(srcTable);
+    client.tableOperations().create(srcTable);
 
-    BatchWriter bw = conn.createBatchWriter(srcTable, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(srcTable, new BatchWriterConfig());
     for (int row = 0; row < 1000; row++) {
       Mutation m = new Mutation(Integer.toString(row));
       for (int col = 0; col < 100; col++) {
@@ -85,7 +85,7 @@ public class ImportExportIT extends AccumuloClusterHarness {
 
     bw.close();
 
-    conn.tableOperations().compact(srcTable, null, null, true, true);
+    client.tableOperations().compact(srcTable, null, null, true, true);
 
     // Make a directory we can use to throw the export and import directories
     // Must exist on the filesystem the cluster is running.
@@ -109,9 +109,9 @@ public class ImportExportIT extends AccumuloClusterHarness {
     log.info("Importing table from {}", importDir);
 
     // Offline the table
-    conn.tableOperations().offline(srcTable, true);
+    client.tableOperations().offline(srcTable, true);
     // Then export it
-    conn.tableOperations().exportTable(srcTable, exportDir.toString());
+    client.tableOperations().exportTable(srcTable, exportDir.toString());
 
     // Make sure the distcp.txt file that exporttable creates is available
     Path distcp = new Path(exportDir, "distcp.txt");
@@ -135,16 +135,16 @@ public class ImportExportIT extends AccumuloClusterHarness {
     log.info("Import dir: {}", Arrays.toString(fs.listStatus(importDir)));
 
     // Import the exported data into a new table
-    conn.tableOperations().importTable(destTable, importDir.toString());
+    client.tableOperations().importTable(destTable, importDir.toString());
 
     // Get the table ID for the table that the importtable command created
-    final String tableId = conn.tableOperations().tableIdMap().get(destTable);
+    final String tableId = client.tableOperations().tableIdMap().get(destTable);
     assertNotNull(tableId);
 
     // Get all `file` colfams from the metadata table for the new table
     log.info("Imported into table with ID: {}", tableId);
 
-    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+    try (Scanner s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
       s.setRange(MetadataSchema.TabletsSection
           .getRange(org.apache.accumulo.core.client.impl.Table.ID.of(tableId)));
       s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
@@ -171,15 +171,15 @@ public class ImportExportIT extends AccumuloClusterHarness {
 
     }
     // Online the original table before we verify equivalence
-    conn.tableOperations().online(srcTable, true);
+    client.tableOperations().online(srcTable, true);
 
-    verifyTableEquality(conn, srcTable, destTable);
+    verifyTableEquality(client, srcTable, destTable);
   }
 
-  private void verifyTableEquality(AccumuloClient conn, String srcTable, String destTable)
+  private void verifyTableEquality(AccumuloClient client, String srcTable, String destTable)
       throws Exception {
-    Iterator<Entry<Key,Value>> src = conn.createScanner(srcTable, Authorizations.EMPTY).iterator(),
-        dest = conn.createScanner(destTable, Authorizations.EMPTY).iterator();
+    Iterator<Entry<Key,Value>> src = client.createScanner(srcTable, Authorizations.EMPTY)
+        .iterator(), dest = client.createScanner(destTable, Authorizations.EMPTY).iterator();
     assertTrue("Could not read any data from source table", src.hasNext());
     assertTrue("Could not read any data from destination table", dest.hasNext());
     while (src.hasNext() && dest.hasNext()) {
diff --git a/test/src/main/java/org/apache/accumulo/test/InterruptibleScannersIT.java b/test/src/main/java/org/apache/accumulo/test/InterruptibleScannersIT.java
index f48ae21..04c614b 100644
--- a/test/src/main/java/org/apache/accumulo/test/InterruptibleScannersIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/InterruptibleScannersIT.java
@@ -51,11 +51,11 @@ public class InterruptibleScannersIT extends AccumuloClusterHarness {
   public void test() throws Exception {
     // make a table
     final String tableName = getUniqueNames(1)[0];
-    final AccumuloClient conn = getAccumuloClient();
-    conn.tableOperations().create(tableName);
+    final AccumuloClient client = getAccumuloClient();
+    client.tableOperations().create(tableName);
 
     // make the world's slowest scanner
-    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
       final IteratorSetting cfg = new IteratorSetting(100, SlowIterator.class);
       // Wait long enough to be sure we can catch it, but not indefinitely.
       SlowIterator.setSeekSleepTime(cfg, 60 * 1000);
@@ -67,10 +67,10 @@ public class InterruptibleScannersIT extends AccumuloClusterHarness {
         public void run() {
           try {
             // ensure the scan is running: not perfect, the metadata tables could be scanned, too.
-            String tserver = conn.instanceOperations().getTabletServers().iterator().next();
+            String tserver = client.instanceOperations().getTabletServers().iterator().next();
             do {
               ArrayList<ActiveScan> scans = new ArrayList<>(
-                  conn.instanceOperations().getActiveScans(tserver));
+                  client.instanceOperations().getActiveScans(tserver));
               Iterator<ActiveScan> iter = scans.iterator();
               while (iter.hasNext()) {
                 ActiveScan scan = iter.next();
diff --git a/test/src/main/java/org/apache/accumulo/test/IsolationAndDeepCopyIT.java b/test/src/main/java/org/apache/accumulo/test/IsolationAndDeepCopyIT.java
index 1006bab..7a9bbf2 100644
--- a/test/src/main/java/org/apache/accumulo/test/IsolationAndDeepCopyIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/IsolationAndDeepCopyIT.java
@@ -46,11 +46,11 @@ public class IsolationAndDeepCopyIT extends AccumuloClusterHarness {
    // test bug fix for ACCUMULO-3977
 
     String table = super.getUniqueNames(1)[0];
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
 
-    conn.tableOperations().create(table);
+    client.tableOperations().create(table);
 
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(table, new BatchWriterConfig());
 
     addDocument(bw, "000A", "dog", "cat", "hamster", "iguana", "the");
     addDocument(bw, "000B", "java", "perl", "C++", "pascal", "the");
@@ -60,14 +60,14 @@ public class IsolationAndDeepCopyIT extends AccumuloClusterHarness {
     bw.close();
 
    // it's a bug when using rfiles, so flush
-    conn.tableOperations().flush(table, null, null, true);
+    client.tableOperations().flush(table, null, null, true);
 
     IteratorSetting iterCfg = new IteratorSetting(30, "ayeaye",
         IntersectingIterator.class.getName());
     IntersectingIterator.setColumnFamilies(iterCfg,
         new Text[] {new Text("the"), new Text("hamster")});
 
-    try (Scanner scanner = conn.createScanner(table, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
       scanner.enableIsolation();
       scanner.addScanIterator(iterCfg);
 
diff --git a/test/src/main/java/org/apache/accumulo/test/KeyValueEqualityIT.java b/test/src/main/java/org/apache/accumulo/test/KeyValueEqualityIT.java
index 19b6fc5..96d96d8 100644
--- a/test/src/main/java/org/apache/accumulo/test/KeyValueEqualityIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/KeyValueEqualityIT.java
@@ -42,17 +42,17 @@ public class KeyValueEqualityIT extends AccumuloClusterHarness {
 
   @Test
   public void testEquality() throws Exception {
-    AccumuloClient conn = this.getAccumuloClient();
+    AccumuloClient client = this.getAccumuloClient();
     final BatchWriterConfig config = new BatchWriterConfig();
 
     final String[] tables = getUniqueNames(2);
     final String table1 = tables[0], table2 = tables[1];
-    final TableOperations tops = conn.tableOperations();
+    final TableOperations tops = client.tableOperations();
     tops.create(table1);
     tops.create(table2);
 
-    final BatchWriter bw1 = conn.createBatchWriter(table1, config),
-        bw2 = conn.createBatchWriter(table2, config);
+    final BatchWriter bw1 = client.createBatchWriter(table1, config),
+        bw2 = client.createBatchWriter(table2, config);
 
     for (int row = 0; row < 100; row++) {
       Mutation m = new Mutation(Integer.toString(row));
@@ -66,8 +66,8 @@ public class KeyValueEqualityIT extends AccumuloClusterHarness {
     bw1.close();
     bw2.close();
 
-    Iterator<Entry<Key,Value>> t1 = conn.createScanner(table1, Authorizations.EMPTY).iterator(),
-        t2 = conn.createScanner(table2, Authorizations.EMPTY).iterator();
+    Iterator<Entry<Key,Value>> t1 = client.createScanner(table1, Authorizations.EMPTY).iterator(),
+        t2 = client.createScanner(table2, Authorizations.EMPTY).iterator();
     while (t1.hasNext() && t2.hasNext()) {
       // KeyValue, the implementation of Entry<Key,Value>, should support equality and hashCode
       // properly
diff --git a/test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java b/test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java
index 44777c5..931f674 100644
--- a/test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java
@@ -67,12 +67,13 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
 
     // make a table and lower the TABLE_END_ROW_MAX_SIZE property
     final String tableName = getUniqueNames(1)[0];
-    final AccumuloClient conn = getClient();
-    conn.tableOperations().create(tableName);
-    conn.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
+    final AccumuloClient client = getClient();
+    client.tableOperations().create(tableName);
+    client.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(),
+        "1000");
 
     // Create a BatchWriter and add a mutation to the table
-    BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter batchWriter = client.createBatchWriter(tableName, new BatchWriterConfig());
     Mutation m = new Mutation("Row");
     m.put("cf", "cq", "value");
     batchWriter.addMutation(m);
@@ -89,14 +90,14 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
 
     // try to add the split point that is too large, if the split point is created the test fails.
     try {
-      conn.tableOperations().addSplits(tableName, partitionKeys);
+      client.tableOperations().addSplits(tableName, partitionKeys);
       fail();
     } catch (AccumuloServerException e) {}
 
     // Make sure that the information that was written to the table before we tried to add the split
     // point is still correct
     int counter = 0;
-    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
       for (Entry<Key,Value> entry : scanner) {
         counter++;
         Key k = entry.getKey();
@@ -118,19 +119,20 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
 
     // make a table and lower the configure properties
     final String tableName = getUniqueNames(1)[0];
-    final AccumuloClient conn = getClient();
-    conn.tableOperations().create(tableName);
-    conn.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
-    conn.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(),
+    final AccumuloClient client = getClient();
+    client.tableOperations().create(tableName);
+    client.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+    client.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(),
         "none");
-    conn.tableOperations().setProperty(tableName,
+    client.tableOperations().setProperty(tableName,
         Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64");
-    conn.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
+    client.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(),
+        "1000");
 
     // Create a BatchWriter and key for a table entry that is longer than the allowed size for an
     // end row
     // Fill this key with all m's except the last spot
-    BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter batchWriter = client.createBatchWriter(tableName, new BatchWriterConfig());
     byte data[] = new byte[(int) (ConfigurationTypeHelper
         .getFixedMemoryAsBytes(Property.TABLE_MAX_END_ROW_SIZE.getDefaultValue()) + 2)];
     for (int i = 0; i < data.length - 1; i++) {
@@ -147,12 +149,12 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
     // Flush the BatchWriter and table and sleep for a bit to make sure that there is enough time
     // for the table to split if need be.
     batchWriter.close();
-    conn.tableOperations().flush(tableName, new Text(), new Text("z"), true);
+    client.tableOperations().flush(tableName, new Text(), new Text("z"), true);
     Thread.sleep(500);
 
     // Make sure all the data that was put in the table is still correct
     int count = 0;
-    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
       for (Entry<Key,Value> entry : scanner) {
         Key k = entry.getKey();
         data[data.length - 1] = (byte) count;
@@ -167,7 +169,7 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
     assertEquals(250, count);
 
     // Make sure no splits occurred in the table
-    assertEquals(0, conn.tableOperations().listSplits(tableName).size());
+    assertEquals(0, client.tableOperations().listSplits(tableName).size());
   }
 
   // 10 0's; 10 2's; 10 4's... 10 30's etc
@@ -191,10 +193,10 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
     log.info("Split later");
     automaticSplit(15, 1);
 
-    final AccumuloClient conn = getClient();
+    final AccumuloClient client = getClient();
 
     String tableName = new String();
-    java.util.Iterator<String> iterator = conn.tableOperations().list().iterator();
+    java.util.Iterator<String> iterator = client.tableOperations().list().iterator();
 
     while (iterator.hasNext()) {
       String curr = iterator.next();
@@ -205,7 +207,7 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
 
     // Create a BatchWriter and key for a table entry that is longer than the allowed size for an
     // end row
-    BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter batchWriter = client.createBatchWriter(tableName, new BatchWriterConfig());
     byte data[] = new byte[10];
 
     // Fill key with all j's except for last spot which alternates through 1 through 10 for every j
@@ -225,31 +227,32 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
     // Flush the BatchWriter and table and sleep for a bit to make sure that there is enough time
     // for the table to split if need be.
     batchWriter.close();
-    conn.tableOperations().flush(tableName, new Text(), new Text("z"), true);
+    client.tableOperations().flush(tableName, new Text(), new Text("z"), true);
 
     // Make sure a split occurs
-    while (conn.tableOperations().listSplits(tableName).size() == 0) {
+    while (client.tableOperations().listSplits(tableName).size() == 0) {
       Thread.sleep(250);
     }
 
-    assertTrue(0 < conn.tableOperations().listSplits(tableName).size());
+    assertTrue(0 < client.tableOperations().listSplits(tableName).size());
   }
 
   private void automaticSplit(int max, int spacing) throws Exception {
     // make a table and lower the configure properties
     final String tableName = getUniqueNames(1)[0];
-    final AccumuloClient conn = getClient();
-    conn.tableOperations().create(tableName);
-    conn.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
-    conn.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(),
+    final AccumuloClient client = getClient();
+    client.tableOperations().create(tableName);
+    client.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+    client.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(),
         "none");
-    conn.tableOperations().setProperty(tableName,
+    client.tableOperations().setProperty(tableName,
         Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64");
-    conn.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
+    client.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(),
+        "1000");
 
     // Create a BatchWriter and key for a table entry that is longer than the allowed size for an
     // end row
-    BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter batchWriter = client.createBatchWriter(tableName, new BatchWriterConfig());
     byte data[] = new byte[(int) (ConfigurationTypeHelper
         .getFixedMemoryAsBytes(Property.TABLE_MAX_END_ROW_SIZE.getDefaultValue()) + 2)];
 
@@ -270,13 +273,13 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
     // Flush the BatchWriter and table and sleep for a bit to make sure that there is enough time
     // for the table to split if need be.
     batchWriter.close();
-    conn.tableOperations().flush(tableName, new Text(), new Text("z"), true);
+    client.tableOperations().flush(tableName, new Text(), new Text("z"), true);
     Thread.sleep(500);
 
     // Make sure all the data that was put in the table is still correct
     int count = 0;
     int extra = 10;
-    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
       for (Entry<Key,Value> entry : scanner) {
         if (extra == 10) {
           extra = 0;
@@ -300,7 +303,7 @@ public class LargeSplitRowIT extends ConfigurableMacBase {
     assertEquals(max, count);
 
     // Make sure no splits occurred in the table
-    assertEquals(0, conn.tableOperations().listSplits(tableName).size());
+    assertEquals(0, client.tableOperations().listSplits(tableName).size());
   }
 
 }
diff --git a/test/src/main/java/org/apache/accumulo/test/LocatorIT.java b/test/src/main/java/org/apache/accumulo/test/LocatorIT.java
index b0408ab..4efb9a6 100644
--- a/test/src/main/java/org/apache/accumulo/test/LocatorIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/LocatorIT.java
@@ -86,15 +86,15 @@ public class LocatorIT extends AccumuloClusterHarness {
 
   @Test
   public void testBasic() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
 
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
     Range r1 = new Range("m");
     Range r2 = new Range("o", "x");
 
-    String tableId = conn.tableOperations().tableIdMap().get(tableName);
+    String tableId = client.tableOperations().tableIdMap().get(tableName);
 
     TabletId t1 = newTabletId(tableId, null, null);
     TabletId t2 = newTabletId(tableId, "r", null);
@@ -102,40 +102,40 @@ public class LocatorIT extends AccumuloClusterHarness {
 
     ArrayList<Range> ranges = new ArrayList<>();
 
-    HashSet<String> tservers = new HashSet<>(conn.instanceOperations().getTabletServers());
+    HashSet<String> tservers = new HashSet<>(client.instanceOperations().getTabletServers());
 
     ranges.add(r1);
-    Locations ret = conn.tableOperations().locate(tableName, ranges);
+    Locations ret = client.tableOperations().locate(tableName, ranges);
     assertContains(ret, tservers, ImmutableMap.of(r1, ImmutableSet.of(t1)),
         ImmutableMap.of(t1, ImmutableSet.of(r1)));
 
     ranges.add(r2);
-    ret = conn.tableOperations().locate(tableName, ranges);
+    ret = client.tableOperations().locate(tableName, ranges);
     assertContains(ret, tservers, ImmutableMap.of(r1, ImmutableSet.of(t1), r2, ImmutableSet.of(t1)),
         ImmutableMap.of(t1, ImmutableSet.of(r1, r2)));
 
     TreeSet<Text> splits = new TreeSet<>();
     splits.add(new Text("r"));
-    conn.tableOperations().addSplits(tableName, splits);
+    client.tableOperations().addSplits(tableName, splits);
 
-    ret = conn.tableOperations().locate(tableName, ranges);
+    ret = client.tableOperations().locate(tableName, ranges);
     assertContains(ret, tservers,
         ImmutableMap.of(r1, ImmutableSet.of(t2), r2, ImmutableSet.of(t2, t3)),
         ImmutableMap.of(t2, ImmutableSet.of(r1, r2), t3, ImmutableSet.of(r2)));
 
-    conn.tableOperations().offline(tableName, true);
+    client.tableOperations().offline(tableName, true);
 
     try {
-      conn.tableOperations().locate(tableName, ranges);
+      client.tableOperations().locate(tableName, ranges);
       fail();
     } catch (TableOfflineException e) {
       // expected
     }
 
-    conn.tableOperations().delete(tableName);
+    client.tableOperations().delete(tableName);
 
     try {
-      conn.tableOperations().locate(tableName, ranges);
+      client.tableOperations().locate(tableName, ranges);
       fail();
     } catch (TableNotFoundException e) {
       // expected
diff --git a/test/src/main/java/org/apache/accumulo/test/MetaSplitIT.java b/test/src/main/java/org/apache/accumulo/test/MetaSplitIT.java
index b20239d..1c07dbc 100644
--- a/test/src/main/java/org/apache/accumulo/test/MetaSplitIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/MetaSplitIT.java
@@ -52,15 +52,15 @@ public class MetaSplitIT extends AccumuloClusterHarness {
   @Before
   public void saveMetadataSplits() throws Exception {
     if (ClusterType.STANDALONE == getClusterType()) {
-      AccumuloClient conn = getAccumuloClient();
-      Collection<Text> splits = conn.tableOperations().listSplits(MetadataTable.NAME);
+      AccumuloClient client = getAccumuloClient();
+      Collection<Text> splits = client.tableOperations().listSplits(MetadataTable.NAME);
       // We expect a single split
       if (!splits.equals(Arrays.asList(new Text("~")))) {
         log.info("Existing splits on metadata table. Saving them, and applying"
             + " single original split of '~'");
         metadataSplits = splits;
-        conn.tableOperations().merge(MetadataTable.NAME, null, null);
-        conn.tableOperations().addSplits(MetadataTable.NAME,
+        client.tableOperations().merge(MetadataTable.NAME, null, null);
+        client.tableOperations().addSplits(MetadataTable.NAME,
             new TreeSet<>(Collections.singleton(new Text("~"))));
       }
     }
@@ -70,9 +70,9 @@ public class MetaSplitIT extends AccumuloClusterHarness {
   public void restoreMetadataSplits() throws Exception {
     if (null != metadataSplits) {
       log.info("Restoring split on metadata table");
-      AccumuloClient conn = getAccumuloClient();
-      conn.tableOperations().merge(MetadataTable.NAME, null, null);
-      conn.tableOperations().addSplits(MetadataTable.NAME, new TreeSet<>(metadataSplits));
+      AccumuloClient client = getAccumuloClient();
+      client.tableOperations().merge(MetadataTable.NAME, null, null);
+      client.tableOperations().addSplits(MetadataTable.NAME, new TreeSet<>(metadataSplits));
     }
   }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java b/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
index f6e8216..6180060 100644
--- a/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
@@ -78,11 +78,11 @@ public class MissingWalHeaderCompletesRecoveryIT extends ConfigurableMacBase {
 
   @Before
   public void setupMetadataPermission() throws Exception {
-    AccumuloClient conn = getClient();
-    rootHasWritePermission = conn.securityOperations().hasTablePermission("root",
+    AccumuloClient client = getClient();
+    rootHasWritePermission = client.securityOperations().hasTablePermission("root",
         MetadataTable.NAME, TablePermission.WRITE);
     if (!rootHasWritePermission) {
-      conn.securityOperations().grantTablePermission("root", MetadataTable.NAME,
+      client.securityOperations().grantTablePermission("root", MetadataTable.NAME,
           TablePermission.WRITE);
       // Make sure it propagates through ZK
       Thread.sleep(5000);
@@ -91,17 +91,17 @@ public class MissingWalHeaderCompletesRecoveryIT extends ConfigurableMacBase {
 
   @After
   public void resetMetadataPermission() throws Exception {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     // Final state doesn't match the original
-    if (rootHasWritePermission != conn.securityOperations().hasTablePermission("root",
+    if (rootHasWritePermission != client.securityOperations().hasTablePermission("root",
         MetadataTable.NAME, TablePermission.WRITE)) {
       if (rootHasWritePermission) {
         // root had write permission when starting, ensure root still does
-        conn.securityOperations().grantTablePermission("root", MetadataTable.NAME,
+        client.securityOperations().grantTablePermission("root", MetadataTable.NAME,
             TablePermission.WRITE);
       } else {
         // root did not have write permission when starting, ensure that it does not
-        conn.securityOperations().revokeTablePermission("root", MetadataTable.NAME,
+        client.securityOperations().revokeTablePermission("root", MetadataTable.NAME,
             TablePermission.WRITE);
       }
     }
@@ -109,7 +109,7 @@ public class MissingWalHeaderCompletesRecoveryIT extends ConfigurableMacBase {
 
   @Test
   public void testEmptyWalRecoveryCompletes() throws Exception {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     MiniAccumuloClusterImpl cluster = getCluster();
     FileSystem fs = cluster.getFileSystem();
 
@@ -125,20 +125,20 @@ public class MissingWalHeaderCompletesRecoveryIT extends ConfigurableMacBase {
     fs.create(new Path(emptyWalog.toURI())).close();
 
     assertTrue("root user did not have write permission to metadata table",
-        conn.securityOperations().hasTablePermission("root", MetadataTable.NAME,
+        client.securityOperations().hasTablePermission("root", MetadataTable.NAME,
             TablePermission.WRITE));
 
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
+    Table.ID tableId = Table.ID.of(client.tableOperations().tableIdMap().get(tableName));
     assertNotNull("Table ID was null", tableId);
 
     LogEntry logEntry = new LogEntry(new KeyExtent(tableId, null, null), 0, "127.0.0.1:12345",
         emptyWalog.toURI().toString());
 
     log.info("Taking {} offline", tableName);
-    conn.tableOperations().offline(tableName, true);
+    client.tableOperations().offline(tableName, true);
 
     log.info("{} is offline", tableName);
 
@@ -146,25 +146,25 @@ public class MissingWalHeaderCompletesRecoveryIT extends ConfigurableMacBase {
     Mutation m = new Mutation(row);
     m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
 
-    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     bw.addMutation(m);
     bw.close();
 
     log.info("Bringing {} online", tableName);
-    conn.tableOperations().online(tableName, true);
+    client.tableOperations().online(tableName, true);
 
     log.info("{} is online", tableName);
 
     // Reading the table implies that recovery completed successfully (the empty file was ignored)
     // otherwise the tablet will never come online and we won't be able to read it.
-    try (Scanner s = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner s = client.createScanner(tableName, Authorizations.EMPTY)) {
       assertEquals(0, Iterables.size(s));
     }
   }
 
   @Test
   public void testPartialHeaderWalRecoveryCompletes() throws Exception {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     MiniAccumuloClusterImpl cluster = getCluster();
     FileSystem fs = getCluster().getFileSystem();
 
@@ -184,20 +184,20 @@ public class MissingWalHeaderCompletesRecoveryIT extends ConfigurableMacBase {
     wal.close();
 
     assertTrue("root user did not have write permission to metadata table",
-        conn.securityOperations().hasTablePermission("root", MetadataTable.NAME,
+        client.securityOperations().hasTablePermission("root", MetadataTable.NAME,
             TablePermission.WRITE));
 
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
+    Table.ID tableId = Table.ID.of(client.tableOperations().tableIdMap().get(tableName));
     assertNotNull("Table ID was null", tableId);
 
     LogEntry logEntry = new LogEntry(null, 0, "127.0.0.1:12345",
         partialHeaderWalog.toURI().toString());
 
     log.info("Taking {} offline", tableName);
-    conn.tableOperations().offline(tableName, true);
+    client.tableOperations().offline(tableName, true);
 
     log.info("{} is offline", tableName);
 
@@ -205,18 +205,18 @@ public class MissingWalHeaderCompletesRecoveryIT extends ConfigurableMacBase {
     Mutation m = new Mutation(row);
     m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
 
-    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     bw.addMutation(m);
     bw.close();
 
     log.info("Bringing {} online", tableName);
-    conn.tableOperations().online(tableName, true);
+    client.tableOperations().online(tableName, true);
 
     log.info("{} is online", tableName);
 
     // Reading the table implies that recovery completed successfully (the empty file was ignored)
     // otherwise the tablet will never come online and we won't be able to read it.
-    try (Scanner s = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner s = client.createScanner(tableName, Authorizations.EMPTY)) {
       assertEquals(0, Iterables.size(s));
     }
   }
diff --git a/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java b/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
index 5420d2d..80c17ca 100644
--- a/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
@@ -104,7 +104,7 @@ public class NamespacesIT extends AccumuloClusterHarness {
   public void setupConnectorAndNamespace() throws Exception {
     Assume.assumeTrue(ClusterType.MINI == getClusterType());
 
-    // prepare a unique namespace and get a new root connector for each test
+    // prepare a unique namespace and get a new root client for each test
     c = getAccumuloClient();
     namespace = "ns_" + getUniqueNames(1)[0];
   }
diff --git a/test/src/main/java/org/apache/accumulo/test/NewTableConfigurationIT.java b/test/src/main/java/org/apache/accumulo/test/NewTableConfigurationIT.java
index 3d0657a..9902c81 100644
--- a/test/src/main/java/org/apache/accumulo/test/NewTableConfigurationIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/NewTableConfigurationIT.java
@@ -80,7 +80,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test
   public void testSetPropertiesOverwriteOlderProperties() throws AccumuloSecurityException,
       AccumuloException, TableExistsException, TableNotFoundException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
     NewTableConfiguration ntc = new NewTableConfiguration();
     Map<String,String> initialProps = new HashMap<>();
@@ -92,7 +92,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
     updatedProps.put(Property.TABLE_ARBITRARY_PROP_PREFIX.getKey() + "newerprop1", "newerval1");
     updatedProps.put(Property.TABLE_ARBITRARY_PROP_PREFIX.getKey() + "newerprop2", "newerval2");
     ntc.setProperties(updatedProps);
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
     // verify
     Map<String,String> props = ntc.getProperties();
     assertEquals(props.get(Property.TABLE_ARBITRARY_PROP_PREFIX.getKey() + "newerprop1"),
@@ -125,7 +125,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test
   public void testSimpleLocalityGroupCreation() throws AccumuloSecurityException, AccumuloException,
       TableExistsException, TableNotFoundException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
     NewTableConfiguration ntc = new NewTableConfiguration();
     // set locality groups map
@@ -134,9 +134,9 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
     lgroups.put("lg2", ImmutableSet.of(new Text("lion"), new Text("tiger")));
     // set groups via NewTableConfiguration
     ntc.setLocalityGroups(lgroups);
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
     // verify
-    Map<String,Set<Text>> createdLocalityGroups = conn.tableOperations()
+    Map<String,Set<Text>> createdLocalityGroups = client.tableOperations()
         .getLocalityGroups(tableName);
     assertEquals(2, createdLocalityGroups.size());
     assertEquals(createdLocalityGroups.get("lg1"),
@@ -151,7 +151,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test
   public void testMulitpleCallsToSetLocalityGroups() throws AccumuloSecurityException,
       AccumuloException, TableExistsException, TableNotFoundException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
     NewTableConfiguration ntc = new NewTableConfiguration();
     // set first locality groups map
@@ -162,9 +162,9 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
     Map<String,Set<Text>> secondGroup = new HashMap<>();
     secondGroup.put("lg1", ImmutableSet.of(new Text("blue"), new Text("red")));
     ntc.setLocalityGroups(secondGroup);
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
     // verify
-    Map<String,Set<Text>> createdLocalityGroups = conn.tableOperations()
+    Map<String,Set<Text>> createdLocalityGroups = client.tableOperations()
         .getLocalityGroups(tableName);
     assertEquals(1, createdLocalityGroups.size());
     assertEquals(createdLocalityGroups.get("lg1"),
@@ -177,7 +177,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test
   public void testSetPropertiesAndGroups() throws AccumuloSecurityException, AccumuloException,
       TableExistsException, TableNotFoundException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
     NewTableConfiguration ntc = new NewTableConfiguration();
 
@@ -189,10 +189,10 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
     Map<String,Set<Text>> lgroups = new HashMap<>();
     lgroups.put("lg1", ImmutableSet.of(new Text("dog")));
     ntc.setLocalityGroups(lgroups);
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
     // verify
     int count = 0;
-    for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
+    for (Entry<String,String> property : client.tableOperations().getProperties(tableName)) {
       if (property.getKey().equals("table.group.lg1")) {
         assertEquals(property.getValue(), "dog");
         count++;
@@ -211,7 +211,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
       }
     }
     assertEquals(4, count);
-    Map<String,Set<Text>> createdLocalityGroups = conn.tableOperations()
+    Map<String,Set<Text>> createdLocalityGroups = client.tableOperations()
         .getLocalityGroups(tableName);
     assertEquals(1, createdLocalityGroups.size());
     assertEquals(createdLocalityGroups.get("lg1"), ImmutableSet.of(new Text("dog")));
@@ -243,20 +243,21 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test
   public void testSetGroupsWithoutDefaultIterators() throws AccumuloSecurityException,
       AccumuloException, TableExistsException, TableNotFoundException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
     NewTableConfiguration ntc = new NewTableConfiguration().withoutDefaultIterators();
 
     Map<String,Set<Text>> lgroups = new HashMap<>();
     lgroups.put("lg1", ImmutableSet.of(new Text("colF")));
     ntc.setLocalityGroups(lgroups);
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
     // verify groups and verify no iterators
-    Map<String,Set<Text>> createdLocalityGroups = conn.tableOperations()
+    Map<String,Set<Text>> createdLocalityGroups = client.tableOperations()
         .getLocalityGroups(tableName);
     assertEquals(1, createdLocalityGroups.size());
     assertEquals(createdLocalityGroups.get("lg1"), ImmutableSet.of(new Text("colF")));
-    Map<String,EnumSet<IteratorScope>> iterators = conn.tableOperations().listIterators(tableName);
+    Map<String,EnumSet<IteratorScope>> iterators = client.tableOperations()
+        .listIterators(tableName);
     assertEquals(0, iterators.size());
   }
 
@@ -267,23 +268,24 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test
   public void testPreconfigureIteratorWithDefaultIterator1() throws AccumuloException,
       TableNotFoundException, AccumuloSecurityException, TableExistsException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
 
     NewTableConfiguration ntc = new NewTableConfiguration();
     ntc.attachIterator(new IteratorSetting(10, "anIterator", "it.class", Collections.emptyMap()),
         EnumSet.of(IteratorScope.scan));
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
 
-    Map<String,EnumSet<IteratorScope>> iteratorList = conn.tableOperations()
+    Map<String,EnumSet<IteratorScope>> iteratorList = client.tableOperations()
         .listIterators(tableName);
     // should count the created iterator plus the default iterator
     assertEquals(2, iteratorList.size());
-    verifyIterators(conn, tableName, new String[] {"table.iterator.scan.anIterator=10,it.class"},
+    verifyIterators(client, tableName, new String[] {"table.iterator.scan.anIterator=10,it.class"},
         true);
-    conn.tableOperations().removeIterator(tableName, "anIterator", EnumSet.of(IteratorScope.scan));
-    verifyIterators(conn, tableName, new String[] {}, true);
-    iteratorList = conn.tableOperations().listIterators(tableName);
+    client.tableOperations().removeIterator(tableName, "anIterator",
+        EnumSet.of(IteratorScope.scan));
+    verifyIterators(client, tableName, new String[] {}, true);
+    iteratorList = client.tableOperations().listIterators(tableName);
     assertEquals(1, iteratorList.size());
   }
 
@@ -294,24 +296,24 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test
   public void testPreconfiguredIteratorWithDefaultIterator2() throws AccumuloException,
       TableNotFoundException, AccumuloSecurityException, TableExistsException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
 
     NewTableConfiguration ntc = new NewTableConfiguration();
     IteratorSetting setting = new IteratorSetting(10, "someName", "foo.bar");
     ntc.attachIterator(setting);
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
 
-    Map<String,EnumSet<IteratorScope>> iteratorList = conn.tableOperations()
+    Map<String,EnumSet<IteratorScope>> iteratorList = client.tableOperations()
         .listIterators(tableName);
     // should count the created iterator plus the default iterator
     assertEquals(2, iteratorList.size());
-    verifyIterators(conn, tableName, new String[] {"table.iterator.scan.someName=10,foo.bar"},
+    verifyIterators(client, tableName, new String[] {"table.iterator.scan.someName=10,foo.bar"},
         true);
-    conn.tableOperations().removeIterator(tableName, "someName",
+    client.tableOperations().removeIterator(tableName, "someName",
         EnumSet.allOf((IteratorScope.class)));
-    verifyIterators(conn, tableName, new String[] {}, true);
-    Map<String,EnumSet<IteratorScope>> iteratorList2 = conn.tableOperations()
+    verifyIterators(client, tableName, new String[] {}, true);
+    Map<String,EnumSet<IteratorScope>> iteratorList2 = client.tableOperations()
         .listIterators(tableName);
     assertEquals(1, iteratorList2.size());
   }
@@ -323,23 +325,23 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test
   public void testPreconfiguredIteratorWithDefaultIterator3() throws AccumuloException,
       TableNotFoundException, AccumuloSecurityException, TableExistsException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
 
     NewTableConfiguration ntc = new NewTableConfiguration();
     IteratorSetting setting = new IteratorSetting(10, "someName", "foo.bar");
     ntc.attachIterator(setting, EnumSet.of(IteratorScope.scan));
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
 
-    verifyIterators(conn, tableName, new String[] {"table.iterator.scan.someName=10,foo.bar"},
+    verifyIterators(client, tableName, new String[] {"table.iterator.scan.someName=10,foo.bar"},
         true);
-    Map<String,EnumSet<IteratorScope>> iteratorList = conn.tableOperations()
+    Map<String,EnumSet<IteratorScope>> iteratorList = client.tableOperations()
         .listIterators(tableName);
     assertEquals(2, iteratorList.size());
     assertEquals(iteratorList.get("someName"), EnumSet.of(IteratorScope.scan));
-    conn.tableOperations().removeIterator(tableName, "someName", EnumSet.of(IteratorScope.scan));
-    verifyIterators(conn, tableName, new String[] {}, true);
-    iteratorList = conn.tableOperations().listIterators(tableName);
+    client.tableOperations().removeIterator(tableName, "someName", EnumSet.of(IteratorScope.scan));
+    verifyIterators(client, tableName, new String[] {}, true);
+    iteratorList = client.tableOperations().listIterators(tableName);
     assertEquals(1, iteratorList.size());
   }
 
@@ -349,7 +351,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test
   public void testSettingInitialIteratorWithAdditionalIteratorOptions() throws AccumuloException,
       TableNotFoundException, AccumuloSecurityException, TableExistsException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
 
     NewTableConfiguration ntc = new NewTableConfiguration();
@@ -357,11 +359,11 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
     setting.addOptions(Collections.singletonMap("key", "value"));
     ntc.attachIterator(setting);
 
-    conn.tableOperations().create(tableName, ntc);
-    verifyIterators(conn, tableName, new String[] {"table.iterator.scan.someName=10,foo.bar",
+    client.tableOperations().create(tableName, ntc);
+    verifyIterators(client, tableName, new String[] {"table.iterator.scan.someName=10,foo.bar",
         "table.iterator.scan.someName.opt.key=value"}, true);
-    conn.tableOperations().removeIterator(tableName, "someName", EnumSet.of(IteratorScope.scan));
-    verifyIterators(conn, tableName, new String[] {}, true);
+    client.tableOperations().removeIterator(tableName, "someName", EnumSet.of(IteratorScope.scan));
+    verifyIterators(client, tableName, new String[] {}, true);
   }
 
   /**
@@ -370,23 +372,23 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test
   public void testSetIteratorWithoutDefaultIterators() throws AccumuloException,
       TableNotFoundException, AccumuloSecurityException, TableExistsException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
 
     NewTableConfiguration ntc = new NewTableConfiguration().withoutDefaultIterators();
     IteratorSetting setting = new IteratorSetting(10, "myIterator", "my.class");
     ntc.attachIterator(setting);
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
 
-    Map<String,EnumSet<IteratorScope>> iteratorList = conn.tableOperations()
+    Map<String,EnumSet<IteratorScope>> iteratorList = client.tableOperations()
         .listIterators(tableName);
     assertEquals(1, iteratorList.size());
-    verifyIterators(conn, tableName, new String[] {"table.iterator.scan.myIterator=10,my.class"},
+    verifyIterators(client, tableName, new String[] {"table.iterator.scan.myIterator=10,my.class"},
         false);
-    conn.tableOperations().removeIterator(tableName, "myIterator",
+    client.tableOperations().removeIterator(tableName, "myIterator",
         EnumSet.allOf(IteratorScope.class));
-    verifyIterators(conn, tableName, new String[] {}, false);
-    Map<String,EnumSet<IteratorScope>> iteratorList2 = conn.tableOperations()
+    verifyIterators(client, tableName, new String[] {}, false);
+    Map<String,EnumSet<IteratorScope>> iteratorList2 = client.tableOperations()
         .listIterators(tableName);
     assertEquals(0, iteratorList2.size());
   }
@@ -397,7 +399,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test
   public void testSettingIteratorAndProperties() throws AccumuloException, TableNotFoundException,
       AccumuloSecurityException, TableExistsException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
 
     NewTableConfiguration ntc = new NewTableConfiguration();
@@ -409,10 +411,10 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
     props.put(Property.TABLE_ARBITRARY_PROP_PREFIX.getKey() + "prop2", "val2");
     ntc.setProperties(props);
 
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
 
     int count = 0;
-    for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
+    for (Entry<String,String> property : client.tableOperations().getProperties(tableName)) {
       if (property.getKey().equals(Property.TABLE_ARBITRARY_PROP_PREFIX.getKey() + "prop1")) {
         assertEquals(property.getValue(), "val1");
         count++;
@@ -423,10 +425,10 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
       }
     }
     assertEquals(2, count);
-    verifyIterators(conn, tableName, new String[] {"table.iterator.scan.someName=10,foo.bar"},
+    verifyIterators(client, tableName, new String[] {"table.iterator.scan.someName=10,foo.bar"},
         true);
-    conn.tableOperations().removeIterator(tableName, "someName", EnumSet.of(IteratorScope.scan));
-    verifyIterators(conn, tableName, new String[] {}, true);
+    client.tableOperations().removeIterator(tableName, "someName", EnumSet.of(IteratorScope.scan));
+    verifyIterators(client, tableName, new String[] {}, true);
   }
 
   /**
@@ -435,7 +437,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test(expected = IllegalArgumentException.class)
   public void testIteratorConflictFound1() throws AccumuloException, TableNotFoundException,
       AccumuloSecurityException, TableExistsException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
 
     NewTableConfiguration ntc = new NewTableConfiguration();
@@ -443,13 +445,13 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
     ntc.attachIterator(setting, EnumSet.of(IteratorScope.scan));
     setting = new IteratorSetting(12, "someName", "foo2.bar");
     ntc.attachIterator(setting, EnumSet.of(IteratorScope.scan));
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
   }
 
   @Test(expected = IllegalArgumentException.class)
   public void testIteratorConflictFound2() throws AccumuloException, TableNotFoundException,
       AccumuloSecurityException, TableExistsException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
 
     NewTableConfiguration ntc = new NewTableConfiguration();
@@ -457,13 +459,13 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
     ntc.attachIterator(setting, EnumSet.of(IteratorScope.scan));
     setting = new IteratorSetting(10, "anotherName", "foo2.bar");
     ntc.attachIterator(setting, EnumSet.of(IteratorScope.scan));
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
   }
 
   @Test(expected = IllegalArgumentException.class)
   public void testIteratorConflictFound3() throws AccumuloException, TableNotFoundException,
       AccumuloSecurityException, TableExistsException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
 
     NewTableConfiguration ntc = new NewTableConfiguration();
@@ -471,7 +473,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
     ntc.attachIterator(setting, EnumSet.of(IteratorScope.scan));
     setting = new IteratorSetting(12, "someName", "foo.bar");
     ntc.attachIterator(setting, EnumSet.of(IteratorScope.scan));
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
   }
 
   /**
@@ -481,7 +483,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test
   public void testMultipleIteratorValid() throws AccumuloException, TableNotFoundException,
       AccumuloSecurityException, TableExistsException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
 
     NewTableConfiguration ntc = new NewTableConfiguration();
@@ -490,18 +492,18 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
     setting = new IteratorSetting(11, "secondIterator", "second.class");
     ntc.attachIterator(setting, EnumSet.of(IteratorScope.scan));
 
-    conn.tableOperations().create(tableName, ntc);
-    verifyIterators(conn, tableName,
+    client.tableOperations().create(tableName, ntc);
+    verifyIterators(client, tableName,
         new String[] {"table.iterator.scan.firstIterator=10,first.class",
             "table.iterator.scan.secondIterator=11,second.class"},
         true);
-    conn.tableOperations().removeIterator(tableName, "firstIterator",
+    client.tableOperations().removeIterator(tableName, "firstIterator",
         EnumSet.of(IteratorScope.scan));
-    verifyIterators(conn, tableName,
+    verifyIterators(client, tableName,
         new String[] {"table.iterator.scan.secondIterator=11,second.class"}, true);
-    conn.tableOperations().removeIterator(tableName, "secondIterator",
+    client.tableOperations().removeIterator(tableName, "secondIterator",
         EnumSet.of(IteratorScope.scan));
-    verifyIterators(conn, tableName, new String[] {}, true);
+    verifyIterators(client, tableName, new String[] {}, true);
   }
 
   /**
@@ -510,7 +512,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test
   public void testGroupsIteratorAndPropsTogether() throws AccumuloException, TableNotFoundException,
       AccumuloSecurityException, TableExistsException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
 
     NewTableConfiguration ntc = new NewTableConfiguration();
@@ -522,10 +524,10 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
     Map<String,Set<Text>> lgroups = new HashMap<>();
     lgroups.put("lg1", ImmutableSet.of(new Text("colF")));
     ntc.setLocalityGroups(lgroups);
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
     // verify user table properties
     int count = 0;
-    for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
+    for (Entry<String,String> property : client.tableOperations().getProperties(tableName)) {
       if (property.getKey().equals(Property.TABLE_ARBITRARY_PROP_PREFIX.getKey() + "prop1")) {
         assertEquals(property.getValue(), "val1");
         count++;
@@ -533,15 +535,15 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
     }
     assertEquals(1, count);
     // verify locality groups
-    Map<String,Set<Text>> createdLocalityGroups = conn.tableOperations()
+    Map<String,Set<Text>> createdLocalityGroups = client.tableOperations()
         .getLocalityGroups(tableName);
     assertEquals(1, createdLocalityGroups.size());
     assertEquals(createdLocalityGroups.get("lg1"), ImmutableSet.of(new Text("colF")));
     // verify iterators
-    verifyIterators(conn, tableName, new String[] {"table.iterator.scan.someName=10,foo.bar"},
+    verifyIterators(client, tableName, new String[] {"table.iterator.scan.someName=10,foo.bar"},
         true);
-    conn.tableOperations().removeIterator(tableName, "someName", EnumSet.of(IteratorScope.scan));
-    verifyIterators(conn, tableName, new String[] {}, true);
+    client.tableOperations().removeIterator(tableName, "someName", EnumSet.of(IteratorScope.scan));
+    verifyIterators(client, tableName, new String[] {}, true);
   }
 
   /**
@@ -550,7 +552,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   @Test
   public void testNtcChaining() throws AccumuloException, AccumuloSecurityException,
       TableExistsException, TableNotFoundException {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = getUniqueNames(2)[0];
 
     IteratorSetting setting = new IteratorSetting(10, "anIterator", "it.class",
@@ -561,20 +563,21 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
     NewTableConfiguration ntc = new NewTableConfiguration().withoutDefaultIterators()
         .attachIterator(setting, EnumSet.of(IteratorScope.scan)).setLocalityGroups(lgroups);
 
-    conn.tableOperations().create(tableName, ntc);
+    client.tableOperations().create(tableName, ntc);
 
-    Map<String,EnumSet<IteratorScope>> iteratorList = conn.tableOperations()
+    Map<String,EnumSet<IteratorScope>> iteratorList = client.tableOperations()
         .listIterators(tableName);
     assertEquals(1, iteratorList.size());
-    verifyIterators(conn, tableName, new String[] {"table.iterator.scan.anIterator=10,it.class"},
+    verifyIterators(client, tableName, new String[] {"table.iterator.scan.anIterator=10,it.class"},
         false);
-    conn.tableOperations().removeIterator(tableName, "anIterator", EnumSet.of(IteratorScope.scan));
-    verifyIterators(conn, tableName, new String[] {}, false);
-    iteratorList = conn.tableOperations().listIterators(tableName);
+    client.tableOperations().removeIterator(tableName, "anIterator",
+        EnumSet.of(IteratorScope.scan));
+    verifyIterators(client, tableName, new String[] {}, false);
+    iteratorList = client.tableOperations().listIterators(tableName);
     assertEquals(0, iteratorList.size());
 
     int count = 0;
-    for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
+    for (Entry<String,String> property : client.tableOperations().getProperties(tableName)) {
       if (property.getKey().equals("table.group.lgp")) {
         assertEquals(property.getValue(), "col");
         count++;
@@ -585,7 +588,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
       }
     }
     assertEquals(2, count);
-    Map<String,Set<Text>> createdLocalityGroups = conn.tableOperations()
+    Map<String,Set<Text>> createdLocalityGroups = client.tableOperations()
         .getLocalityGroups(tableName);
     assertEquals(1, createdLocalityGroups.size());
     assertEquals(createdLocalityGroups.get("lgp"), ImmutableSet.of(new Text("col")));
@@ -642,7 +645,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
   /**
    * Verify the expected iterator properties exist.
    */
-  private void verifyIterators(AccumuloClient conn, String tablename, String[] values,
+  private void verifyIterators(AccumuloClient client, String tablename, String[] values,
       boolean withDefaultIts) throws AccumuloException, TableNotFoundException {
     Map<String,String> expected = new TreeMap<>();
     if (withDefaultIts) {
@@ -656,7 +659,7 @@ public class NewTableConfigurationIT extends SharedMiniClusterBase {
     }
 
     Map<String,String> actual = new TreeMap<>();
-    for (Entry<String,String> entry : this.getProperties(conn, tablename).entrySet()) {
+    for (Entry<String,String> entry : this.getProperties(client, tablename).entrySet()) {
       if (entry.getKey().contains("table.iterator.scan.")) {
         actual.put(entry.getKey(), entry.getValue());
       }
diff --git a/test/src/main/java/org/apache/accumulo/test/OrIteratorIT.java b/test/src/main/java/org/apache/accumulo/test/OrIteratorIT.java
index 66a1325..dc8140d 100644
--- a/test/src/main/java/org/apache/accumulo/test/OrIteratorIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/OrIteratorIT.java
@@ -59,11 +59,11 @@ public class OrIteratorIT extends AccumuloClusterHarness {
 
   @Test
   public void testMultipleRowsInTablet() throws Exception {
-    final AccumuloClient conn = getAccumuloClient();
+    final AccumuloClient client = getAccumuloClient();
     final String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    try (BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
+    try (BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig())) {
       Mutation m = new Mutation("row1");
       m.put("bob", "2", EMPTY);
       m.put("frank", "3", EMPTY);
@@ -84,7 +84,7 @@ public class OrIteratorIT extends AccumuloClusterHarness {
     expectedData.put("frank", "3");
     expectedData.put("mort", "6");
 
-    try (BatchScanner bs = conn.createBatchScanner(tableName, Authorizations.EMPTY, 1)) {
+    try (BatchScanner bs = client.createBatchScanner(tableName, Authorizations.EMPTY, 1)) {
       Set<Range> ranges = new HashSet<>(Arrays.asList(Range.exact("row1"), Range.exact("row2")));
       bs.setRanges(ranges);
       bs.addScanIterator(is);
@@ -100,11 +100,11 @@ public class OrIteratorIT extends AccumuloClusterHarness {
 
   @Test
   public void testMultipleTablets() throws Exception {
-    final AccumuloClient conn = getAccumuloClient();
+    final AccumuloClient client = getAccumuloClient();
     final String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    try (BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
+    try (BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig())) {
       Mutation m = new Mutation("row1");
       m.put("bob", "2", EMPTY);
       m.put("frank", "3", EMPTY);
@@ -126,7 +126,7 @@ public class OrIteratorIT extends AccumuloClusterHarness {
       bw.addMutation(m);
     }
 
-    conn.tableOperations().addSplits(tableName,
+    client.tableOperations().addSplits(tableName,
         new TreeSet<>(Arrays.asList(new Text("row2"), new Text("row3"))));
 
     IteratorSetting is = new IteratorSetting(50, OrIterator.class);
@@ -136,7 +136,7 @@ public class OrIteratorIT extends AccumuloClusterHarness {
     expectedData.put("mort", "6");
     expectedData.put("nick", "3");
 
-    try (BatchScanner bs = conn.createBatchScanner(tableName, Authorizations.EMPTY, 1)) {
+    try (BatchScanner bs = client.createBatchScanner(tableName, Authorizations.EMPTY, 1)) {
       bs.setRanges(Collections.singleton(new Range()));
       bs.addScanIterator(is);
       for (Entry<Key,Value> entry : bs) {
@@ -151,12 +151,12 @@ public class OrIteratorIT extends AccumuloClusterHarness {
 
   @Test
   public void testSingleLargeRow() throws Exception {
-    final AccumuloClient conn = getAccumuloClient();
+    final AccumuloClient client = getAccumuloClient();
     final String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
-    conn.tableOperations().setProperty(tableName, Property.TABLE_SCAN_MAXMEM.getKey(), "1");
+    client.tableOperations().create(tableName);
+    client.tableOperations().setProperty(tableName, Property.TABLE_SCAN_MAXMEM.getKey(), "1");
 
-    try (BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
+    try (BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig())) {
       Mutation m = new Mutation("row1");
       m.put("bob", "02", EMPTY);
       m.put("carl", "07", EMPTY);
@@ -182,7 +182,7 @@ public class OrIteratorIT extends AccumuloClusterHarness {
     expectedData.put("nick", "12");
     expectedData.put("richard", "18");
 
-    try (BatchScanner bs = conn.createBatchScanner(tableName, Authorizations.EMPTY, 1)) {
+    try (BatchScanner bs = client.createBatchScanner(tableName, Authorizations.EMPTY, 1)) {
       bs.setRanges(Collections.singleton(new Range()));
       bs.addScanIterator(is);
       for (Entry<Key,Value> entry : bs) {
@@ -198,11 +198,11 @@ public class OrIteratorIT extends AccumuloClusterHarness {
 
   @Test
   public void testNoMatchesForTable() throws Exception {
-    final AccumuloClient conn = getAccumuloClient();
+    final AccumuloClient client = getAccumuloClient();
     final String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    try (BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
+    try (BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig())) {
       Mutation m = new Mutation("row1");
       m.put("bob", "02", EMPTY);
       m.put("carl", "07", EMPTY);
@@ -222,7 +222,7 @@ public class OrIteratorIT extends AccumuloClusterHarness {
     is.addOption(OrIterator.COLUMNS_KEY, "theresa,sally");
     Map<String,String> expectedData = Collections.emptyMap();
 
-    try (BatchScanner bs = conn.createBatchScanner(tableName, Authorizations.EMPTY, 1)) {
+    try (BatchScanner bs = client.createBatchScanner(tableName, Authorizations.EMPTY, 1)) {
       bs.setRanges(Collections.singleton(new Range()));
       bs.addScanIterator(is);
       for (Entry<Key,Value> entry : bs) {
@@ -238,11 +238,11 @@ public class OrIteratorIT extends AccumuloClusterHarness {
 
   @Test
   public void testNoMatchesInSingleTablet() throws Exception {
-    final AccumuloClient conn = getAccumuloClient();
+    final AccumuloClient client = getAccumuloClient();
     final String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    try (BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
+    try (BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig())) {
       Mutation m = new Mutation("row1");
       m.put("bob", "02", EMPTY);
       m.put("carl", "07", EMPTY);
@@ -273,10 +273,10 @@ public class OrIteratorIT extends AccumuloClusterHarness {
     expectedData.put("steve", "01");
 
     // Split each row into its own tablet
-    conn.tableOperations().addSplits(tableName,
+    client.tableOperations().addSplits(tableName,
         new TreeSet<>(Arrays.asList(new Text("row2"), new Text("row3"))));
 
-    try (BatchScanner bs = conn.createBatchScanner(tableName, Authorizations.EMPTY, 1)) {
+    try (BatchScanner bs = client.createBatchScanner(tableName, Authorizations.EMPTY, 1)) {
       bs.setRanges(Collections.singleton(new Range()));
       bs.addScanIterator(is);
       for (Entry<Key,Value> entry : bs) {
@@ -292,11 +292,11 @@ public class OrIteratorIT extends AccumuloClusterHarness {
 
   @Test
   public void testResultOrder() throws Exception {
-    final AccumuloClient conn = getAccumuloClient();
+    final AccumuloClient client = getAccumuloClient();
     final String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    try (BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
+    try (BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig())) {
       Mutation m = new Mutation("row1");
       m.put("bob", "2", EMPTY);
       m.put("frank", "3", EMPTY);
@@ -307,7 +307,7 @@ public class OrIteratorIT extends AccumuloClusterHarness {
     IteratorSetting is = new IteratorSetting(50, OrIterator.class);
     is.addOption(OrIterator.COLUMNS_KEY, "bob,steve");
 
-    try (Scanner s = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner s = client.createScanner(tableName, Authorizations.EMPTY)) {
       s.addScanIterator(is);
       Iterator<Entry<Key,Value>> iter = s.iterator();
       assertTrue(iter.hasNext());
diff --git a/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java b/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
index 4b5567c..b8ea0d7 100644
--- a/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
+++ b/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
@@ -52,11 +52,11 @@ public class QueryMetadataTable {
 
   static class MDTQuery implements Runnable {
 
-    private AccumuloClient conn;
+    private AccumuloClient client;
     private Text row;
 
-    MDTQuery(AccumuloClient conn, Text row) {
-      this.conn = conn;
+    MDTQuery(AccumuloClient client, Text row) {
+      this.client = client;
       this.row = row;
     }
 
@@ -66,7 +66,7 @@ public class QueryMetadataTable {
       try {
         KeyExtent extent = new KeyExtent(row, (Text) null);
 
-        mdScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+        mdScanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
         Text row = extent.getMetadataEntry();
 
         mdScanner.setRange(new Range(row));
diff --git a/test/src/main/java/org/apache/accumulo/test/SampleIT.java b/test/src/main/java/org/apache/accumulo/test/SampleIT.java
index 43089d6..c6c22bf 100644
--- a/test/src/main/java/org/apache/accumulo/test/SampleIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/SampleIT.java
@@ -124,36 +124,36 @@ public class SampleIT extends AccumuloClusterHarness {
   @Test
   public void testBasic() throws Exception {
 
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
     String clone = tableName + "_clone";
 
-    conn.tableOperations().create(tableName, new NewTableConfiguration().enableSampling(SC1));
+    client.tableOperations().create(tableName, new NewTableConfiguration().enableSampling(SC1));
 
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     TreeMap<Key,Value> expected = new TreeMap<>();
     String someRow = writeData(bw, SC1, expected);
     assertEquals(20, expected.size());
 
-    Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
-    Scanner isoScanner = new IsolatedScanner(conn.createScanner(tableName, Authorizations.EMPTY));
+    Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY);
+    Scanner isoScanner = new IsolatedScanner(client.createScanner(tableName, Authorizations.EMPTY));
     Scanner csiScanner = new ClientSideIteratorScanner(
-        conn.createScanner(tableName, Authorizations.EMPTY));
+        client.createScanner(tableName, Authorizations.EMPTY));
     scanner.setSamplerConfiguration(SC1);
     csiScanner.setSamplerConfiguration(SC1);
     isoScanner.setSamplerConfiguration(SC1);
     isoScanner.setBatchSize(10);
 
-    BatchScanner bScanner = conn.createBatchScanner(tableName, Authorizations.EMPTY, 2);
+    BatchScanner bScanner = client.createBatchScanner(tableName, Authorizations.EMPTY, 2);
     bScanner.setSamplerConfiguration(SC1);
     bScanner.setRanges(Arrays.asList(new Range()));
 
     check(expected, scanner, bScanner, isoScanner, csiScanner);
 
-    conn.tableOperations().flush(tableName, null, null, true);
+    client.tableOperations().flush(tableName, null, null, true);
 
-    Scanner oScanner = newOfflineScanner(conn, tableName, clone, SC1);
+    Scanner oScanner = newOfflineScanner(client, tableName, clone, SC1);
     check(expected, scanner, bScanner, isoScanner, csiScanner, oScanner);
 
     // ensure non sample data can be scanned after scanning sample data
@@ -185,9 +185,9 @@ public class SampleIT extends AccumuloClusterHarness {
 
     check(expected, scanner, bScanner, isoScanner, csiScanner);
 
-    conn.tableOperations().flush(tableName, null, null, true);
+    client.tableOperations().flush(tableName, null, null, true);
 
-    oScanner = newOfflineScanner(conn, tableName, clone, SC1);
+    oScanner = newOfflineScanner(client, tableName, clone, SC1);
     check(expected, scanner, bScanner, isoScanner, csiScanner, oScanner);
 
     scanner.setRange(new Range(someRow));
@@ -204,17 +204,17 @@ public class SampleIT extends AccumuloClusterHarness {
     check(expected, scanner, bScanner, isoScanner, csiScanner, oScanner);
   }
 
-  private Scanner newOfflineScanner(AccumuloClient conn, String tableName, String clone,
+  private Scanner newOfflineScanner(AccumuloClient client, String tableName, String clone,
       SamplerConfiguration sc) throws Exception {
-    if (conn.tableOperations().exists(clone)) {
-      conn.tableOperations().delete(clone);
+    if (client.tableOperations().exists(clone)) {
+      client.tableOperations().delete(clone);
     }
     Map<String,String> em = Collections.emptyMap();
     Set<String> es = Collections.emptySet();
-    conn.tableOperations().clone(tableName, clone, false, em, es);
-    conn.tableOperations().offline(clone, true);
-    Table.ID cloneID = Table.ID.of(conn.tableOperations().tableIdMap().get(clone));
-    ClientContext context = new ClientContext(conn.info());
+    client.tableOperations().clone(tableName, clone, false, em, es);
+    client.tableOperations().offline(clone, true);
+    Table.ID cloneID = Table.ID.of(client.tableOperations().tableIdMap().get(clone));
+    ClientContext context = new ClientContext(client.info());
     OfflineScanner oScanner = new OfflineScanner(context, cloneID, Authorizations.EMPTY);
     if (sc != null) {
       oScanner.setSamplerConfiguration(sc);
@@ -306,13 +306,13 @@ public class SampleIT extends AccumuloClusterHarness {
 
   @Test
   public void testIterator() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
     String clone = tableName + "_clone";
 
-    conn.tableOperations().create(tableName, new NewTableConfiguration().enableSampling(SC1));
+    client.tableOperations().create(tableName, new NewTableConfiguration().enableSampling(SC1));
 
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     TreeMap<Key,Value> expected = new TreeMap<>();
     writeData(bw, SC1, expected);
@@ -327,11 +327,11 @@ public class SampleIT extends AccumuloClusterHarness {
     BatchScanner bScanner = null;
     Scanner oScanner = null;
     try {
-      scanner = conn.createScanner(tableName, Authorizations.EMPTY);
-      isoScanner = new IsolatedScanner(conn.createScanner(tableName, Authorizations.EMPTY));
+      scanner = client.createScanner(tableName, Authorizations.EMPTY);
+      isoScanner = new IsolatedScanner(client.createScanner(tableName, Authorizations.EMPTY));
       csiScanner = new ClientSideIteratorScanner(
-          conn.createScanner(tableName, Authorizations.EMPTY));
-      bScanner = conn.createBatchScanner(tableName, Authorizations.EMPTY, 2);
+          client.createScanner(tableName, Authorizations.EMPTY));
+      bScanner = client.createBatchScanner(tableName, Authorizations.EMPTY, 2);
 
       csiScanner.setIteratorSamplerConfiguration(SC1);
 
@@ -357,9 +357,9 @@ public class SampleIT extends AccumuloClusterHarness {
       }
 
       // flush an rerun same test against files
-      conn.tableOperations().flush(tableName, null, null, true);
+      client.tableOperations().flush(tableName, null, null, true);
 
-      oScanner = newOfflineScanner(conn, tableName, clone, null);
+      oScanner = newOfflineScanner(client, tableName, clone, null);
       oScanner.addScanIterator(new IteratorSetting(100, IteratorThatUsesSample.class));
       scanners = Arrays.asList(scanner, isoScanner, bScanner, csiScanner, oScanner);
 
@@ -373,11 +373,11 @@ public class SampleIT extends AccumuloClusterHarness {
         assertEquals(0, countEntries(s));
       }
 
-      updateSamplingConfig(conn, tableName, SC2);
+      updateSamplingConfig(client, tableName, SC2);
 
       csiScanner.setIteratorSamplerConfiguration(SC2);
 
-      oScanner = newOfflineScanner(conn, tableName, clone, null);
+      oScanner = newOfflineScanner(client, tableName, clone, null);
       oScanner.addScanIterator(new IteratorSetting(100, IteratorThatUsesSample.class));
       scanners = Arrays.asList(scanner, isoScanner, bScanner, csiScanner, oScanner);
 
@@ -418,76 +418,77 @@ public class SampleIT extends AccumuloClusterHarness {
   @Test
   public void testSampleNotPresent() throws Exception {
 
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
     String clone = tableName + "_clone";
 
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     TreeMap<Key,Value> expected = new TreeMap<>();
     writeData(bw, SC1, expected);
 
-    Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
-    Scanner isoScanner = new IsolatedScanner(conn.createScanner(tableName, Authorizations.EMPTY));
+    Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY);
+    Scanner isoScanner = new IsolatedScanner(client.createScanner(tableName, Authorizations.EMPTY));
     isoScanner.setBatchSize(10);
     Scanner csiScanner = new ClientSideIteratorScanner(
-        conn.createScanner(tableName, Authorizations.EMPTY));
-    BatchScanner bScanner = conn.createBatchScanner(tableName, Authorizations.EMPTY, 2);
+        client.createScanner(tableName, Authorizations.EMPTY));
+    BatchScanner bScanner = client.createBatchScanner(tableName, Authorizations.EMPTY, 2);
     bScanner.setRanges(Arrays.asList(new Range()));
 
     // ensure sample not present exception occurs when sampling is not configured
     assertSampleNotPresent(SC1, scanner, isoScanner, bScanner, csiScanner);
 
-    conn.tableOperations().flush(tableName, null, null, true);
+    client.tableOperations().flush(tableName, null, null, true);
 
-    Scanner oScanner = newOfflineScanner(conn, tableName, clone, SC1);
+    Scanner oScanner = newOfflineScanner(client, tableName, clone, SC1);
     assertSampleNotPresent(SC1, scanner, isoScanner, bScanner, csiScanner, oScanner);
 
    // configure sampling, however there exists an rfile w/o sample data... so should still see
     // sample not present exception
 
-    updateSamplingConfig(conn, tableName, SC1);
+    updateSamplingConfig(client, tableName, SC1);
 
     // create clone with new config
-    oScanner = newOfflineScanner(conn, tableName, clone, SC1);
+    oScanner = newOfflineScanner(client, tableName, clone, SC1);
 
     assertSampleNotPresent(SC1, scanner, isoScanner, bScanner, csiScanner, oScanner);
 
     // create rfile with sample data present
-    conn.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
+    client.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
 
     // should be able to scan sample now
-    oScanner = newOfflineScanner(conn, tableName, clone, SC1);
+    oScanner = newOfflineScanner(client, tableName, clone, SC1);
     setSamplerConfig(SC1, scanner, csiScanner, isoScanner, bScanner, oScanner);
     check(expected, scanner, isoScanner, bScanner, csiScanner, oScanner);
 
     // change sampling config
-    updateSamplingConfig(conn, tableName, SC2);
+    updateSamplingConfig(client, tableName, SC2);
 
     // create clone with new config
-    oScanner = newOfflineScanner(conn, tableName, clone, SC2);
+    oScanner = newOfflineScanner(client, tableName, clone, SC2);
 
     // rfile should have different sample config than table, and scan should not work
     assertSampleNotPresent(SC2, scanner, isoScanner, bScanner, csiScanner, oScanner);
 
     // create rfile that has same sample data as table config
-    conn.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
+    client.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
 
     // should be able to scan sample now
     updateExpected(SC2, expected);
-    oScanner = newOfflineScanner(conn, tableName, clone, SC2);
+    oScanner = newOfflineScanner(client, tableName, clone, SC2);
     setSamplerConfig(SC2, scanner, csiScanner, isoScanner, bScanner, oScanner);
     check(expected, scanner, isoScanner, bScanner, csiScanner, oScanner);
   }
 
-  private void updateSamplingConfig(AccumuloClient conn, String tableName, SamplerConfiguration sc)
+  private void updateSamplingConfig(AccumuloClient client, String tableName,
+      SamplerConfiguration sc)
       throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
-    conn.tableOperations().setSamplerConfiguration(tableName, sc);
+    client.tableOperations().setSamplerConfiguration(tableName, sc);
    // wait for config change
-    conn.tableOperations().offline(tableName, true);
-    conn.tableOperations().online(tableName, true);
+    client.tableOperations().offline(tableName, true);
+    client.tableOperations().online(tableName, true);
   }
 
   private void assertSampleNotPresent(SamplerConfiguration sc, ScannerBase... scanners) {
diff --git a/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java b/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java
index 72ab34b..2258db3 100644
--- a/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java
@@ -48,22 +48,22 @@ public class ShellConfigIT extends AccumuloClusterHarness {
 
   @Before
   public void checkProperty() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     // TABLE_VOLUME_CHOOSER is a valid property that can be updated in ZK, whereas the crypto
     // properties are not.
     // This lets us run this test more generically rather than forcibly needing to update some
     // property in accumulo.properties
-    origPropValue = conn.instanceOperations().getSystemConfiguration()
+    origPropValue = client.instanceOperations().getSystemConfiguration()
         .get(PerTableVolumeChooser.TABLE_VOLUME_CHOOSER);
-    conn.instanceOperations().setProperty(PerTableVolumeChooser.TABLE_VOLUME_CHOOSER,
+    client.instanceOperations().setProperty(PerTableVolumeChooser.TABLE_VOLUME_CHOOSER,
         FairVolumeChooser.class.getName());
   }
 
   @After
   public void resetProperty() throws Exception {
     if (null != origPropValue) {
-      AccumuloClient conn = getAccumuloClient();
-      conn.instanceOperations().setProperty(PerTableVolumeChooser.TABLE_VOLUME_CHOOSER,
+      AccumuloClient client = getAccumuloClient();
+      client.instanceOperations().setProperty(PerTableVolumeChooser.TABLE_VOLUME_CHOOSER,
           origPropValue);
     }
   }
diff --git a/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java b/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
index 312193c..daa382a 100644
--- a/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
@@ -303,8 +303,8 @@ public class ShellServerIT extends SharedMiniClusterBase {
 
     traceProcess = getCluster().exec(TraceServer.class);
 
-    AccumuloClient conn = getCluster().getAccumuloClient(getPrincipal(), getToken());
-    TableOperations tops = conn.tableOperations();
+    AccumuloClient client = getCluster().getAccumuloClient(getPrincipal(), getToken());
+    TableOperations tops = client.tableOperations();
 
     // give the tracer some time to start
     while (!tops.exists("trace")) {
@@ -607,7 +607,7 @@ public class ShellServerIT extends SharedMiniClusterBase {
 
   @Test
   public void setIterOptionPrompt() throws Exception {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tableName = name.getMethodName();
 
     ts.exec("createtable " + tableName);
@@ -621,7 +621,7 @@ public class ShellServerIT extends SharedMiniClusterBase {
 
     String expectedKey = "table.iterator.scan.cfcounter";
     String expectedValue = "30," + COLUMN_FAMILY_COUNTER_ITERATOR;
-    TableOperations tops = conn.tableOperations();
+    TableOperations tops = client.tableOperations();
     checkTableForProperty(tops, tableName, expectedKey, expectedValue);
 
     ts.exec("deletetable " + tableName, true);
@@ -2001,10 +2001,10 @@ public class ShellServerIT extends SharedMiniClusterBase {
   }
 
   private String getTableId(String tableName) throws Exception {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
 
     for (int i = 0; i < 5; i++) {
-      Map<String,String> nameToId = conn.tableOperations().tableIdMap();
+      Map<String,String> nameToId = client.tableOperations().tableIdMap();
       if (nameToId.containsKey(tableName)) {
         return nameToId.get(tableName);
       } else {
diff --git a/test/src/main/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java b/test/src/main/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java
index 11b8242..6b36c27 100644
--- a/test/src/main/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java
@@ -52,11 +52,11 @@ public class TableConfigurationUpdateIT extends AccumuloClusterHarness {
 
   @Test
   public void test() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     ServerContext context = getCluster().getServerContext();
 
     String table = getUniqueNames(1)[0];
-    conn.tableOperations().create(table);
+    client.tableOperations().create(table);
 
     final NamespaceConfiguration defaultConf = new NamespaceConfiguration(Namespace.ID.DEFAULT,
         context, DefaultConfiguration.getInstance());
diff --git a/test/src/main/java/org/apache/accumulo/test/TabletServerGivesUpIT.java b/test/src/main/java/org/apache/accumulo/test/TabletServerGivesUpIT.java
index 7d6dd36..5354b0a 100644
--- a/test/src/main/java/org/apache/accumulo/test/TabletServerGivesUpIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/TabletServerGivesUpIT.java
@@ -46,11 +46,11 @@ public class TabletServerGivesUpIT extends ConfigurableMacBase {
 
   @Test(timeout = 45 * 1000)
   public void test() throws Exception {
-    final AccumuloClient conn = this.getClient();
+    final AccumuloClient client = this.getClient();
     // Yes, there's a tabletserver
-    assertEquals(1, conn.instanceOperations().getTabletServers().size());
+    assertEquals(1, client.instanceOperations().getTabletServers().size());
     final String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
     // Kill dfs
     cluster.getMiniDfs().shutdown();
     // ask the tserver to do something
@@ -61,7 +61,7 @@ public class TabletServerGivesUpIT extends ConfigurableMacBase {
         try {
           TreeSet<Text> splits = new TreeSet<>();
           splits.add(new Text("X"));
-          conn.tableOperations().addSplits(tableName, splits);
+          client.tableOperations().addSplits(tableName, splits);
         } catch (Exception e) {
           ex.set(e);
         }
@@ -69,7 +69,7 @@ public class TabletServerGivesUpIT extends ConfigurableMacBase {
     };
     splitter.start();
     // wait for the tserver to give up on writing to the WAL
-    while (conn.instanceOperations().getTabletServers().size() == 1) {
+    while (client.instanceOperations().getTabletServers().size() == 1) {
       sleepUninterruptibly(1, TimeUnit.SECONDS);
     }
   }
diff --git a/test/src/main/java/org/apache/accumulo/test/TabletServerHdfsRestartIT.java b/test/src/main/java/org/apache/accumulo/test/TabletServerHdfsRestartIT.java
index 63d9ac9..5765e8a 100644
--- a/test/src/main/java/org/apache/accumulo/test/TabletServerHdfsRestartIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/TabletServerHdfsRestartIT.java
@@ -44,24 +44,25 @@ public class TabletServerHdfsRestartIT extends ConfigurableMacBase {
 
   @Test(timeout = 2 * 60 * 1000)
   public void test() throws Exception {
-    final AccumuloClient conn = this.getClient();
+    final AccumuloClient client = this.getClient();
     // Yes, there's a tabletserver
-    assertEquals(1, conn.instanceOperations().getTabletServers().size());
+    assertEquals(1, client.instanceOperations().getTabletServers().size());
     final String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, null);
+    client.tableOperations().create(tableName);
+    BatchWriter bw = client.createBatchWriter(tableName, null);
     for (int i = 0; i < N; i++) {
       Mutation m = new Mutation("" + i);
       m.put("", "", "");
       bw.addMutation(m);
     }
     bw.close();
-    conn.tableOperations().flush(tableName, null, null, true);
+    client.tableOperations().flush(tableName, null, null, true);
 
     // Kill dfs
     cluster.getMiniDfs().restartNameNode(false);
 
-    assertEquals(N, Iterators.size(conn.createScanner(tableName, Authorizations.EMPTY).iterator()));
+    assertEquals(N,
+        Iterators.size(client.createScanner(tableName, Authorizations.EMPTY).iterator()));
   }
 
 }
diff --git a/test/src/main/java/org/apache/accumulo/test/TestIngest.java b/test/src/main/java/org/apache/accumulo/test/TestIngest.java
index 65caee9..b710b76 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestIngest.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestIngest.java
@@ -113,16 +113,16 @@ public class TestIngest {
     }
   }
 
-  public static void createTable(AccumuloClient conn, Opts args)
+  public static void createTable(AccumuloClient client, Opts args)
       throws AccumuloException, AccumuloSecurityException, TableExistsException {
     if (args.createTable) {
       TreeSet<Text> splits = getSplitPoints(args.startRow, args.startRow + args.rows,
           args.numsplits);
 
-      if (!conn.tableOperations().exists(args.getTableName()))
-        conn.tableOperations().create(args.getTableName());
+      if (!client.tableOperations().exists(args.getTableName()))
+        client.tableOperations().create(args.getTableName());
       try {
-        conn.tableOperations().addSplits(args.getTableName(), splits);
+        client.tableOperations().addSplits(args.getTableName(), splits);
       } catch (TableNotFoundException ex) {
         // unlikely
         throw new RuntimeException(ex);
diff --git a/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java b/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
index 59564f8..0844c3e 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
@@ -52,14 +52,14 @@ public class TestMultiTableIngest {
     String prefix = "test_";
   }
 
-  private static void readBack(Opts opts, ScannerOpts scanOpts, AccumuloClient conn,
+  private static void readBack(Opts opts, ScannerOpts scanOpts, AccumuloClient client,
       List<String> tableNames) throws Exception {
     int i = 0;
     for (String table : tableNames) {
       // wait for table to exist
-      while (!conn.tableOperations().exists(table))
+      while (!client.tableOperations().exists(table))
         UtilWaitThread.sleep(100);
-      try (Scanner scanner = conn.createScanner(table, opts.auths)) {
+      try (Scanner scanner = client.createScanner(table, opts.auths)) {
         scanner.setBatchSize(scanOpts.scanBatchSize);
         int count = i;
         for (Entry<Key,Value> elt : scanner) {
diff --git a/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java b/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java
index d26f8a4..19e4e48 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java
@@ -82,8 +82,8 @@ public class TestRandomDeletes {
   private static TreeSet<RowColumn> scanAll(ClientOnDefaultTable opts, ScannerOpts scanOpts,
       String tableName) throws Exception {
     TreeSet<RowColumn> result = new TreeSet<>();
-    AccumuloClient conn = opts.getClient();
-    try (Scanner scanner = conn.createScanner(tableName, auths)) {
+    AccumuloClient client = opts.getClient();
+    try (Scanner scanner = client.createScanner(tableName, auths)) {
       scanner.setBatchSize(scanOpts.scanBatchSize);
       for (Entry<Key,Value> entry : scanner) {
         Key key = entry.getKey();
diff --git a/test/src/main/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java b/test/src/main/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java
index 281df72..2c26104 100644
--- a/test/src/main/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java
@@ -55,27 +55,27 @@ public class TracerRecoversAfterOfflineTableIT extends ConfigurableMacBase {
   @Test
   public void test() throws Exception {
     Process tracer = null;
-    AccumuloClient conn = getClient();
-    if (!conn.tableOperations().exists("trace")) {
+    AccumuloClient client = getClient();
+    if (!client.tableOperations().exists("trace")) {
       MiniAccumuloClusterImpl mac = cluster;
       tracer = mac.exec(TraceServer.class);
-      while (!conn.tableOperations().exists("trace")) {
+      while (!client.tableOperations().exists("trace")) {
         sleepUninterruptibly(1, TimeUnit.SECONDS);
       }
       sleepUninterruptibly(5, TimeUnit.SECONDS);
     }
 
     log.info("Taking table offline");
-    conn.tableOperations().offline("trace", true);
+    client.tableOperations().offline("trace", true);
 
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
 
     log.info("Start a distributed trace span");
 
     DistributedTrace.enable("localhost", "testTrace", getClientInfo().getProperties());
     Span root = Trace.on("traceTest");
-    BatchWriter bw = conn.createBatchWriter(tableName, null);
+    BatchWriter bw = client.createBatchWriter(tableName, null);
     Mutation m = new Mutation("m");
     m.put("a", "b", "c");
     bw.addMutation(m);
@@ -83,11 +83,11 @@ public class TracerRecoversAfterOfflineTableIT extends ConfigurableMacBase {
     root.stop();
 
     log.info("Bringing trace table back online");
-    conn.tableOperations().online("trace", true);
+    client.tableOperations().online("trace", true);
 
     log.info("Trace table is online, should be able to find trace");
 
-    try (Scanner scanner = conn.createScanner("trace", Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner("trace", Authorizations.EMPTY)) {
       scanner.setRange(new Range(new Text(Long.toHexString(root.traceId()))));
       while (true) {
         final StringBuilder finalBuffer = new StringBuilder();
diff --git a/test/src/main/java/org/apache/accumulo/test/UnusedWALIT.java b/test/src/main/java/org/apache/accumulo/test/UnusedWALIT.java
index f33c542..3fad6ca 100644
--- a/test/src/main/java/org/apache/accumulo/test/UnusedWALIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/UnusedWALIT.java
@@ -142,11 +142,11 @@ public class UnusedWALIT extends ConfigurableMacBase {
     return result;
   }
 
-  private void writeSomeData(AccumuloClient conn, String table, int startRow, int rowCount,
+  private void writeSomeData(AccumuloClient client, String table, int startRow, int rowCount,
       int startCol, int colCount) throws Exception {
     BatchWriterConfig config = new BatchWriterConfig();
     config.setMaxMemory(10 * 1024 * 1024);
-    BatchWriter bw = conn.createBatchWriter(table, config);
+    BatchWriter bw = client.createBatchWriter(table, config);
     for (int r = startRow; r < startRow + rowCount; r++) {
       Mutation m = new Mutation(Integer.toHexString(r));
       for (int c = startCol; c < startCol + colCount; c++) {
diff --git a/test/src/main/java/org/apache/accumulo/test/UserCompactionStrategyIT.java b/test/src/main/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
index 85b9348..6e955d0 100644
--- a/test/src/main/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
@@ -324,12 +324,12 @@ public class UserCompactionStrategyIT extends AccumuloClusterHarness {
     return rows;
   }
 
-  private void writeFlush(AccumuloClient conn, String tablename, String row) throws Exception {
-    BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
+  private void writeFlush(AccumuloClient client, String tablename, String row) throws Exception {
+    BatchWriter bw = client.createBatchWriter(tablename, new BatchWriterConfig());
     Mutation m = new Mutation(row);
     m.put("", "", "");
     bw.addMutation(m);
     bw.close();
-    conn.tableOperations().flush(tablename, null, null, true);
+    client.tableOperations().flush(tablename, null, null, true);
   }
 }
diff --git a/test/src/main/java/org/apache/accumulo/test/UsersIT.java b/test/src/main/java/org/apache/accumulo/test/UsersIT.java
index 77359e8..081ee73 100644
--- a/test/src/main/java/org/apache/accumulo/test/UsersIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/UsersIT.java
@@ -35,8 +35,8 @@ public class UsersIT extends AccumuloClusterHarness {
   @Test
   public void testCreateExistingUser() throws Exception {
     ClusterUser user0 = getUser(0);
-    AccumuloClient conn = getAccumuloClient();
-    Set<String> currentUsers = conn.securityOperations().listLocalUsers();
+    AccumuloClient client = getAccumuloClient();
+    Set<String> currentUsers = client.securityOperations().listLocalUsers();
 
     // Ensure that the user exists
     if (!currentUsers.contains(user0.getPrincipal())) {
@@ -44,11 +44,11 @@ public class UsersIT extends AccumuloClusterHarness {
       if (!saslEnabled()) {
         token = new PasswordToken(user0.getPassword());
       }
-      conn.securityOperations().createLocalUser(user0.getPrincipal(), token);
+      client.securityOperations().createLocalUser(user0.getPrincipal(), token);
     }
 
     try {
-      conn.securityOperations().createLocalUser(user0.getPrincipal(),
+      client.securityOperations().createLocalUser(user0.getPrincipal(),
           new PasswordToken("better_fail"));
       fail("Creating a user that already exists should throw an exception");
     } catch (AccumuloSecurityException e) {
diff --git a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
index 073ef8b..202e2cb 100644
--- a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
@@ -367,7 +367,7 @@ public class VolumeIT extends ConfigurableMacBase {
     verifyVolumesUsed(tableNames[1], false, v2, v3);
   }
 
-  private void writeData(String tableName, AccumuloClient conn)
+  private void writeData(String tableName, AccumuloClient client)
       throws AccumuloException, AccumuloSecurityException, TableExistsException,
       TableNotFoundException, MutationsRejectedException {
     TreeSet<Text> splits = new TreeSet<>();
@@ -375,10 +375,10 @@ public class VolumeIT extends ConfigurableMacBase {
       splits.add(new Text(String.format("%06d", i * 100)));
     }
 
-    conn.tableOperations().create(tableName);
-    conn.tableOperations().addSplits(tableName, splits);
+    client.tableOperations().create(tableName);
+    client.tableOperations().addSplits(tableName, splits);
 
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
     for (int i = 0; i < 100; i++) {
       String row = String.format("%06d", i * 100 + 3);
       Mutation m = new Mutation(row);
@@ -392,7 +392,7 @@ public class VolumeIT extends ConfigurableMacBase {
   private void verifyVolumesUsed(String tableName, boolean shouldExist, Path... paths)
       throws Exception {
 
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
 
     List<String> expected = new ArrayList<>();
     for (int i = 0; i < 100; i++) {
@@ -400,20 +400,20 @@ public class VolumeIT extends ConfigurableMacBase {
       expected.add(row + ":cf1:cq1:1");
     }
 
-    if (!conn.tableOperations().exists(tableName)) {
+    if (!client.tableOperations().exists(tableName)) {
       assertFalse(shouldExist);
 
-      writeData(tableName, conn);
+      writeData(tableName, client);
 
-      verifyData(expected, conn.createScanner(tableName, Authorizations.EMPTY));
+      verifyData(expected, client.createScanner(tableName, Authorizations.EMPTY));
 
-      conn.tableOperations().flush(tableName, null, null, true);
+      client.tableOperations().flush(tableName, null, null, true);
     }
 
-    verifyData(expected, conn.createScanner(tableName, Authorizations.EMPTY));
+    verifyData(expected, client.createScanner(tableName, Authorizations.EMPTY));
 
-    Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
-    try (Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+    Table.ID tableId = Table.ID.of(client.tableOperations().tableIdMap().get(tableName));
+    try (Scanner metaScanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
       MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(metaScanner);
       metaScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
       metaScanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
@@ -496,8 +496,8 @@ public class VolumeIT extends ConfigurableMacBase {
     // start cluster and verify that volume was decommissioned
     cluster.start();
 
-    AccumuloClient conn = cluster.getAccumuloClient("root", new PasswordToken(ROOT_PASSWORD));
-    conn.tableOperations().compact(tableNames[0], null, null, true, true);
+    AccumuloClient client = cluster.getAccumuloClient("root", new PasswordToken(ROOT_PASSWORD));
+    client.tableOperations().compact(tableNames[0], null, null, true, true);
 
     verifyVolumesUsed(tableNames[0], true, v2);
 
@@ -507,11 +507,11 @@ public class VolumeIT extends ConfigurableMacBase {
     String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
     assertTrue(rootTabletDir.startsWith(v2.toString()));
 
-    conn.tableOperations().clone(tableNames[0], tableNames[1], true, new HashMap<>(),
+    client.tableOperations().clone(tableNames[0], tableNames[1], true, new HashMap<>(),
         new HashSet<>());
 
-    conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
-    conn.tableOperations().flush(RootTable.NAME, null, null, true);
+    client.tableOperations().flush(MetadataTable.NAME, null, null, true);
+    client.tableOperations().flush(RootTable.NAME, null, null, true);
 
     verifyVolumesUsed(tableNames[0], true, v2);
     verifyVolumesUsed(tableNames[1], true, v2);
diff --git a/test/src/main/java/org/apache/accumulo/test/YieldScannersIT.java b/test/src/main/java/org/apache/accumulo/test/YieldScannersIT.java
index 33f8f35..e5a2c06 100644
--- a/test/src/main/java/org/apache/accumulo/test/YieldScannersIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/YieldScannersIT.java
@@ -62,9 +62,9 @@ public class YieldScannersIT extends AccumuloClusterHarness {
   public void testScan() throws Exception {
     // make a table
     final String tableName = getUniqueNames(1)[0];
-    final AccumuloClient conn = getAccumuloClient();
-    conn.tableOperations().create(tableName);
-    final BatchWriter writer = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    final AccumuloClient client = getAccumuloClient();
+    client.tableOperations().create(tableName);
+    final BatchWriter writer = client.createBatchWriter(tableName, new BatchWriterConfig());
     for (int i = 0; i < 10; i++) {
       byte[] row = {(byte) (START_ROW + i)};
       Mutation m = new Mutation(new Text(row));
@@ -76,7 +76,7 @@ public class YieldScannersIT extends AccumuloClusterHarness {
 
     log.info("Creating scanner");
     // make a scanner for a table with 10 keys
-    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
       final IteratorSetting cfg = new IteratorSetting(100, YieldingIterator.class);
       scanner.addScanIterator(cfg);
 
@@ -116,9 +116,9 @@ public class YieldScannersIT extends AccumuloClusterHarness {
   public void testBatchScan() throws Exception {
     // make a table
     final String tableName = getUniqueNames(1)[0];
-    final AccumuloClient conn = getAccumuloClient();
-    conn.tableOperations().create(tableName);
-    final BatchWriter writer = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    final AccumuloClient client = getAccumuloClient();
+    client.tableOperations().create(tableName);
+    final BatchWriter writer = client.createBatchWriter(tableName, new BatchWriterConfig());
     for (int i = 0; i < 10; i++) {
       byte[] row = {(byte) (START_ROW + i)};
       Mutation m = new Mutation(new Text(row));
@@ -130,7 +130,7 @@ public class YieldScannersIT extends AccumuloClusterHarness {
 
     log.info("Creating batch scanner");
     // make a scanner for a table with 10 keys
-    try (BatchScanner scanner = conn.createBatchScanner(tableName, Authorizations.EMPTY, 1)) {
+    try (BatchScanner scanner = client.createBatchScanner(tableName, Authorizations.EMPTY, 1)) {
       final IteratorSetting cfg = new IteratorSetting(100, YieldingIterator.class);
       scanner.addScanIterator(cfg);
       scanner.setRanges(Collections.singleton(new Range()));
diff --git a/test/src/main/java/org/apache/accumulo/test/ZooKeeperPropertiesIT.java b/test/src/main/java/org/apache/accumulo/test/ZooKeeperPropertiesIT.java
index a1ea97a..51b4716 100644
--- a/test/src/main/java/org/apache/accumulo/test/ZooKeeperPropertiesIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ZooKeeperPropertiesIT.java
@@ -26,9 +26,9 @@ public class ZooKeeperPropertiesIT extends AccumuloClusterHarness {
 
   @Test(expected = AccumuloException.class)
   public void testNoFiles() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     // Should throw an error as this property can't be changed in ZooKeeper
-    conn.instanceOperations().setProperty(Property.GENERAL_RPC_TIMEOUT.getKey(), "60s");
+    client.instanceOperations().setProperty(Property.GENERAL_RPC_TIMEOUT.getKey(), "60s");
   }
 
 }
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java b/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java
index 67a971e..46b1f6b 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java
@@ -32,7 +32,7 @@ import org.junit.Test;
 public class AccumuloClientIT extends AccumuloClusterHarness {
 
   @Test
-  public void testConnectorBuilder() throws Exception {
+  public void testclientectorBuilder() throws Exception {
     AccumuloClient c = getAccumuloClient();
     String instanceName = c.info().getInstanceName();
     String zookeepers = c.info().getZooKeepers();
@@ -40,13 +40,13 @@ public class AccumuloClientIT extends AccumuloClusterHarness {
     final String password = "testpassword";
     c.securityOperations().createLocalUser(user, new PasswordToken(password));
 
-    AccumuloClient conn = Accumulo.newClient().forInstance(instanceName, zookeepers)
+    AccumuloClient client = Accumulo.newClient().forInstance(instanceName, zookeepers)
         .usingPassword(user, password).withZkTimeout(1234).build();
 
-    assertEquals(instanceName, conn.info().getInstanceName());
-    assertEquals(zookeepers, conn.info().getZooKeepers());
-    assertEquals(user, conn.whoami());
-    assertEquals(1234, conn.info().getZooKeepersSessionTimeOut());
+    assertEquals(instanceName, client.info().getInstanceName());
+    assertEquals(zookeepers, client.info().getZooKeepers());
+    assertEquals(user, client.whoami());
+    assertEquals(1234, client.info().getZooKeepersSessionTimeOut());
 
     ClientInfo info = Accumulo.newClient().forInstance(instanceName, zookeepers)
         .usingPassword(user, password).info();
@@ -61,23 +61,23 @@ public class AccumuloClientIT extends AccumuloClusterHarness {
     props.put(ClientProperty.AUTH_PRINCIPAL.getKey(), user);
     props.put(ClientProperty.INSTANCE_ZOOKEEPERS_TIMEOUT.getKey(), "22s");
     ClientProperty.setPassword(props, password);
-    conn = Accumulo.newClient().usingProperties(props).build();
+    client = Accumulo.newClient().usingProperties(props).build();
 
-    assertEquals(instanceName, conn.info().getInstanceName());
-    assertEquals(zookeepers, conn.info().getZooKeepers());
-    assertEquals(user, conn.whoami());
-    assertEquals(22000, conn.info().getZooKeepersSessionTimeOut());
+    assertEquals(instanceName, client.info().getInstanceName());
+    assertEquals(zookeepers, client.info().getZooKeepers());
+    assertEquals(user, client.whoami());
+    assertEquals(22000, client.info().getZooKeepersSessionTimeOut());
 
     final String user2 = "testuser2";
     final String password2 = "testpassword2";
     c.securityOperations().createLocalUser(user2, new PasswordToken(password2));
 
-    AccumuloClient conn2 = Accumulo.newClient().usingClientInfo(conn.info())
+    AccumuloClient client2 = Accumulo.newClient().usingClientInfo(client.info())
         .usingToken(user2, new PasswordToken(password2)).build();
-    assertEquals(instanceName, conn2.info().getInstanceName());
-    assertEquals(zookeepers, conn2.info().getZooKeepers());
-    assertEquals(user2, conn2.whoami());
-    info = conn2.info();
+    assertEquals(instanceName, client2.info().getInstanceName());
+    assertEquals(zookeepers, client2.info().getZooKeepers());
+    assertEquals(user2, client2.whoami());
+    info = client2.info();
     assertEquals(instanceName, info.getInstanceName());
     assertEquals(zookeepers, info.getZooKeepers());
     assertEquals(user2, info.getPrincipal());
@@ -86,11 +86,11 @@ public class AccumuloClientIT extends AccumuloClusterHarness {
     final String password3 = "testpassword3";
     c.securityOperations().createLocalUser(user3, new PasswordToken(password3));
 
-    AccumuloClient conn3 = conn.changeUser(user3, new PasswordToken(password3));
-    assertEquals(instanceName, conn3.info().getInstanceName());
-    assertEquals(zookeepers, conn3.info().getZooKeepers());
-    assertEquals(user3, conn3.whoami());
-    info = conn3.info();
+    AccumuloClient client3 = client.changeUser(user3, new PasswordToken(password3));
+    assertEquals(instanceName, client3.info().getInstanceName());
+    assertEquals(zookeepers, client3.info().getZooKeepers());
+    assertEquals(user3, client3.whoami());
+    info = client3.info();
     assertEquals(instanceName, info.getInstanceName());
     assertEquals(zookeepers, info.getZooKeepers());
     assertEquals(user3, info.getPrincipal());
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java
index a272970..3fd1c39 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java
@@ -89,10 +89,10 @@ public class BalanceInPresenceOfOfflineTableIT extends AccumuloClusterHarness {
   @Before
   public void setupTables() throws AccumuloException, AccumuloSecurityException,
       TableExistsException, TableNotFoundException {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     // Need at least two tservers
     Assume.assumeTrue("Not enough tservers to run test",
-        conn.instanceOperations().getTabletServers().size() >= 2);
+        client.instanceOperations().getTabletServers().size() >= 2);
 
     // set up splits
     final SortedSet<Text> splits = new TreeSet<>();
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BulkLoadIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BulkLoadIT.java
index d8b61d2..77e26b0 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/BulkLoadIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BulkLoadIT.java
@@ -41,7 +41,6 @@ import java.util.stream.Collectors;
 
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
@@ -280,7 +279,7 @@ public class BulkLoadIT extends AccumuloClusterHarness {
 
   @Test
   public void testBadLoadPlans() throws Exception {
-    Connector c = getConnector();
+    AccumuloClient c = getAccumuloClient();
     addSplits(tableName, "0333 0666 0999 1333 1666");
 
     String dir = getDir("/testBulkFile-");
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
index d3cde2e..131c479 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
@@ -55,11 +55,11 @@ public class BulkSplitOptimizationIT extends AccumuloClusterHarness {
 
   @Before
   public void alterConfig() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
-    majcDelay = conn.instanceOperations().getSystemConfiguration()
+    AccumuloClient client = getAccumuloClient();
+    majcDelay = client.instanceOperations().getSystemConfiguration()
         .get(Property.TSERV_MAJC_DELAY.getKey());
     if (!"1s".equals(majcDelay)) {
-      conn.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1s");
+      client.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1s");
       getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
       getClusterControl().startAllServers(ServerType.TABLET_SERVER);
     }
@@ -68,8 +68,8 @@ public class BulkSplitOptimizationIT extends AccumuloClusterHarness {
   @After
   public void resetConfig() throws Exception {
     if (null != majcDelay) {
-      AccumuloClient conn = getAccumuloClient();
-      conn.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
+      AccumuloClient client = getAccumuloClient();
+      client.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
       getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
       getClusterControl().startAllServers(ServerType.TABLET_SERVER);
     }
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
index 555cc97..5b54df6 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
@@ -135,12 +135,12 @@ public class CloneTestIT extends AccumuloClusterHarness {
     }
   }
 
-  private void checkMetadata(String table, AccumuloClient conn) throws Exception {
-    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+  private void checkMetadata(String table, AccumuloClient client) throws Exception {
+    try (Scanner s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
 
       s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
       MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(s);
-      String tableId = conn.tableOperations().tableIdMap().get(table);
+      String tableId = client.tableOperations().tableIdMap().get(table);
 
       assertNotNull("Could not get table id for " + table, tableId);
 
@@ -266,7 +266,7 @@ public class CloneTestIT extends AccumuloClusterHarness {
 
   @Test
   public void testCloneWithSplits() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
 
     List<Mutation> mutations = new ArrayList<>();
     TreeSet<Text> splits = new TreeSet<>();
@@ -279,21 +279,21 @@ public class CloneTestIT extends AccumuloClusterHarness {
 
     String[] tables = getUniqueNames(2);
 
-    conn.tableOperations().create(tables[0]);
+    client.tableOperations().create(tables[0]);
 
-    conn.tableOperations().addSplits(tables[0], splits);
+    client.tableOperations().addSplits(tables[0], splits);
 
-    BatchWriter bw = conn.createBatchWriter(tables[0], new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(tables[0], new BatchWriterConfig());
     bw.addMutations(mutations);
     bw.close();
 
-    conn.tableOperations().clone(tables[0], tables[1], true, null, null);
+    client.tableOperations().clone(tables[0], tables[1], true, null, null);
 
-    conn.tableOperations().deleteRows(tables[1], new Text("4"), new Text("8"));
+    client.tableOperations().deleteRows(tables[1], new Text("4"), new Text("8"));
 
     List<String> rows = Arrays.asList("0", "1", "2", "3", "4", "9");
     List<String> actualRows = new ArrayList<>();
-    for (Entry<Key,Value> entry : conn.createScanner(tables[1], Authorizations.EMPTY)) {
+    for (Entry<Key,Value> entry : client.createScanner(tables[1], Authorizations.EMPTY)) {
       actualRows.add(entry.getKey().getRow().toString());
     }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ConcurrencyIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ConcurrencyIT.java
index c3ed6b9..93c4abe 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ConcurrencyIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ConcurrencyIT.java
@@ -53,9 +53,9 @@ public class ConcurrencyIT extends AccumuloClusterHarness {
     int count = 0;
     Scanner scanner = null;
 
-    ScanTask(AccumuloClient conn, String tableName, long time) throws Exception {
+    ScanTask(AccumuloClient client, String tableName, long time) throws Exception {
       try {
-        scanner = conn.createScanner(tableName, Authorizations.EMPTY);
+        scanner = client.createScanner(tableName, Authorizations.EMPTY);
         IteratorSetting slow = new IteratorSetting(30, "slow", SlowIterator.class);
         SlowIterator.setSleepTime(slow, time);
         scanner.addScanIterator(slow);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableCompactionIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableCompactionIT.java
index 4688faa..97dab3b 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableCompactionIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableCompactionIT.java
@@ -133,19 +133,19 @@ public class ConfigurableCompactionIT extends ConfigurableMacBase {
     return destName;
   }
 
-  private void writeFlush(AccumuloClient conn, String tablename, String row) throws Exception {
-    BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
+  private void writeFlush(AccumuloClient client, String tablename, String row) throws Exception {
+    BatchWriter bw = client.createBatchWriter(tablename, new BatchWriterConfig());
     Mutation m = new Mutation(row);
     m.put("", "", "");
     bw.addMutation(m);
     bw.close();
-    conn.tableOperations().flush(tablename, null, null, true);
+    client.tableOperations().flush(tablename, null, null, true);
   }
 
   final static Random r = new SecureRandom();
 
-  private void makeFile(AccumuloClient conn, String tablename) throws Exception {
-    BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
+  private void makeFile(AccumuloClient client, String tablename) throws Exception {
+    BatchWriter bw = client.createBatchWriter(tablename, new BatchWriterConfig());
     byte[] empty = {};
     byte[] row = new byte[10];
     r.nextBytes(row);
@@ -154,7 +154,7 @@ public class ConfigurableCompactionIT extends ConfigurableMacBase {
     bw.addMutation(m);
     bw.flush();
     bw.close();
-    conn.tableOperations().flush(tablename, null, null, true);
+    client.tableOperations().flush(tablename, null, null, true);
   }
 
   private void runTest(final AccumuloClient c, final String tableName, final int n)
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CreateInitialSplitsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CreateInitialSplitsIT.java
index 856956a..7774223 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/CreateInitialSplitsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CreateInitialSplitsIT.java
@@ -27,9 +27,9 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.UUID;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.admin.NewTableConfiguration;
@@ -46,7 +46,7 @@ import org.junit.Test;
 
 public class CreateInitialSplitsIT extends AccumuloClusterHarness {
 
-  private Connector connector;
+  private AccumuloClient client;
   private String tableName;
 
   @Override
@@ -64,7 +64,7 @@ public class CreateInitialSplitsIT extends AccumuloClusterHarness {
 
   @Before
   public void setupInitialSplits() {
-    connector = getConnector();
+    client = getAccumuloClient();
   }
 
   /**
@@ -74,8 +74,8 @@ public class CreateInitialSplitsIT extends AccumuloClusterHarness {
   public void testCreateTableWithNoSplits()
       throws TableExistsException, AccumuloSecurityException, AccumuloException {
     tableName = getUniqueNames(1)[0];
-    connector.tableOperations().create(tableName);
-    assertTrue(connector.tableOperations().exists(tableName));
+    client.tableOperations().create(tableName);
+    assertTrue(client.tableOperations().exists(tableName));
   }
 
   /**
@@ -87,10 +87,10 @@ public class CreateInitialSplitsIT extends AccumuloClusterHarness {
     tableName = getUniqueNames(1)[0];
     SortedSet<Text> expectedSplits = generateNonBinarySplits(3000, 32);
     NewTableConfiguration ntc = new NewTableConfiguration().withSplits(expectedSplits);
-    assertFalse(connector.tableOperations().exists(tableName));
-    connector.tableOperations().create(tableName, ntc);
-    assertTrue(connector.tableOperations().exists(tableName));
-    Collection<Text> createdSplits = connector.tableOperations().listSplits(tableName);
+    assertFalse(client.tableOperations().exists(tableName));
+    client.tableOperations().create(tableName, ntc);
+    assertTrue(client.tableOperations().exists(tableName));
+    Collection<Text> createdSplits = client.tableOperations().listSplits(tableName);
     assertEquals(expectedSplits, new TreeSet<>(createdSplits));
   }
 
@@ -100,10 +100,10 @@ public class CreateInitialSplitsIT extends AccumuloClusterHarness {
     tableName = getUniqueNames(1)[0];
     SortedSet<Text> expectedSplits = generateNonBinarySplits(3000, 32, true);
     NewTableConfiguration ntc = new NewTableConfiguration().withSplits(expectedSplits);
-    assertFalse(connector.tableOperations().exists(tableName));
-    connector.tableOperations().create(tableName, ntc);
-    assertTrue(connector.tableOperations().exists(tableName));
-    Collection<Text> createdSplits = connector.tableOperations().listSplits(tableName);
+    assertFalse(client.tableOperations().exists(tableName));
+    client.tableOperations().create(tableName, ntc);
+    assertTrue(client.tableOperations().exists(tableName));
+    Collection<Text> createdSplits = client.tableOperations().listSplits(tableName);
     assertEquals(expectedSplits, new TreeSet<>(createdSplits));
   }
 
@@ -116,10 +116,10 @@ public class CreateInitialSplitsIT extends AccumuloClusterHarness {
     tableName = getUniqueNames(1)[0];
     SortedSet<Text> expectedSplits = generateBinarySplits(1000, 16);
     NewTableConfiguration ntc = new NewTableConfiguration().withSplits(expectedSplits);
-    assertFalse(connector.tableOperations().exists(tableName));
-    connector.tableOperations().create(tableName, ntc);
-    assertTrue(connector.tableOperations().exists(tableName));
-    Collection<Text> createdSplits = connector.tableOperations().listSplits(tableName);
+    assertFalse(client.tableOperations().exists(tableName));
+    client.tableOperations().create(tableName, ntc);
+    assertTrue(client.tableOperations().exists(tableName));
+    Collection<Text> createdSplits = client.tableOperations().listSplits(tableName);
     assertEquals(expectedSplits, new TreeSet<>(createdSplits));
   }
 
@@ -129,10 +129,10 @@ public class CreateInitialSplitsIT extends AccumuloClusterHarness {
     tableName = getUniqueNames(1)[0];
     SortedSet<Text> expectedSplits = generateBinarySplits(1000, 16, true);
     NewTableConfiguration ntc = new NewTableConfiguration().withSplits(expectedSplits);
-    assertFalse(connector.tableOperations().exists(tableName));
-    connector.tableOperations().create(tableName, ntc);
-    assertTrue(connector.tableOperations().exists(tableName));
-    Collection<Text> createdSplits = connector.tableOperations().listSplits(tableName);
+    assertFalse(client.tableOperations().exists(tableName));
+    client.tableOperations().create(tableName, ntc);
+    assertTrue(client.tableOperations().exists(tableName));
+    Collection<Text> createdSplits = client.tableOperations().listSplits(tableName);
     assertEquals(expectedSplits, new TreeSet<>(createdSplits));
   }
 
@@ -146,23 +146,23 @@ public class CreateInitialSplitsIT extends AccumuloClusterHarness {
     // older method was not affected.
     tableName = getUniqueNames(1)[0];
     NewTableConfiguration ntc = new NewTableConfiguration();
-    connector.tableOperations().create(tableName, ntc);
-    assertTrue(connector.tableOperations().exists(tableName));
+    client.tableOperations().create(tableName, ntc);
+    assertTrue(client.tableOperations().exists(tableName));
     SortedSet<Text> splits = new TreeSet<>();
     splits.add(new Text("ccccc"));
     splits.add(new Text("mmmmm"));
     splits.add(new Text("ttttt"));
-    connector.tableOperations().addSplits(tableName, splits);
+    client.tableOperations().addSplits(tableName, splits);
     // now create another table using the splits from this table
-    Collection<Text> otherSplits = connector.tableOperations().listSplits(tableName);
+    Collection<Text> otherSplits = client.tableOperations().listSplits(tableName);
     assertEquals(splits, new TreeSet<>(otherSplits));
     String tableName2 = getUniqueNames(2)[1];
     NewTableConfiguration ntc2 = new NewTableConfiguration();
     ntc2.withSplits(new TreeSet<>(otherSplits));
-    assertFalse(connector.tableOperations().exists(tableName2));
-    connector.tableOperations().create(tableName2, ntc);
-    assertTrue(connector.tableOperations().exists(tableName2));
-    Collection<Text> createdSplits = connector.tableOperations().listSplits(tableName);
+    assertFalse(client.tableOperations().exists(tableName2));
+    client.tableOperations().create(tableName2, ntc);
+    assertTrue(client.tableOperations().exists(tableName2));
+    Collection<Text> createdSplits = client.tableOperations().listSplits(tableName);
     assertEquals(splits, new TreeSet<>(createdSplits));
   }
 
@@ -177,21 +177,21 @@ public class CreateInitialSplitsIT extends AccumuloClusterHarness {
     tableName = getUniqueNames(1)[0];
     SortedSet<Text> expectedSplits = generateNonBinarySplits(1000, 32);
     NewTableConfiguration ntc = new NewTableConfiguration().withSplits(expectedSplits);
-    assertFalse(connector.tableOperations().exists(tableName));
-    connector.tableOperations().create(tableName, ntc);
-    assertTrue(connector.tableOperations().exists(tableName));
+    assertFalse(client.tableOperations().exists(tableName));
+    client.tableOperations().create(tableName, ntc);
+    assertTrue(client.tableOperations().exists(tableName));
     // verify data
-    Collection<Text> createdSplits = connector.tableOperations().listSplits(tableName);
+    Collection<Text> createdSplits = client.tableOperations().listSplits(tableName);
     assertEquals(expectedSplits, new TreeSet<>(createdSplits));
-    connector.tableOperations().flush(tableName);
+    client.tableOperations().flush(tableName);
     // compact data
-    connector.tableOperations().compact(tableName, null, null, true, true);
+    client.tableOperations().compact(tableName, null, null, true, true);
     // verify data
-    createdSplits = connector.tableOperations().listSplits(tableName);
+    createdSplits = client.tableOperations().listSplits(tableName);
     assertEquals(expectedSplits, new TreeSet<>(createdSplits));
     // delete table
-    connector.tableOperations().delete(tableName);
-    assertFalse(connector.tableOperations().exists(tableName));
+    client.tableOperations().delete(tableName);
+    assertFalse(client.tableOperations().exists(tableName));
   }
 
   // @Test
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CredentialsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CredentialsIT.java
index b317855..d56f90b 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/CredentialsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CredentialsIT.java
@@ -56,19 +56,19 @@ public class CredentialsIT extends AccumuloClusterHarness {
 
   @Before
   public void createLocalUser() throws AccumuloException, AccumuloSecurityException {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     ClusterUser user = getUser(0);
     username = user.getPrincipal();
     saslEnabled = saslEnabled();
     // Create the user if it doesn't exist
-    Set<String> users = conn.securityOperations().listLocalUsers();
+    Set<String> users = client.securityOperations().listLocalUsers();
     if (!users.contains(username)) {
       PasswordToken passwdToken = null;
       if (!saslEnabled) {
         password = user.getPassword();
         passwdToken = new PasswordToken(password);
       }
-      conn.securityOperations().createLocalUser(username, passwdToken);
+      client.securityOperations().createLocalUser(username, passwdToken);
     }
   }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
index 914fe22..7f4aa3c 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
@@ -67,16 +67,16 @@ public class DeleteRowsSplitIT extends AccumuloClusterHarness {
     // Delete ranges of rows, and verify the are removed
     // Do this while adding many splits
     final String tableName = getUniqueNames(1)[0];
-    final AccumuloClient conn = getAccumuloClient();
+    final AccumuloClient client = getAccumuloClient();
 
     // Eliminate whole tablets
     for (int test = 0; test < 10; test++) {
       // create a table
       log.info("Test {}", test);
-      conn.tableOperations().create(tableName);
+      client.tableOperations().create(tableName);
 
       // put some data in it
-      fillTable(conn, tableName);
+      fillTable(client, tableName);
 
       // generate a random delete range
       final Text start = new Text();
@@ -91,7 +91,7 @@ public class DeleteRowsSplitIT extends AccumuloClusterHarness {
           try {
             // split the table
             final SortedSet<Text> afterEnd = SPLITS.tailSet(new Text(end + "\0"));
-            conn.tableOperations().addSplits(tableName, afterEnd);
+            client.tableOperations().addSplits(tableName, afterEnd);
           } catch (Exception ex) {
             log.error("Exception", ex);
             synchronized (fail) {
@@ -104,7 +104,7 @@ public class DeleteRowsSplitIT extends AccumuloClusterHarness {
 
       sleepUninterruptibly(test * 2, TimeUnit.MILLISECONDS);
 
-      conn.tableOperations().deleteRows(tableName, start, end);
+      client.tableOperations().deleteRows(tableName, start, end);
 
       t.join();
       synchronized (fail) {
@@ -112,14 +112,14 @@ public class DeleteRowsSplitIT extends AccumuloClusterHarness {
       }
 
       // scan the table
-      try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
+      try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
         for (Entry<Key,Value> entry : scanner) {
           Text row = entry.getKey().getRow();
           assertTrue(row.compareTo(start) <= 0 || row.compareTo(end) > 0);
         }
 
         // delete the table
-        conn.tableOperations().delete(tableName);
+        client.tableOperations().delete(tableName);
       }
     }
   }
@@ -137,8 +137,8 @@ public class DeleteRowsSplitIT extends AccumuloClusterHarness {
 
   }
 
-  private void fillTable(AccumuloClient conn, String table) throws Exception {
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+  private void fillTable(AccumuloClient client, String table) throws Exception {
+    BatchWriter bw = client.createBatchWriter(table, new BatchWriterConfig());
     for (String row : ROWS) {
       Mutation m = new Mutation(row);
       m.put("cf", "cq", "value");
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java b/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
index c0523fc..05589fc 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
@@ -252,10 +252,10 @@ public class GarbageCollectorIT extends ConfigurableMacBase {
   @Test
   public void testProperPortAdvertisement() throws Exception {
 
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
 
     ZooReaderWriter zk = new ZooReaderWriter(cluster.getZooKeepers(), 30000, OUR_SECRET);
-    String path = ZooUtil.getRoot(conn.getInstanceID()) + Constants.ZGC_LOCK;
+    String path = ZooUtil.getRoot(client.getInstanceID()) + Constants.ZGC_LOCK;
     for (int i = 0; i < 5; i++) {
       List<String> locks;
       try {
@@ -300,10 +300,10 @@ public class GarbageCollectorIT extends ConfigurableMacBase {
     return Iterators.size(Arrays.asList(cluster.getFileSystem().globStatus(path)).iterator());
   }
 
-  public static void addEntries(AccumuloClient conn, BatchWriterOpts bwOpts) throws Exception {
-    conn.securityOperations().grantTablePermission(conn.whoami(), MetadataTable.NAME,
+  public static void addEntries(AccumuloClient client, BatchWriterOpts bwOpts) throws Exception {
+    client.securityOperations().grantTablePermission(client.whoami(), MetadataTable.NAME,
         TablePermission.WRITE);
-    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, bwOpts.getBatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(MetadataTable.NAME, bwOpts.getBatchWriterConfig());
 
     for (int i = 0; i < 100000; ++i) {
       final Text emptyText = new Text("");
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java b/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java
index 41aa12c..2fb8229 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java
@@ -162,18 +162,18 @@ public class KerberosIT extends AccumuloITBase {
     UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
         rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
     ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
-      final AccumuloClient conn = mac.getAccumuloClient(rootUser.getPrincipal(),
+      final AccumuloClient client = mac.getAccumuloClient(rootUser.getPrincipal(),
           new KerberosToken());
 
       // The "root" user should have all system permissions
       for (SystemPermission perm : SystemPermission.values()) {
         assertTrue("Expected user to have permission: " + perm,
-            conn.securityOperations().hasSystemPermission(conn.whoami(), perm));
+            client.securityOperations().hasSystemPermission(client.whoami(), perm));
       }
 
       // and the ability to modify the root and metadata tables
       for (String table : Arrays.asList(RootTable.NAME, MetadataTable.NAME)) {
-        assertTrue(conn.securityOperations().hasTablePermission(conn.whoami(), table,
+        assertTrue(client.securityOperations().hasTablePermission(client.whoami(), table,
             TablePermission.ALTER_TABLE));
       }
       return null;
@@ -200,14 +200,14 @@ public class KerberosIT extends AccumuloITBase {
     log.info("Logged in as {}", rootUser.getPrincipal());
 
     ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
-      AccumuloClient conn = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
-      log.info("Created connector as {}", rootUser.getPrincipal());
-      assertEquals(rootUser.getPrincipal(), conn.whoami());
+      AccumuloClient client = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
+      log.info("Created client as {}", rootUser.getPrincipal());
+      assertEquals(rootUser.getPrincipal(), client.whoami());
 
       // Make sure the system user doesn't exist -- this will force some RPC to happen server-side
-      createTableWithDataAndCompact(conn);
+      createTableWithDataAndCompact(client);
 
-      assertEquals(users, conn.securityOperations().listLocalUsers());
+      assertEquals(users, client.securityOperations().listLocalUsers());
 
       return null;
     });
@@ -216,19 +216,19 @@ public class KerberosIT extends AccumuloITBase {
         newUserKeytab.getAbsolutePath());
     log.info("Logged in as {}", newQualifiedUser);
     ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
-      AccumuloClient conn = mac.getAccumuloClient(newQualifiedUser, new KerberosToken());
-      log.info("Created connector as {}", newQualifiedUser);
-      assertEquals(newQualifiedUser, conn.whoami());
+      AccumuloClient client = mac.getAccumuloClient(newQualifiedUser, new KerberosToken());
+      log.info("Created client as {}", newQualifiedUser);
+      assertEquals(newQualifiedUser, client.whoami());
 
       // The new user should have no system permissions
       for (SystemPermission perm : SystemPermission.values()) {
-        assertFalse(conn.securityOperations().hasSystemPermission(newQualifiedUser, perm));
+        assertFalse(client.securityOperations().hasSystemPermission(newQualifiedUser, perm));
       }
 
       users.add(newQualifiedUser);
 
       // Same users as before, plus the new user we just created
-      assertEquals(users, conn.securityOperations().listLocalUsers());
+      assertEquals(users, client.securityOperations().listLocalUsers());
       return null;
     });
   }
@@ -252,12 +252,12 @@ public class KerberosIT extends AccumuloITBase {
     log.info("Logged in as {}", user1);
     ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
       // Indirectly creates this user when we use it
-      AccumuloClient conn = mac.getAccumuloClient(qualifiedUser1, new KerberosToken());
-      log.info("Created connector as {}", qualifiedUser1);
+      AccumuloClient client = mac.getAccumuloClient(qualifiedUser1, new KerberosToken());
+      log.info("Created client as {}", qualifiedUser1);
 
       // The new user should have no system permissions
       for (SystemPermission perm : SystemPermission.values()) {
-        assertFalse(conn.securityOperations().hasSystemPermission(qualifiedUser1, perm));
+        assertFalse(client.securityOperations().hasSystemPermission(qualifiedUser1, perm));
       }
 
       return null;
@@ -266,8 +266,8 @@ public class KerberosIT extends AccumuloITBase {
     ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(),
         rootUser.getKeytab().getAbsolutePath());
     ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
-      AccumuloClient conn = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
-      conn.securityOperations().grantSystemPermission(qualifiedUser1,
+      AccumuloClient client = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
+      client.securityOperations().grantSystemPermission(qualifiedUser1,
           SystemPermission.CREATE_TABLE);
       return null;
     });
@@ -276,20 +276,20 @@ public class KerberosIT extends AccumuloITBase {
     ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(user1,
         user1Keytab.getAbsolutePath());
     ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
-      AccumuloClient conn = mac.getAccumuloClient(qualifiedUser1, new KerberosToken());
+      AccumuloClient client = mac.getAccumuloClient(qualifiedUser1, new KerberosToken());
 
       // Shouldn't throw an exception since we granted the create table permission
       final String table = testName.getMethodName() + "_user_table";
-      conn.tableOperations().create(table);
+      client.tableOperations().create(table);
 
       // Make sure we can actually use the table we made
-      BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+      BatchWriter bw = client.createBatchWriter(table, new BatchWriterConfig());
       Mutation m = new Mutation("a");
       m.put("b", "c", "d");
       bw.addMutation(m);
       bw.close();
 
-      conn.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
+      client.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
       return null;
     });
   }
@@ -313,12 +313,12 @@ public class KerberosIT extends AccumuloITBase {
     log.info("Logged in as {}", user1);
     ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
       // Indirectly creates this user when we use it
-      AccumuloClient conn = mac.getAccumuloClient(qualifiedUser1, new KerberosToken());
-      log.info("Created connector as {}", qualifiedUser1);
+      AccumuloClient client = mac.getAccumuloClient(qualifiedUser1, new KerberosToken());
+      log.info("Created client as {}", qualifiedUser1);
 
       // The new user should have no system permissions
       for (SystemPermission perm : SystemPermission.values()) {
-        assertFalse(conn.securityOperations().hasSystemPermission(qualifiedUser1, perm));
+        assertFalse(client.securityOperations().hasSystemPermission(qualifiedUser1, perm));
       }
       return null;
     });
@@ -330,16 +330,17 @@ public class KerberosIT extends AccumuloITBase {
         rootUser.getKeytab().getAbsolutePath());
 
     ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
-      AccumuloClient conn = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
-      conn.tableOperations().create(table);
+      AccumuloClient client = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
+      client.tableOperations().create(table);
       // Give our unprivileged user permission on the table we made for them
-      conn.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.READ);
-      conn.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.WRITE);
-      conn.securityOperations().grantTablePermission(qualifiedUser1, table,
+      client.securityOperations().grantTablePermission(qualifiedUser1, table, TablePermission.READ);
+      client.securityOperations().grantTablePermission(qualifiedUser1, table,
+          TablePermission.WRITE);
+      client.securityOperations().grantTablePermission(qualifiedUser1, table,
           TablePermission.ALTER_TABLE);
-      conn.securityOperations().grantTablePermission(qualifiedUser1, table,
+      client.securityOperations().grantTablePermission(qualifiedUser1, table,
           TablePermission.DROP_TABLE);
-      conn.securityOperations().changeUserAuthorizations(qualifiedUser1, new Authorizations(viz));
+      client.securityOperations().changeUserAuthorizations(qualifiedUser1, new Authorizations(viz));
       return null;
     });
 
@@ -347,26 +348,26 @@ public class KerberosIT extends AccumuloITBase {
     ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(qualifiedUser1,
         user1Keytab.getAbsolutePath());
     ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
-      AccumuloClient conn = mac.getAccumuloClient(qualifiedUser1, new KerberosToken());
+      AccumuloClient client = mac.getAccumuloClient(qualifiedUser1, new KerberosToken());
 
       // Make sure we can actually use the table we made
 
       // Write data
       final long ts = 1000L;
-      BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+      BatchWriter bw = client.createBatchWriter(table, new BatchWriterConfig());
       Mutation m = new Mutation("a");
       m.put("b", "c", new ColumnVisibility(viz.getBytes()), ts, "d");
       bw.addMutation(m);
       bw.close();
 
       // Compact
-      conn.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
+      client.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
 
       // Alter
-      conn.tableOperations().setProperty(table, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+      client.tableOperations().setProperty(table, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
 
       // Read (and proper authorizations)
-      try (Scanner s = conn.createScanner(table, new Authorizations(viz))) {
+      try (Scanner s = client.createScanner(table, new Authorizations(viz))) {
         Iterator<Entry<Key,Value>> iter = s.iterator();
         assertTrue("No results from iterator", iter.hasNext());
         Entry<Key,Value> entry = iter.next();
@@ -392,12 +393,13 @@ public class KerberosIT extends AccumuloITBase {
     // As the "root" user, open up the connection and get a delegation token
     final AuthenticationToken delegationToken = root
         .doAs((PrivilegedExceptionAction<AuthenticationToken>) () -> {
-          AccumuloClient conn = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
-          log.info("Created connector as {}", rootUser.getPrincipal());
-          assertEquals(rootUser.getPrincipal(), conn.whoami());
+          AccumuloClient client = mac.getAccumuloClient(rootUser.getPrincipal(),
+              new KerberosToken());
+          log.info("Created client as {}", rootUser.getPrincipal());
+          assertEquals(rootUser.getPrincipal(), client.whoami());
 
-          conn.tableOperations().create(tableName);
-          BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+          client.tableOperations().create(tableName);
+          BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
           for (int r = 0; r < numRows; r++) {
             Mutation m = new Mutation(Integer.toString(r));
             for (int c = 0; c < numColumns; c++) {
@@ -408,7 +410,7 @@ public class KerberosIT extends AccumuloITBase {
           }
           bw.close();
 
-          return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+          return client.securityOperations().getDelegationToken(new DelegationTokenConfig());
         });
 
     // The above login with keytab doesn't have a way to logout, so make a fake user that won't have
@@ -416,9 +418,9 @@ public class KerberosIT extends AccumuloITBase {
     UserGroupInformation userWithoutPrivs = UserGroupInformation.createUserForTesting("fake_user",
         new String[0]);
     int recordsSeen = userWithoutPrivs.doAs((PrivilegedExceptionAction<Integer>) () -> {
-      AccumuloClient conn = mac.getAccumuloClient(rootUser.getPrincipal(), delegationToken);
+      AccumuloClient client = mac.getAccumuloClient(rootUser.getPrincipal(), delegationToken);
 
-      try (BatchScanner bs = conn.createBatchScanner(tableName, Authorizations.EMPTY, 2)) {
+      try (BatchScanner bs = client.createBatchScanner(tableName, Authorizations.EMPTY, 2)) {
         bs.setRanges(Collections.singleton(new Range()));
         return Iterables.size(bs);
       }
@@ -438,10 +440,10 @@ public class KerberosIT extends AccumuloITBase {
     try {
       delegationToken = ugi.doAs((PrivilegedExceptionAction<AuthenticationToken>) () -> {
         // As the "root" user, open up the connection and get a delegation token
-        AccumuloClient conn = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
-        log.info("Created connector as {}", rootUser.getPrincipal());
-        assertEquals(rootUser.getPrincipal(), conn.whoami());
-        return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+        AccumuloClient client = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
+        log.info("Created client as {}", rootUser.getPrincipal());
+        assertEquals(rootUser.getPrincipal(), client.whoami());
+        return client.securityOperations().getDelegationToken(new DelegationTokenConfig());
       });
     } catch (UndeclaredThrowableException ex) {
       throw ex;
@@ -486,11 +488,11 @@ public class KerberosIT extends AccumuloITBase {
     try {
       ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
         // As the "root" user, open up the connection and get a delegation token
-        AccumuloClient conn = mac.getAccumuloClient(qualifiedNewUser, new KerberosToken());
-        log.info("Created connector as {}", qualifiedNewUser);
-        assertEquals(qualifiedNewUser, conn.whoami());
+        AccumuloClient client = mac.getAccumuloClient(qualifiedNewUser, new KerberosToken());
+        log.info("Created client as {}", qualifiedNewUser);
+        assertEquals(qualifiedNewUser, client.whoami());
 
-        conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+        client.securityOperations().getDelegationToken(new DelegationTokenConfig());
         return null;
       });
     } catch (UndeclaredThrowableException ex) {
@@ -508,11 +510,12 @@ public class KerberosIT extends AccumuloITBase {
     // As the "root" user, open up the connection and get a delegation token
     final AuthenticationToken delegationToken1 = root
         .doAs((PrivilegedExceptionAction<AuthenticationToken>) () -> {
-          AccumuloClient conn = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
-          log.info("Created connector as {}", rootUser.getPrincipal());
-          assertEquals(rootUser.getPrincipal(), conn.whoami());
+          AccumuloClient client = mac.getAccumuloClient(rootUser.getPrincipal(),
+              new KerberosToken());
+          log.info("Created client as {}", rootUser.getPrincipal());
+          assertEquals(rootUser.getPrincipal(), client.whoami());
 
-          AuthenticationToken token = conn.securityOperations()
+          AuthenticationToken token = client.securityOperations()
               .getDelegationToken(new DelegationTokenConfig());
 
           assertTrue("Could not get tables with delegation token",
@@ -530,10 +533,10 @@ public class KerberosIT extends AccumuloITBase {
 
     // Make sure our original token is still good
     root.doAs((PrivilegedExceptionAction<Void>) () -> {
-      AccumuloClient conn = mac.getAccumuloClient(rootUser.getPrincipal(), delegationToken1);
+      AccumuloClient client = mac.getAccumuloClient(rootUser.getPrincipal(), delegationToken1);
 
       assertTrue("Could not get tables with delegation token",
-          conn.tableOperations().list().size() > 0);
+          client.tableOperations().list().size() > 0);
 
       return null;
     });
@@ -541,11 +544,12 @@ public class KerberosIT extends AccumuloITBase {
     // Get a new token, so we can compare the keyId on the second to the first
     final AuthenticationToken delegationToken2 = root
         .doAs((PrivilegedExceptionAction<AuthenticationToken>) () -> {
-          AccumuloClient conn = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
-          log.info("Created connector as {}", rootUser.getPrincipal());
-          assertEquals(rootUser.getPrincipal(), conn.whoami());
+          AccumuloClient client = mac.getAccumuloClient(rootUser.getPrincipal(),
+              new KerberosToken());
+          log.info("Created client as {}", rootUser.getPrincipal());
+          assertEquals(rootUser.getPrincipal(), client.whoami());
 
-          AuthenticationToken token = conn.securityOperations()
+          AuthenticationToken token = client.securityOperations()
               .getDelegationToken(new DelegationTokenConfig());
 
           assertTrue("Could not get tables with delegation token",
@@ -572,12 +576,12 @@ public class KerberosIT extends AccumuloITBase {
     // As the "root" user, open up the connection and get a delegation token
     try {
       root.doAs((PrivilegedExceptionAction<AuthenticationToken>) () -> {
-        AccumuloClient conn = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
-        log.info("Created connector as {}", rootUser.getPrincipal());
-        assertEquals(rootUser.getPrincipal(), conn.whoami());
+        AccumuloClient client = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
+        log.info("Created client as {}", rootUser.getPrincipal());
+        assertEquals(rootUser.getPrincipal(), client.whoami());
 
         // Should fail
-        return conn.securityOperations().getDelegationToken(
+        return client.securityOperations().getDelegationToken(
             new DelegationTokenConfig().setTokenLifetime(Long.MAX_VALUE, TimeUnit.MILLISECONDS));
       });
     } catch (UndeclaredThrowableException e) {
@@ -600,11 +604,12 @@ public class KerberosIT extends AccumuloITBase {
     // As the "root" user, open up the connection and get a delegation token
     final AuthenticationToken dt = root
         .doAs((PrivilegedExceptionAction<AuthenticationToken>) () -> {
-          AccumuloClient conn = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
-          log.info("Created connector as {}", rootUser.getPrincipal());
-          assertEquals(rootUser.getPrincipal(), conn.whoami());
+          AccumuloClient client = mac.getAccumuloClient(rootUser.getPrincipal(),
+              new KerberosToken());
+          log.info("Created client as {}", rootUser.getPrincipal());
+          assertEquals(rootUser.getPrincipal(), client.whoami());
 
-          return conn.securityOperations().getDelegationToken(
+          return client.securityOperations().getDelegationToken(
               new DelegationTokenConfig().setTokenLifetime(5, TimeUnit.MINUTES));
         });
 
@@ -619,12 +624,13 @@ public class KerberosIT extends AccumuloITBase {
     UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
         rootUser.getKeytab().getAbsolutePath());
 
-    final AccumuloClient conn = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
+    final AccumuloClient client = mac.getAccumuloClient(rootUser.getPrincipal(),
+        new KerberosToken());
 
     // The server-side implementation should prevent the revocation of the 'root' user's systems
     // permissions
     // because once they're gone, it's possible that they could never be restored.
-    conn.securityOperations().revokeSystemPermission(rootUser.getPrincipal(),
+    client.securityOperations().revokeSystemPermission(rootUser.getPrincipal(),
         SystemPermission.GRANT);
   }
 
@@ -633,15 +639,15 @@ public class KerberosIT extends AccumuloITBase {
    * that the system user exists (since the master does an RPC to the tserver which will create the
    * system user if it doesn't already exist).
    */
-  private void createTableWithDataAndCompact(AccumuloClient conn) throws TableNotFoundException,
+  private void createTableWithDataAndCompact(AccumuloClient client) throws TableNotFoundException,
       AccumuloSecurityException, AccumuloException, TableExistsException {
     final String table = testName.getMethodName() + "_table";
-    conn.tableOperations().create(table);
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    client.tableOperations().create(table);
+    BatchWriter bw = client.createBatchWriter(table, new BatchWriterConfig());
     Mutation m = new Mutation("a");
     m.put("b", "c", "d");
     bw.addMutation(m);
     bw.close();
-    conn.tableOperations().compact(table, new CompactionConfig().setFlush(true).setWait(true));
+    client.tableOperations().compact(table, new CompactionConfig().setFlush(true).setWait(true));
   }
 }
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java b/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
index eb95ad2..23c288d 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
@@ -540,21 +540,21 @@ public class KerberosProxyIT extends AccumuloITBase {
 
     // Create a table and user, grant permission to our user to read that table.
     rootUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
-      AccumuloClient conn = mac.getAccumuloClient(rootUgi.getUserName(), new KerberosToken());
-      conn.tableOperations().create(tableName);
-      conn.securityOperations().createLocalUser(userWithoutCredentials1,
+      AccumuloClient client = mac.getAccumuloClient(rootUgi.getUserName(), new KerberosToken());
+      client.tableOperations().create(tableName);
+      client.securityOperations().createLocalUser(userWithoutCredentials1,
           new PasswordToken("ignored"));
-      conn.securityOperations().grantTablePermission(userWithoutCredentials1, tableName,
+      client.securityOperations().grantTablePermission(userWithoutCredentials1, tableName,
           TablePermission.READ);
-      conn.securityOperations().createLocalUser(userWithoutCredentials3,
+      client.securityOperations().createLocalUser(userWithoutCredentials3,
           new PasswordToken("ignored"));
-      conn.securityOperations().grantTablePermission(userWithoutCredentials3, tableName,
+      client.securityOperations().grantTablePermission(userWithoutCredentials3, tableName,
           TablePermission.READ);
       return null;
     });
     realUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
-      AccumuloClient conn = mac.getAccumuloClient(proxyPrincipal, new KerberosToken());
-      try (Scanner s = conn.createScanner(tableName, Authorizations.EMPTY)) {
+      AccumuloClient client = mac.getAccumuloClient(proxyPrincipal, new KerberosToken());
+      try (Scanner s = client.createScanner(tableName, Authorizations.EMPTY)) {
         s.iterator().hasNext();
         fail("Expected to see an exception");
       } catch (RuntimeException e) {
@@ -568,17 +568,17 @@ public class KerberosProxyIT extends AccumuloITBase {
     });
     // Allowed to be proxied and has read permission
     proxyUser1.doAs((PrivilegedExceptionAction<Void>) () -> {
-      AccumuloClient conn = mac.getAccumuloClient(userWithoutCredentials1,
+      AccumuloClient client = mac.getAccumuloClient(userWithoutCredentials1,
           new KerberosToken(userWithoutCredentials1));
-      Scanner s = conn.createScanner(tableName, Authorizations.EMPTY);
+      Scanner s = client.createScanner(tableName, Authorizations.EMPTY);
       assertFalse(s.iterator().hasNext());
       return null;
     });
     // Allowed to be proxied but does not have read permission
     proxyUser2.doAs((PrivilegedExceptionAction<Void>) () -> {
-      AccumuloClient conn = mac.getAccumuloClient(userWithoutCredentials2,
+      AccumuloClient client = mac.getAccumuloClient(userWithoutCredentials2,
           new KerberosToken(userWithoutCredentials3));
-      try (Scanner s = conn.createScanner(tableName, Authorizations.EMPTY)) {
+      try (Scanner s = client.createScanner(tableName, Authorizations.EMPTY)) {
         s.iterator().hasNext();
         fail("Expected to see an exception");
       } catch (RuntimeException e) {
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/KerberosRenewalIT.java b/test/src/main/java/org/apache/accumulo/test/functional/KerberosRenewalIT.java
index f0f5449..1cb2b26 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/KerberosRenewalIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/KerberosRenewalIT.java
@@ -155,16 +155,16 @@ public class KerberosRenewalIT extends AccumuloITBase {
         rootUser.getKeytab().getAbsolutePath());
     log.info("Logged in as {}", rootUser.getPrincipal());
 
-    AccumuloClient conn = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
-    log.info("Created connector as {}", rootUser.getPrincipal());
-    assertEquals(rootUser.getPrincipal(), conn.whoami());
+    AccumuloClient client = mac.getAccumuloClient(rootUser.getPrincipal(), new KerberosToken());
+    log.info("Created client as {}", rootUser.getPrincipal());
+    assertEquals(rootUser.getPrincipal(), client.whoami());
 
     long duration = 0;
     long last = System.currentTimeMillis();
     // Make sure we have a couple renewals happen
     while (duration < TICKET_TEST_LIFETIME) {
       // Create a table, write a record, compact, read the record, drop the table.
-      createReadWriteDrop(conn);
+      createReadWriteDrop(client);
       // Wait a bit after
       Thread.sleep(5000);
 
@@ -180,22 +180,22 @@ public class KerberosRenewalIT extends AccumuloITBase {
    * that the system user exists (since the master does an RPC to the tserver which will create the
    * system user if it doesn't already exist).
    */
-  private void createReadWriteDrop(AccumuloClient conn) throws TableNotFoundException,
+  private void createReadWriteDrop(AccumuloClient client) throws TableNotFoundException,
       AccumuloSecurityException, AccumuloException, TableExistsException {
     final String table = testName.getMethodName() + "_table";
-    conn.tableOperations().create(table);
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    client.tableOperations().create(table);
+    BatchWriter bw = client.createBatchWriter(table, new BatchWriterConfig());
     Mutation m = new Mutation("a");
     m.put("b", "c", "d");
     bw.addMutation(m);
     bw.close();
-    conn.tableOperations().compact(table, new CompactionConfig().setFlush(true).setWait(true));
-    try (Scanner s = conn.createScanner(table, Authorizations.EMPTY)) {
+    client.tableOperations().compact(table, new CompactionConfig().setFlush(true).setWait(true));
+    try (Scanner s = client.createScanner(table, Authorizations.EMPTY)) {
       Entry<Key,Value> entry = Iterables.getOnlyElement(s);
       assertEquals("Did not find the expected key", 0,
           new Key("a", "b", "c").compareTo(entry.getKey(), PartialKey.ROW_COLFAM_COLQUAL));
       assertEquals("d", entry.getValue().toString());
-      conn.tableOperations().delete(table);
+      client.tableOperations().delete(table);
     }
   }
 }
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java b/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
index 1577f98..b9f0cf9 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
@@ -101,8 +101,8 @@ public class LargeRowIT extends AccumuloClusterHarness {
   @After
   public void resetMajcDelay() throws Exception {
     if (null != tservMajcDelay) {
-      AccumuloClient conn = getAccumuloClient();
-      conn.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), tservMajcDelay);
+      AccumuloClient client = getAccumuloClient();
+      client.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), tservMajcDelay);
     }
   }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java b/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
index 7428497..86b5ebf 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
@@ -87,17 +87,18 @@ public class LogicalTimeIT extends AccumuloClusterHarness {
 
   }
 
-  private void runMergeTest(AccumuloClient conn, String table, String[] splits, String[] inserts,
+  private void runMergeTest(AccumuloClient client, String table, String[] splits, String[] inserts,
       String start, String end, String last, long expected) throws Exception {
     log.info("table {}", table);
-    conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
+    client.tableOperations().create(table,
+        new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
     TreeSet<Text> splitSet = new TreeSet<>();
     for (String split : splits) {
       splitSet.add(new Text(split));
     }
-    conn.tableOperations().addSplits(table, splitSet);
+    client.tableOperations().addSplits(table, splitSet);
 
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(table, new BatchWriterConfig());
     for (String row : inserts) {
       Mutation m = new Mutation(row);
       m.put("cf", "cq", "v");
@@ -106,7 +107,7 @@ public class LogicalTimeIT extends AccumuloClusterHarness {
 
     bw.flush();
 
-    conn.tableOperations().merge(table, start == null ? null : new Text(start),
+    client.tableOperations().merge(table, start == null ? null : new Text(start),
         end == null ? null : new Text(end));
 
     Mutation m = new Mutation(last);
@@ -114,7 +115,7 @@ public class LogicalTimeIT extends AccumuloClusterHarness {
     bw.addMutation(m);
     bw.flush();
 
-    try (Scanner scanner = conn.createScanner(table, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
       scanner.setRange(new Range(last));
 
       bw.close();
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
index c781fba..a2eb76e 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
@@ -156,19 +156,20 @@ public class MergeIT extends AccumuloClusterHarness {
     }
   }
 
-  private void runMergeTest(AccumuloClient conn, String table, String[] splits,
+  private void runMergeTest(AccumuloClient client, String table, String[] splits,
       String[] expectedSplits, String[] inserts, String start, String end) throws Exception {
     System.out.println(
         "Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);
 
-    conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
+    client.tableOperations().create(table,
+        new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
     TreeSet<Text> splitSet = new TreeSet<>();
     for (String split : splits) {
       splitSet.add(new Text(split));
     }
-    conn.tableOperations().addSplits(table, splitSet);
+    client.tableOperations().addSplits(table, splitSet);
 
-    BatchWriter bw = conn.createBatchWriter(table, null);
+    BatchWriter bw = client.createBatchWriter(table, null);
     HashSet<String> expected = new HashSet<>();
     for (String row : inserts) {
       Mutation m = new Mutation(row);
@@ -179,10 +180,10 @@ public class MergeIT extends AccumuloClusterHarness {
 
     bw.close();
 
-    conn.tableOperations().merge(table, start == null ? null : new Text(start),
+    client.tableOperations().merge(table, start == null ? null : new Text(start),
         end == null ? null : new Text(end));
 
-    try (Scanner scanner = conn.createScanner(table, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
 
       HashSet<String> observed = new HashSet<>();
       for (Entry<Key,Value> entry : scanner) {
@@ -196,7 +197,7 @@ public class MergeIT extends AccumuloClusterHarness {
         throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
       }
 
-      HashSet<Text> currentSplits = new HashSet<>(conn.tableOperations().listSplits(table));
+      HashSet<Text> currentSplits = new HashSet<>(client.tableOperations().listSplits(table));
       HashSet<Text> ess = new HashSet<>();
       for (String es : expectedSplits) {
         ess.add(new Text(es));
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java
index 72bc7c7..31f2750 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java
@@ -108,7 +108,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
     loginAs(rootUser);
     c.securityOperations().createLocalUser(principal, passwordToken);
     loginAs(testUser);
-    AccumuloClient test_user_conn = c.changeUser(principal, token);
+    AccumuloClient test_user_client = c.changeUser(principal, token);
     loginAs(rootUser);
     verifyHasNoSystemPermissions(c, principal, SystemPermission.values());
 
@@ -118,11 +118,11 @@ public class PermissionsIT extends AccumuloClusterHarness {
 
       // test permission before and after granting it
       String tableNamePrefix = getUniqueNames(1)[0];
-      testMissingSystemPermission(tableNamePrefix, c, rootUser, test_user_conn, testUser, perm);
+      testMissingSystemPermission(tableNamePrefix, c, rootUser, test_user_client, testUser, perm);
       loginAs(rootUser);
       c.securityOperations().grantSystemPermission(principal, perm);
       verifyHasOnlyTheseSystemPermissions(c, principal, perm);
-      testGrantedSystemPermission(tableNamePrefix, c, rootUser, test_user_conn, testUser, perm);
+      testGrantedSystemPermission(tableNamePrefix, c, rootUser, test_user_client, testUser, perm);
       loginAs(rootUser);
       c.securityOperations().revokeSystemPermission(principal, perm);
       verifyHasNoSystemPermissions(c, principal, perm);
@@ -137,8 +137,8 @@ public class PermissionsIT extends AccumuloClusterHarness {
     return result;
   }
 
-  private void testMissingSystemPermission(String tableNamePrefix, AccumuloClient root_conn,
-      ClusterUser rootUser, AccumuloClient test_user_conn, ClusterUser testUser,
+  private void testMissingSystemPermission(String tableNamePrefix, AccumuloClient root_client,
+      ClusterUser rootUser, AccumuloClient test_user_client, ClusterUser testUser,
       SystemPermission perm) throws Exception {
     String tableName, user, password = "password", namespace;
     boolean passwordBased = testUser.getPassword() != null;
@@ -150,71 +150,71 @@ public class PermissionsIT extends AccumuloClusterHarness {
         tableName = tableNamePrefix + "__CREATE_TABLE_WITHOUT_PERM_TEST__";
         try {
           loginAs(testUser);
-          test_user_conn.tableOperations().create(tableName);
+          test_user_client.tableOperations().create(tableName);
           throw new IllegalStateException("Should NOT be able to create a table");
         } catch (AccumuloSecurityException e) {
           loginAs(rootUser);
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || root_conn.tableOperations().list().contains(tableName))
+              || root_client.tableOperations().list().contains(tableName))
             throw e;
         }
         break;
       case DROP_TABLE:
         tableName = tableNamePrefix + "__DROP_TABLE_WITHOUT_PERM_TEST__";
         loginAs(rootUser);
-        root_conn.tableOperations().create(tableName);
+        root_client.tableOperations().create(tableName);
         try {
           loginAs(testUser);
-          test_user_conn.tableOperations().delete(tableName);
+          test_user_client.tableOperations().delete(tableName);
           throw new IllegalStateException("Should NOT be able to delete a table");
         } catch (AccumuloSecurityException e) {
           loginAs(rootUser);
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || !root_conn.tableOperations().list().contains(tableName))
+              || !root_client.tableOperations().list().contains(tableName))
             throw e;
         }
         break;
       case ALTER_TABLE:
         tableName = tableNamePrefix + "__ALTER_TABLE_WITHOUT_PERM_TEST__";
         loginAs(rootUser);
-        root_conn.tableOperations().create(tableName);
+        root_client.tableOperations().create(tableName);
         try {
           loginAs(testUser);
-          test_user_conn.tableOperations().setProperty(tableName,
+          test_user_client.tableOperations().setProperty(tableName,
               Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
           throw new IllegalStateException("Should NOT be able to set a table property");
         } catch (AccumuloSecurityException e) {
           loginAs(rootUser);
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || map(root_conn.tableOperations().getProperties(tableName))
+              || map(root_client.tableOperations().getProperties(tableName))
                   .get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
             throw e;
         }
         loginAs(rootUser);
-        root_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(),
-            "003.14159%");
+        root_client.tableOperations().setProperty(tableName,
+            Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
         try {
           loginAs(testUser);
-          test_user_conn.tableOperations().removeProperty(tableName,
+          test_user_client.tableOperations().removeProperty(tableName,
               Property.TABLE_BLOOM_ERRORRATE.getKey());
           throw new IllegalStateException("Should NOT be able to remove a table property");
         } catch (AccumuloSecurityException e) {
           loginAs(rootUser);
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || !map(root_conn.tableOperations().getProperties(tableName))
+              || !map(root_client.tableOperations().getProperties(tableName))
                   .get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
             throw e;
         }
         String table2 = tableName + "2";
         try {
           loginAs(testUser);
-          test_user_conn.tableOperations().rename(tableName, table2);
+          test_user_client.tableOperations().rename(tableName, table2);
           throw new IllegalStateException("Should NOT be able to rename a table");
         } catch (AccumuloSecurityException e) {
           loginAs(rootUser);
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || !root_conn.tableOperations().list().contains(tableName)
-              || root_conn.tableOperations().list().contains(table2))
+              || !root_client.tableOperations().list().contains(tableName)
+              || root_client.tableOperations().list().contains(table2))
             throw e;
         }
         break;
@@ -222,7 +222,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
         user = "__CREATE_USER_WITHOUT_PERM_TEST__";
         try {
           loginAs(testUser);
-          test_user_conn.securityOperations().createLocalUser(user,
+          test_user_client.securityOperations().createLocalUser(user,
               (passwordBased ? new PasswordToken(password) : null));
           throw new IllegalStateException("Should NOT be able to create a user");
         } catch (AccumuloSecurityException e) {
@@ -230,23 +230,23 @@ public class PermissionsIT extends AccumuloClusterHarness {
           loginAs(rootUser);
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
               || (userToken instanceof PasswordToken
-                  && root_conn.securityOperations().authenticateUser(user, userToken)))
+                  && root_client.securityOperations().authenticateUser(user, userToken)))
             throw e;
         }
         break;
       case DROP_USER:
         user = "__DROP_USER_WITHOUT_PERM_TEST__";
         loginAs(rootUser);
-        root_conn.securityOperations().createLocalUser(user,
+        root_client.securityOperations().createLocalUser(user,
             (passwordBased ? new PasswordToken(password) : null));
         try {
           loginAs(testUser);
-          test_user_conn.securityOperations().dropLocalUser(user);
+          test_user_client.securityOperations().dropLocalUser(user);
           throw new IllegalStateException("Should NOT be able to delete a user");
         } catch (AccumuloSecurityException e) {
           loginAs(rootUser);
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || !root_conn.securityOperations().listLocalUsers().contains(user)) {
+              || !root_client.securityOperations().listLocalUsers().contains(user)) {
             log.info("Failed to authenticate as {}", user);
             throw e;
           }
@@ -255,17 +255,17 @@ public class PermissionsIT extends AccumuloClusterHarness {
       case ALTER_USER:
         user = "__ALTER_USER_WITHOUT_PERM_TEST__";
         loginAs(rootUser);
-        root_conn.securityOperations().createLocalUser(user,
+        root_client.securityOperations().createLocalUser(user,
             (passwordBased ? new PasswordToken(password) : null));
         try {
           loginAs(testUser);
-          test_user_conn.securityOperations().changeUserAuthorizations(user,
+          test_user_client.securityOperations().changeUserAuthorizations(user,
               new Authorizations("A", "B"));
           throw new IllegalStateException("Should NOT be able to alter a user");
         } catch (AccumuloSecurityException e) {
           loginAs(rootUser);
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || !root_conn.securityOperations().getUserAuthorizations(user).isEmpty())
+              || !root_client.securityOperations().getUserAuthorizations(user).isEmpty())
             throw e;
         }
         break;
@@ -276,71 +276,71 @@ public class PermissionsIT extends AccumuloClusterHarness {
         namespace = "__CREATE_NAMESPACE_WITHOUT_PERM_TEST__";
         try {
           loginAs(testUser);
-          test_user_conn.namespaceOperations().create(namespace);
+          test_user_client.namespaceOperations().create(namespace);
           throw new IllegalStateException("Should NOT be able to create a namespace");
         } catch (AccumuloSecurityException e) {
           loginAs(rootUser);
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || root_conn.namespaceOperations().list().contains(namespace))
+              || root_client.namespaceOperations().list().contains(namespace))
             throw e;
         }
         break;
       case DROP_NAMESPACE:
         namespace = "__DROP_NAMESPACE_WITHOUT_PERM_TEST__";
         loginAs(rootUser);
-        root_conn.namespaceOperations().create(namespace);
+        root_client.namespaceOperations().create(namespace);
         try {
           loginAs(testUser);
-          test_user_conn.namespaceOperations().delete(namespace);
+          test_user_client.namespaceOperations().delete(namespace);
           throw new IllegalStateException("Should NOT be able to delete a namespace");
         } catch (AccumuloSecurityException e) {
           loginAs(rootUser);
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || !root_conn.namespaceOperations().list().contains(namespace))
+              || !root_client.namespaceOperations().list().contains(namespace))
             throw e;
         }
         break;
       case ALTER_NAMESPACE:
         namespace = "__ALTER_NAMESPACE_WITHOUT_PERM_TEST__";
         loginAs(rootUser);
-        root_conn.namespaceOperations().create(namespace);
+        root_client.namespaceOperations().create(namespace);
         try {
           loginAs(testUser);
-          test_user_conn.namespaceOperations().setProperty(namespace,
+          test_user_client.namespaceOperations().setProperty(namespace,
               Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
           throw new IllegalStateException("Should NOT be able to set a namespace property");
         } catch (AccumuloSecurityException e) {
           loginAs(rootUser);
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || map(root_conn.namespaceOperations().getProperties(namespace))
+              || map(root_client.namespaceOperations().getProperties(namespace))
                   .get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
             throw e;
         }
         loginAs(rootUser);
-        root_conn.namespaceOperations().setProperty(namespace,
+        root_client.namespaceOperations().setProperty(namespace,
             Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
         try {
           loginAs(testUser);
-          test_user_conn.namespaceOperations().removeProperty(namespace,
+          test_user_client.namespaceOperations().removeProperty(namespace,
               Property.TABLE_BLOOM_ERRORRATE.getKey());
           throw new IllegalStateException("Should NOT be able to remove a namespace property");
         } catch (AccumuloSecurityException e) {
           loginAs(rootUser);
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || !map(root_conn.namespaceOperations().getProperties(namespace))
+              || !map(root_client.namespaceOperations().getProperties(namespace))
                   .get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
             throw e;
         }
         String namespace2 = namespace + "2";
         try {
           loginAs(testUser);
-          test_user_conn.namespaceOperations().rename(namespace, namespace2);
+          test_user_client.namespaceOperations().rename(namespace, namespace2);
           throw new IllegalStateException("Should NOT be able to rename a namespace");
         } catch (AccumuloSecurityException e) {
           loginAs(rootUser);
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-              || !root_conn.namespaceOperations().list().contains(namespace)
-              || root_conn.namespaceOperations().list().contains(namespace2))
+              || !root_client.namespaceOperations().list().contains(namespace)
+              || root_client.namespaceOperations().list().contains(namespace2))
             throw e;
         }
         break;
@@ -352,13 +352,13 @@ public class PermissionsIT extends AccumuloClusterHarness {
       case GRANT:
         loginAs(testUser);
         try {
-          test_user_conn.securityOperations().grantSystemPermission(testUser.getPrincipal(),
+          test_user_client.securityOperations().grantSystemPermission(testUser.getPrincipal(),
               SystemPermission.GRANT);
           throw new IllegalStateException("Should NOT be able to grant System.GRANT to yourself");
         } catch (AccumuloSecurityException e) {
           // Expected
           loginAs(rootUser);
-          assertFalse(root_conn.securityOperations().hasSystemPermission(testUser.getPrincipal(),
+          assertFalse(root_client.securityOperations().hasSystemPermission(testUser.getPrincipal(),
               SystemPermission.GRANT));
         }
         break;
@@ -367,8 +367,8 @@ public class PermissionsIT extends AccumuloClusterHarness {
     }
   }
 
-  private void testGrantedSystemPermission(String tableNamePrefix, AccumuloClient root_conn,
-      ClusterUser rootUser, AccumuloClient test_user_conn, ClusterUser testUser,
+  private void testGrantedSystemPermission(String tableNamePrefix, AccumuloClient root_client,
+      ClusterUser rootUser, AccumuloClient test_user_client, ClusterUser testUser,
       SystemPermission perm) throws Exception {
     String tableName, user, password = "password", namespace;
     boolean passwordBased = testUser.getPassword() != null;
@@ -379,79 +379,79 @@ public class PermissionsIT extends AccumuloClusterHarness {
       case CREATE_TABLE:
         tableName = tableNamePrefix + "__CREATE_TABLE_WITH_PERM_TEST__";
         loginAs(testUser);
-        test_user_conn.tableOperations().create(tableName);
+        test_user_client.tableOperations().create(tableName);
         loginAs(rootUser);
-        if (!root_conn.tableOperations().list().contains(tableName))
+        if (!root_client.tableOperations().list().contains(tableName))
           throw new IllegalStateException("Should be able to create a table");
         break;
       case DROP_TABLE:
         tableName = tableNamePrefix + "__DROP_TABLE_WITH_PERM_TEST__";
         loginAs(rootUser);
-        root_conn.tableOperations().create(tableName);
+        root_client.tableOperations().create(tableName);
         loginAs(testUser);
-        test_user_conn.tableOperations().delete(tableName);
+        test_user_client.tableOperations().delete(tableName);
         loginAs(rootUser);
-        if (root_conn.tableOperations().list().contains(tableName))
+        if (root_client.tableOperations().list().contains(tableName))
           throw new IllegalStateException("Should be able to delete a table");
         break;
       case ALTER_TABLE:
         tableName = tableNamePrefix + "__ALTER_TABLE_WITH_PERM_TEST__";
         String table2 = tableName + "2";
         loginAs(rootUser);
-        root_conn.tableOperations().create(tableName);
+        root_client.tableOperations().create(tableName);
         loginAs(testUser);
-        test_user_conn.tableOperations().setProperty(tableName,
+        test_user_client.tableOperations().setProperty(tableName,
             Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
         loginAs(rootUser);
-        Map<String,String> properties = map(root_conn.tableOperations().getProperties(tableName));
+        Map<String,String> properties = map(root_client.tableOperations().getProperties(tableName));
         if (!properties.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
           throw new IllegalStateException("Should be able to set a table property");
         loginAs(testUser);
-        test_user_conn.tableOperations().removeProperty(tableName,
+        test_user_client.tableOperations().removeProperty(tableName,
             Property.TABLE_BLOOM_ERRORRATE.getKey());
         loginAs(rootUser);
-        properties = map(root_conn.tableOperations().getProperties(tableName));
+        properties = map(root_client.tableOperations().getProperties(tableName));
         if (properties.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
           throw new IllegalStateException("Should be able to remove a table property");
         loginAs(testUser);
-        test_user_conn.tableOperations().rename(tableName, table2);
+        test_user_client.tableOperations().rename(tableName, table2);
         loginAs(rootUser);
-        if (root_conn.tableOperations().list().contains(tableName)
-            || !root_conn.tableOperations().list().contains(table2))
+        if (root_client.tableOperations().list().contains(tableName)
+            || !root_client.tableOperations().list().contains(table2))
           throw new IllegalStateException("Should be able to rename a table");
         break;
       case CREATE_USER:
         user = "__CREATE_USER_WITH_PERM_TEST__";
         loginAs(testUser);
-        test_user_conn.securityOperations().createLocalUser(user,
+        test_user_client.securityOperations().createLocalUser(user,
             (passwordBased ? new PasswordToken(password) : null));
         loginAs(rootUser);
-        if (passwordBased
-            && !root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
+        if (passwordBased && !root_client.securityOperations().authenticateUser(user,
+            new PasswordToken(password)))
           throw new IllegalStateException("Should be able to create a user");
         break;
       case DROP_USER:
         user = "__DROP_USER_WITH_PERM_TEST__";
         loginAs(rootUser);
-        root_conn.securityOperations().createLocalUser(user,
+        root_client.securityOperations().createLocalUser(user,
             (passwordBased ? new PasswordToken(password) : null));
         loginAs(testUser);
-        test_user_conn.securityOperations().dropLocalUser(user);
+        test_user_client.securityOperations().dropLocalUser(user);
         loginAs(rootUser);
         if (passwordBased
-            && root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
+            && root_client.securityOperations().authenticateUser(user, new PasswordToken(password)))
           throw new IllegalStateException("Should be able to delete a user");
         break;
       case ALTER_USER:
         user = "__ALTER_USER_WITH_PERM_TEST__";
         loginAs(rootUser);
-        root_conn.securityOperations().createLocalUser(user,
+        root_client.securityOperations().createLocalUser(user,
             (passwordBased ? new PasswordToken(password) : null));
         loginAs(testUser);
-        test_user_conn.securityOperations().changeUserAuthorizations(user,
+        test_user_client.securityOperations().changeUserAuthorizations(user,
             new Authorizations("A", "B"));
         loginAs(rootUser);
-        if (root_conn.securityOperations().getUserAuthorizations(user).isEmpty())
+        if (root_client.securityOperations().getUserAuthorizations(user).isEmpty())
           throw new IllegalStateException("Should be able to alter a user");
         break;
       case SYSTEM:
@@ -460,45 +460,46 @@ public class PermissionsIT extends AccumuloClusterHarness {
       case CREATE_NAMESPACE:
         namespace = "__CREATE_NAMESPACE_WITH_PERM_TEST__";
         loginAs(testUser);
-        test_user_conn.namespaceOperations().create(namespace);
+        test_user_client.namespaceOperations().create(namespace);
         loginAs(rootUser);
-        if (!root_conn.namespaceOperations().list().contains(namespace))
+        if (!root_client.namespaceOperations().list().contains(namespace))
           throw new IllegalStateException("Should be able to create a namespace");
         break;
       case DROP_NAMESPACE:
         namespace = "__DROP_NAMESPACE_WITH_PERM_TEST__";
         loginAs(rootUser);
-        root_conn.namespaceOperations().create(namespace);
+        root_client.namespaceOperations().create(namespace);
         loginAs(testUser);
-        test_user_conn.namespaceOperations().delete(namespace);
+        test_user_client.namespaceOperations().delete(namespace);
         loginAs(rootUser);
-        if (root_conn.namespaceOperations().list().contains(namespace))
+        if (root_client.namespaceOperations().list().contains(namespace))
           throw new IllegalStateException("Should be able to delete a namespace");
         break;
       case ALTER_NAMESPACE:
         namespace = "__ALTER_NAMESPACE_WITH_PERM_TEST__";
         String namespace2 = namespace + "2";
         loginAs(rootUser);
-        root_conn.namespaceOperations().create(namespace);
+        root_client.namespaceOperations().create(namespace);
         loginAs(testUser);
-        test_user_conn.namespaceOperations().setProperty(namespace,
+        test_user_client.namespaceOperations().setProperty(namespace,
             Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
         loginAs(rootUser);
-        Map<String,String> propies = map(root_conn.namespaceOperations().getProperties(namespace));
+        Map<String,String> propies = map(
+            root_client.namespaceOperations().getProperties(namespace));
         if (!propies.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
           throw new IllegalStateException("Should be able to set a table property");
         loginAs(testUser);
-        test_user_conn.namespaceOperations().removeProperty(namespace,
+        test_user_client.namespaceOperations().removeProperty(namespace,
             Property.TABLE_BLOOM_ERRORRATE.getKey());
         loginAs(rootUser);
-        propies = map(root_conn.namespaceOperations().getProperties(namespace));
+        propies = map(root_client.namespaceOperations().getProperties(namespace));
         if (propies.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
           throw new IllegalStateException("Should be able to remove a table property");
         loginAs(testUser);
-        test_user_conn.namespaceOperations().rename(namespace, namespace2);
+        test_user_client.namespaceOperations().rename(namespace, namespace2);
         loginAs(rootUser);
-        if (root_conn.namespaceOperations().list().contains(namespace)
-            || !root_conn.namespaceOperations().list().contains(namespace2))
+        if (root_client.namespaceOperations().list().contains(namespace)
+            || !root_client.namespaceOperations().list().contains(namespace2))
           throw new IllegalStateException("Should be able to rename a table");
         break;
       case OBTAIN_DELEGATION_TOKEN:
@@ -508,17 +509,17 @@ public class PermissionsIT extends AccumuloClusterHarness {
         break;
       case GRANT:
         loginAs(rootUser);
-        root_conn.securityOperations().grantSystemPermission(testUser.getPrincipal(),
+        root_client.securityOperations().grantSystemPermission(testUser.getPrincipal(),
             SystemPermission.GRANT);
         loginAs(testUser);
-        test_user_conn.securityOperations().grantSystemPermission(testUser.getPrincipal(),
+        test_user_client.securityOperations().grantSystemPermission(testUser.getPrincipal(),
             SystemPermission.CREATE_TABLE);
         loginAs(rootUser);
-        assertTrue("Test user should have CREATE_TABLE", root_conn.securityOperations()
+        assertTrue("Test user should have CREATE_TABLE", root_client.securityOperations()
             .hasSystemPermission(testUser.getPrincipal(), SystemPermission.CREATE_TABLE));
-        assertTrue("Test user should have GRANT", root_conn.securityOperations()
+        assertTrue("Test user should have GRANT", root_client.securityOperations()
             .hasSystemPermission(testUser.getPrincipal(), SystemPermission.GRANT));
-        root_conn.securityOperations().revokeSystemPermission(testUser.getPrincipal(),
+        root_client.securityOperations().revokeSystemPermission(testUser.getPrincipal(),
             SystemPermission.CREATE_TABLE);
         break;
       default:
@@ -526,26 +527,26 @@ public class PermissionsIT extends AccumuloClusterHarness {
     }
   }
 
-  private void verifyHasOnlyTheseSystemPermissions(AccumuloClient root_conn, String user,
+  private void verifyHasOnlyTheseSystemPermissions(AccumuloClient root_client, String user,
       SystemPermission... perms) throws AccumuloException, AccumuloSecurityException {
     List<SystemPermission> permList = Arrays.asList(perms);
     for (SystemPermission p : SystemPermission.values()) {
       if (permList.contains(p)) {
         // should have these
-        if (!root_conn.securityOperations().hasSystemPermission(user, p))
+        if (!root_client.securityOperations().hasSystemPermission(user, p))
           throw new IllegalStateException(user + " SHOULD have system permission " + p);
       } else {
         // should not have these
-        if (root_conn.securityOperations().hasSystemPermission(user, p))
+        if (root_client.securityOperations().hasSystemPermission(user, p))
           throw new IllegalStateException(user + " SHOULD NOT have system permission " + p);
       }
     }
   }
 
-  private void verifyHasNoSystemPermissions(AccumuloClient root_conn, String user,
+  private void verifyHasNoSystemPermissions(AccumuloClient root_client, String user,
       SystemPermission... perms) throws AccumuloException, AccumuloSecurityException {
     for (SystemPermission p : perms)
-      if (root_conn.securityOperations().hasSystemPermission(user, p))
+      if (root_client.securityOperations().hasSystemPermission(user, p))
         throw new IllegalStateException(user + " SHOULD NOT have system permission " + p);
   }
 
@@ -564,7 +565,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
     AccumuloClient c = getAccumuloClient();
     c.securityOperations().createLocalUser(principal, passwordToken);
     loginAs(testUser);
-    AccumuloClient test_user_conn = c.changeUser(principal, token);
+    AccumuloClient test_user_client = c.changeUser(principal, token);
 
     // check for read-only access to metadata table
     loginAs(rootUser);
@@ -580,12 +581,12 @@ public class PermissionsIT extends AccumuloClusterHarness {
       // test permission before and after granting it
       createTestTable(c, principal, tableName);
       loginAs(testUser);
-      testMissingTablePermission(test_user_conn, testUser, perm, tableName);
+      testMissingTablePermission(test_user_client, testUser, perm, tableName);
       loginAs(rootUser);
       c.securityOperations().grantTablePermission(principal, tableName, perm);
       verifyHasOnlyTheseTablePermissions(c, principal, tableName, perm);
       loginAs(testUser);
-      testGrantedTablePermission(test_user_conn, testUser, perm, tableName);
+      testGrantedTablePermission(test_user_client, testUser, perm, tableName);
 
       loginAs(rootUser);
       createTestTable(c, principal, tableName);
@@ -613,7 +614,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
     }
   }
 
-  private void testMissingTablePermission(AccumuloClient test_user_conn, ClusterUser testUser,
+  private void testMissingTablePermission(AccumuloClient test_user_client, ClusterUser testUser,
       TablePermission perm, String tableName) throws Exception {
     BatchWriter writer;
     Mutation m;
@@ -622,7 +623,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
     // test permission prior to granting it
     switch (perm) {
       case READ:
-        try (Scanner scanner = test_user_conn.createScanner(tableName, Authorizations.EMPTY)) {
+        try (Scanner scanner = test_user_client.createScanner(tableName, Authorizations.EMPTY)) {
           int i = 0;
           for (Entry<Key,Value> entry : scanner)
             i += 1 + entry.getKey().getRowData().length();
@@ -637,7 +638,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
         break;
       case WRITE:
         try {
-          writer = test_user_conn.createBatchWriter(tableName, new BatchWriterConfig());
+          writer = test_user_client.createBatchWriter(tableName, new BatchWriterConfig());
           m = new Mutation(new Text("row"));
           m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
           writer.addMutation(m);
@@ -645,7 +646,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
             writer.close();
           } catch (MutationsRejectedException e1) {
             if (e1.getSecurityErrorCodes().size() > 0)
-              throw new AccumuloSecurityException(test_user_conn.whoami(),
+              throw new AccumuloSecurityException(test_user_client.whoami(),
                   org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode.PERMISSION_DENIED,
                   e1);
           }
@@ -662,7 +663,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
         Map<String,Set<Text>> groups = new HashMap<>();
         groups.put("tgroup", new HashSet<>(Arrays.asList(new Text("t1"), new Text("t2"))));
         try {
-          test_user_conn.tableOperations().setLocalityGroups(tableName, groups);
+          test_user_client.tableOperations().setLocalityGroups(tableName, groups);
           throw new IllegalStateException("User should not be able to set locality groups");
         } catch (AccumuloSecurityException e) {
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
@@ -671,7 +672,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
         break;
       case DROP_TABLE:
         try {
-          test_user_conn.tableOperations().delete(tableName);
+          test_user_client.tableOperations().delete(tableName);
           throw new IllegalStateException("User should not be able delete the table");
         } catch (AccumuloSecurityException e) {
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
@@ -680,7 +681,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
         break;
       case GRANT:
         try {
-          test_user_conn.securityOperations().grantTablePermission(getAdminPrincipal(), tableName,
+          test_user_client.securityOperations().grantTablePermission(getAdminPrincipal(), tableName,
               TablePermission.GRANT);
           throw new IllegalStateException("User should not be able grant permissions");
         } catch (AccumuloSecurityException e) {
@@ -690,7 +691,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
         break;
       case GET_SUMMARIES:
         try {
-          test_user_conn.tableOperations().summaries(tableName).retrieve();
+          test_user_client.tableOperations().summaries(tableName).retrieve();
           throw new IllegalStateException("User should not be able to get table summaries");
         } catch (AccumuloSecurityException e) {
           if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
@@ -702,7 +703,7 @@ public class PermissionsIT extends AccumuloClusterHarness {
     }
   }
 
-  private void testGrantedTablePermission(AccumuloClient test_user_conn, ClusterUser normalUser,
+  private void testGrantedTablePermission(AccumuloClient test_user_client, ClusterUser normalUser,
       TablePermission perm, String tableName) throws AccumuloException, TableExistsException,
       AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
     BatchWriter writer;
@@ -712,14 +713,14 @@ public class PermissionsIT extends AccumuloClusterHarness {
     // test permission after granting it
     switch (perm) {
       case READ:
-        try (Scanner scanner = test_user_conn.createScanner(tableName, Authorizations.EMPTY)) {
+        try (Scanner scanner = test_user_client.createScanner(tableName, Authorizations.EMPTY)) {
           Iterator<Entry<Key,Value>> iter = scanner.iterator();
           while (iter.hasNext())
             iter.next();
         }
         break;
       case WRITE:
-        writer = test_user_conn.createBatchWriter(tableName, new BatchWriterConfig());
+        writer = test_user_client.createBatchWriter(tableName, new BatchWriterConfig());
         m = new Mutation(new Text("row"));
         m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
         writer.addMutation(m);
@@ -733,14 +734,15 @@ public class PermissionsIT extends AccumuloClusterHarness {
         groups.put("tgroup", new HashSet<>(Arrays.asList(new Text("t1"), new Text("t2"))));
         break;
       case DROP_TABLE:
-        test_user_conn.tableOperations().delete(tableName);
+        test_user_client.tableOperations().delete(tableName);
         break;
       case GRANT:
-        test_user_conn.securityOperations().grantTablePermission(getAdminPrincipal(), tableName,
+        test_user_client.securityOperations().grantTablePermission(getAdminPrincipal(), tableName,
             TablePermission.GRANT);
         break;
       case GET_SUMMARIES:
-        List<Summary> summaries = test_user_conn.tableOperations().summaries(tableName).retrieve();
+        List<Summary> summaries = test_user_client.tableOperations().summaries(tableName)
+            .retrieve();
         // just make sure it's not blocked by permissions, the actual summaries are tested in
         // SummaryIT
         assertTrue(summaries.isEmpty());
@@ -750,28 +752,28 @@ public class PermissionsIT extends AccumuloClusterHarness {
     }
   }
 
-  private void verifyHasOnlyTheseTablePermissions(AccumuloClient root_conn, String user,
+  private void verifyHasOnlyTheseTablePermissions(AccumuloClient root_client, String user,
       String table, TablePermission... perms) throws AccumuloException, AccumuloSecurityException {
     List<TablePermission> permList = Arrays.asList(perms);
     for (TablePermission p : TablePermission.values()) {
       if (permList.contains(p)) {
         // should have these
-        if (!root_conn.securityOperations().hasTablePermission(user, table, p))
+        if (!root_client.securityOperations().hasTablePermission(user, table, p))
           throw new IllegalStateException(
               user + " SHOULD have table permission " + p + " for table " + table);
       } else {
         // should not have these
-        if (root_conn.securityOperations().hasTablePermission(user, table, p))
+        if (root_client.securityOperations().hasTablePermission(user, table, p))
           throw new IllegalStateException(
               user + " SHOULD NOT have table permission " + p + " for table " + table);
       }
     }
   }
 
-  private void verifyHasNoTablePermissions(AccumuloClient root_conn, String user, String table,
+  private void verifyHasNoTablePermissions(AccumuloClient root_client, String user, String table,
       TablePermission... perms) throws AccumuloException, AccumuloSecurityException {
     for (TablePermission p : perms)
-      if (root_conn.securityOperations().hasTablePermission(user, table, p))
+      if (root_client.securityOperations().hasTablePermission(user, table, p))
         throw new IllegalStateException(
             user + " SHOULD NOT have table permission " + p + " for table " + table);
   }
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java b/test/src/main/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
index 3609722..52bcb75 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
@@ -53,9 +53,9 @@ public class RegexGroupBalanceIT extends ConfigurableMacBase {
 
   @Test(timeout = 120000)
   public void testBalancing() throws Exception {
-    AccumuloClient conn = getClient();
+    AccumuloClient client = getClient();
     String tablename = getUniqueNames(1)[0];
-    conn.tableOperations().create(tablename);
+    client.tableOperations().create(tablename);
 
     SortedSet<Text> splits = new TreeSet<>();
     splits.add(new Text("01a"));
@@ -72,18 +72,19 @@ public class RegexGroupBalanceIT extends ConfigurableMacBase {
     splits.add(new Text("03m"));
     splits.add(new Text("03r"));
 
-    conn.tableOperations().setProperty(tablename, RegexGroupBalancer.REGEX_PROPERTY, "(\\d\\d).*");
-    conn.tableOperations().setProperty(tablename, RegexGroupBalancer.DEFAUT_GROUP_PROPERTY, "03");
-    conn.tableOperations().setProperty(tablename, RegexGroupBalancer.WAIT_TIME_PROPERTY, "50ms");
-    conn.tableOperations().setProperty(tablename, Property.TABLE_LOAD_BALANCER.getKey(),
+    client.tableOperations().setProperty(tablename, RegexGroupBalancer.REGEX_PROPERTY,
+        "(\\d\\d).*");
+    client.tableOperations().setProperty(tablename, RegexGroupBalancer.DEFAUT_GROUP_PROPERTY, "03");
+    client.tableOperations().setProperty(tablename, RegexGroupBalancer.WAIT_TIME_PROPERTY, "50ms");
+    client.tableOperations().setProperty(tablename, Property.TABLE_LOAD_BALANCER.getKey(),
         RegexGroupBalancer.class.getName());
 
-    conn.tableOperations().addSplits(tablename, splits);
+    client.tableOperations().addSplits(tablename, splits);
 
     while (true) {
       Thread.sleep(250);
 
-      Table<String,String,MutableInt> groupLocationCounts = getCounts(conn, tablename);
+      Table<String,String,MutableInt> groupLocationCounts = getCounts(client, tablename);
 
       boolean allGood = true;
       allGood &= checkGroup(groupLocationCounts, "01", 1, 1, 3);
@@ -101,12 +102,12 @@ public class RegexGroupBalanceIT extends ConfigurableMacBase {
     splits.add(new Text("01f"));
     splits.add(new Text("01l"));
     splits.add(new Text("01r"));
-    conn.tableOperations().addSplits(tablename, splits);
+    client.tableOperations().addSplits(tablename, splits);
 
     while (true) {
       Thread.sleep(250);
 
-      Table<String,String,MutableInt> groupLocationCounts = getCounts(conn, tablename);
+      Table<String,String,MutableInt> groupLocationCounts = getCounts(client, tablename);
 
       boolean allGood = true;
       allGood &= checkGroup(groupLocationCounts, "01", 1, 2, 4);
@@ -120,12 +121,12 @@ public class RegexGroupBalanceIT extends ConfigurableMacBase {
     }
 
     // merge group 01 down to one tablet
-    conn.tableOperations().merge(tablename, null, new Text("01z"));
+    client.tableOperations().merge(tablename, null, new Text("01z"));
 
     while (true) {
       Thread.sleep(250);
 
-      Table<String,String,MutableInt> groupLocationCounts = getCounts(conn, tablename);
+      Table<String,String,MutableInt> groupLocationCounts = getCounts(client, tablename);
 
       boolean allGood = true;
       allGood &= checkGroup(groupLocationCounts, "01", 1, 1, 1);
@@ -166,11 +167,11 @@ public class RegexGroupBalanceIT extends ConfigurableMacBase {
         && counts.size() == tsevers;
   }
 
-  private Table<String,String,MutableInt> getCounts(AccumuloClient conn, String tablename)
+  private Table<String,String,MutableInt> getCounts(AccumuloClient client, String tablename)
       throws TableNotFoundException {
-    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+    try (Scanner s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
       s.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
-      ID tableId = ID.of(conn.tableOperations().tableIdMap().get(tablename));
+      ID tableId = ID.of(client.tableOperations().tableIdMap().get(tablename));
       s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
 
       Table<String,String,MutableInt> groupLocationCounts = HashBasedTable.create();
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ScanIdIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ScanIdIT.java
index f66f24f..0efa00d 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ScanIdIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ScanIdIT.java
@@ -109,23 +109,23 @@ public class ScanIdIT extends AccumuloClusterHarness {
   public void testScanId() throws Exception {
 
     final String tableName = getUniqueNames(1)[0];
-    AccumuloClient conn = getAccumuloClient();
-    conn.tableOperations().create(tableName);
+    AccumuloClient client = getAccumuloClient();
+    client.tableOperations().create(tableName);
 
-    addSplits(conn, tableName);
+    addSplits(client, tableName);
 
     log.info("Splits added");
 
-    generateSampleData(conn, tableName);
+    generateSampleData(client, tableName);
 
     log.info("Generated data for {}", tableName);
 
-    attachSlowIterator(conn, tableName);
+    attachSlowIterator(client, tableName);
 
     CountDownLatch latch = new CountDownLatch(NUM_SCANNERS);
 
     for (int scannerIndex = 0; scannerIndex < NUM_SCANNERS; scannerIndex++) {
-      ScannerThread st = new ScannerThread(conn, scannerIndex, tableName, latch);
+      ScannerThread st = new ScannerThread(client, scannerIndex, tableName, latch);
       pool.submit(st);
     }
 
@@ -150,7 +150,7 @@ public class ScanIdIT extends AccumuloClusterHarness {
     // all scanner have reported at least 1 result, so check for unique scan ids.
     Set<Long> scanIds = new HashSet<>();
 
-    List<String> tservers = conn.instanceOperations().getTabletServers();
+    List<String> tservers = client.instanceOperations().getTabletServers();
 
     log.debug("tablet servers {}", tservers);
 
@@ -159,7 +159,7 @@ public class ScanIdIT extends AccumuloClusterHarness {
       List<ActiveScan> activeScans = null;
       for (int i = 0; i < 10; i++) {
         try {
-          activeScans = conn.instanceOperations().getActiveScans(tserver);
+          activeScans = client.instanceOperations().getActiveScans(tserver);
           break;
         } catch (AccumuloException e) {
           if (e.getCause() instanceof TableNotFoundException) {
@@ -284,23 +284,23 @@ public class ScanIdIT extends AccumuloClusterHarness {
    * Create splits on table and force migration by taking table offline and then bring back online
    * for test.
    *
-   * @param conn
-   *          Accumulo connector Accumulo connector to test cluster or MAC instance.
+   * @param client
+   *          Accumulo client to test cluster or MAC instance.
    */
-  private void addSplits(final AccumuloClient conn, final String tableName) {
+  private void addSplits(final AccumuloClient client, final String tableName) {
 
     SortedSet<Text> splits = createSplits();
 
     try {
 
-      conn.tableOperations().addSplits(tableName, splits);
+      client.tableOperations().addSplits(tableName, splits);
 
-      conn.tableOperations().offline(tableName, true);
+      client.tableOperations().offline(tableName, true);
 
       sleepUninterruptibly(2, TimeUnit.SECONDS);
-      conn.tableOperations().online(tableName, true);
+      client.tableOperations().online(tableName, true);
 
-      for (Text split : conn.tableOperations().listSplits(tableName)) {
+      for (Text split : client.tableOperations().listSplits(tableName)) {
         log.trace("Split {}", split);
       }
 
@@ -335,7 +335,7 @@ public class ScanIdIT extends AccumuloClusterHarness {
    * secondary consideration for this test, that is included for completeness.
    *
    * @param accumuloClient
-   *          Accumulo connector Accumulo connector to test cluster or MAC instance.
+   *          Accumulo client to test cluster or MAC instance.
    */
   private void generateSampleData(AccumuloClient accumuloClient, final String tablename) {
 
@@ -374,7 +374,7 @@ public class ScanIdIT extends AccumuloClusterHarness {
    * scan id.
    *
    * @param accumuloClient
-   *          Accumulo connector Accumulo connector to test cluster or MAC instance.
+   *          Accumulo client to test cluster or MAC instance.
    */
   private void attachSlowIterator(AccumuloClient accumuloClient, final String tablename) {
     try {
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java b/test/src/main/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java
index 425e360..9b122c7 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java
@@ -194,14 +194,14 @@ public class TabletStateChangeIteratorIT extends AccumuloClusterHarness {
 
   private void createTable(String t, boolean online) throws AccumuloSecurityException,
       AccumuloException, TableNotFoundException, TableExistsException {
-    AccumuloClient conn = getAccumuloClient();
-    conn.tableOperations().create(t);
-    conn.tableOperations().online(t, true);
+    AccumuloClient client = getAccumuloClient();
+    client.tableOperations().create(t);
+    client.tableOperations().online(t, true);
     SortedSet<Text> partitionKeys = new TreeSet<>();
     partitionKeys.add(new Text("some split"));
-    conn.tableOperations().addSplits(t, partitionKeys);
+    client.tableOperations().addSplits(t, partitionKeys);
     if (!online) {
-      conn.tableOperations().offline(t, true);
+      client.tableOperations().offline(t, true);
     }
   }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/TimeoutIT.java b/test/src/main/java/org/apache/accumulo/test/functional/TimeoutIT.java
index 57eaf06..f8bdba6 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/TimeoutIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/TimeoutIT.java
@@ -47,20 +47,20 @@ public class TimeoutIT extends AccumuloClusterHarness {
 
   @Test
   public void run() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String[] tableNames = getUniqueNames(2);
-    testBatchWriterTimeout(conn, tableNames[0]);
-    testBatchScannerTimeout(conn, tableNames[1]);
+    testBatchWriterTimeout(client, tableNames[0]);
+    testBatchScannerTimeout(client, tableNames[1]);
   }
 
-  public void testBatchWriterTimeout(AccumuloClient conn, String tableName) throws Exception {
-    conn.tableOperations().create(tableName);
-    conn.tableOperations().addConstraint(tableName, SlowConstraint.class.getName());
+  public void testBatchWriterTimeout(AccumuloClient client, String tableName) throws Exception {
+    client.tableOperations().create(tableName);
+    client.tableOperations().addConstraint(tableName, SlowConstraint.class.getName());
 
     // give constraint time to propagate through zookeeper
     sleepUninterruptibly(1, TimeUnit.SECONDS);
 
-    BatchWriter bw = conn.createBatchWriter(tableName,
+    BatchWriter bw = client.createBatchWriter(tableName,
         new BatchWriterConfig().setTimeout(3, TimeUnit.SECONDS));
 
     Mutation mut = new Mutation("r1");
@@ -77,7 +77,7 @@ public class TimeoutIT extends AccumuloClusterHarness {
     }
   }
 
-  public void testBatchScannerTimeout(AccumuloClient conn, String tableName) throws Exception {
+  public void testBatchScannerTimeout(AccumuloClient client, String tableName) throws Exception {
     getAccumuloClient().tableOperations().create(tableName);
 
     BatchWriter bw = getAccumuloClient().createBatchWriter(tableName, new BatchWriterConfig());
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java b/test/src/main/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java
index 0ff167a..49598c5 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java
@@ -166,10 +166,10 @@ public class WALSunnyDayIT extends ConfigurableMacBase {
     }
   }
 
-  private void writeSomeData(AccumuloClient conn, String tableName, int row, int col)
+  private void writeSomeData(AccumuloClient client, String tableName, int row, int col)
       throws Exception {
     Random rand = new SecureRandom();
-    BatchWriter bw = conn.createBatchWriter(tableName, null);
+    BatchWriter bw = client.createBatchWriter(tableName, null);
     byte[] rowData = new byte[10];
     byte[] cq = new byte[10];
     byte[] value = new byte[10];
diff --git a/test/src/main/java/org/apache/accumulo/test/gc/replication/CloseWriteAheadLogReferencesIT.java b/test/src/main/java/org/apache/accumulo/test/gc/replication/CloseWriteAheadLogReferencesIT.java
index a6e63eb..81072cb 100644
--- a/test/src/main/java/org/apache/accumulo/test/gc/replication/CloseWriteAheadLogReferencesIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/gc/replication/CloseWriteAheadLogReferencesIT.java
@@ -64,7 +64,7 @@ import com.google.common.collect.Iterables;
 public class CloseWriteAheadLogReferencesIT extends ConfigurableMacBase {
 
   private WrappedCloseWriteAheadLogReferences refs;
-  private AccumuloClient conn;
+  private AccumuloClient client;
 
   private static class WrappedCloseWriteAheadLogReferences extends CloseWriteAheadLogReferences {
     public WrappedCloseWriteAheadLogReferences(ServerContext context) {
@@ -72,19 +72,19 @@ public class CloseWriteAheadLogReferencesIT extends ConfigurableMacBase {
     }
 
     @Override
-    protected long updateReplicationEntries(AccumuloClient conn, Set<String> closedWals) {
-      return super.updateReplicationEntries(conn, closedWals);
+    protected long updateReplicationEntries(AccumuloClient client, Set<String> closedWals) {
+      return super.updateReplicationEntries(client, closedWals);
     }
   }
 
   @Before
   public void setupInstance() throws Exception {
-    conn = getClient();
-    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME,
+    client = getClient();
+    client.securityOperations().grantTablePermission(client.whoami(), ReplicationTable.NAME,
         TablePermission.WRITE);
-    conn.securityOperations().grantTablePermission(conn.whoami(), MetadataTable.NAME,
+    client.securityOperations().grantTablePermission(client.whoami(), MetadataTable.NAME,
         TablePermission.WRITE);
-    ReplicationTable.setOnline(conn);
+    ReplicationTable.setOnline(client);
   }
 
   @Before
@@ -124,7 +124,7 @@ public class CloseWriteAheadLogReferencesIT extends ConfigurableMacBase {
   @Test
   public void unclosedWalsLeaveStatusOpen() throws Exception {
     Set<String> wals = Collections.emptySet();
-    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     Mutation m = new Mutation(
         ReplicationSection.getRowPrefix() + "file:/accumulo/wal/tserver+port/12345");
     m.put(ReplicationSection.COLF, new Text("1"),
@@ -132,9 +132,9 @@ public class CloseWriteAheadLogReferencesIT extends ConfigurableMacBase {
     bw.addMutation(m);
     bw.close();
 
-    refs.updateReplicationEntries(conn, wals);
+    refs.updateReplicationEntries(client, wals);
 
-    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+    try (Scanner s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
       s.fetchColumnFamily(ReplicationSection.COLF);
       Entry<Key,Value> entry = Iterables.getOnlyElement(s);
       Status status = Status.parseFrom(entry.getValue().get());
@@ -146,16 +146,16 @@ public class CloseWriteAheadLogReferencesIT extends ConfigurableMacBase {
   public void closedWalsUpdateStatus() throws Exception {
     String file = "file:/accumulo/wal/tserver+port/12345";
     Set<String> wals = Collections.singleton(file);
-    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     Mutation m = new Mutation(ReplicationSection.getRowPrefix() + file);
     m.put(ReplicationSection.COLF, new Text("1"),
         StatusUtil.fileCreatedValue(System.currentTimeMillis()));
     bw.addMutation(m);
     bw.close();
 
-    refs.updateReplicationEntries(conn, wals);
+    refs.updateReplicationEntries(client, wals);
 
-    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+    try (Scanner s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
       s.fetchColumnFamily(ReplicationSection.COLF);
       Entry<Key,Value> entry = Iterables.getOnlyElement(s);
       Status status = Status.parseFrom(entry.getValue().get());
@@ -167,15 +167,15 @@ public class CloseWriteAheadLogReferencesIT extends ConfigurableMacBase {
   public void partiallyReplicatedReferencedWalsAreNotClosed() throws Exception {
     String file = "file:/accumulo/wal/tserver+port/12345";
     Set<String> wals = Collections.singleton(file);
-    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
+    BatchWriter bw = ReplicationTable.getBatchWriter(client);
     Mutation m = new Mutation(file);
     StatusSection.add(m, Table.ID.of("1"), ProtobufUtil.toValue(StatusUtil.ingestedUntil(1000)));
     bw.addMutation(m);
     bw.close();
 
-    refs.updateReplicationEntries(conn, wals);
+    refs.updateReplicationEntries(client, wals);
 
-    try (Scanner s = ReplicationTable.getScanner(conn)) {
+    try (Scanner s = ReplicationTable.getScanner(client)) {
       Entry<Key,Value> entry = Iterables.getOnlyElement(s);
       Status status = Status.parseFrom(entry.getValue().get());
       assertFalse(status.getClosed());
diff --git a/test/src/main/java/org/apache/accumulo/test/mapred/AccumuloRowInputFormatIT.java b/test/src/main/java/org/apache/accumulo/test/mapred/AccumuloRowInputFormatIT.java
index 035f855..f18dd91 100644
--- a/test/src/main/java/org/apache/accumulo/test/mapred/AccumuloRowInputFormatIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/mapred/AccumuloRowInputFormatIT.java
@@ -183,12 +183,12 @@ public class AccumuloRowInputFormatIT extends AccumuloClusterHarness {
 
   @Test
   public void test() throws Exception {
-    final AccumuloClient conn = getAccumuloClient();
+    final AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
     BatchWriter writer = null;
     try {
-      writer = conn.createBatchWriter(tableName, new BatchWriterConfig());
+      writer = client.createBatchWriter(tableName, new BatchWriterConfig());
       insertList(writer, row1);
       insertList(writer, row2);
       insertList(writer, row3);
diff --git a/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloInputFormatIT.java b/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloInputFormatIT.java
index 9e275fa..8190ff6 100644
--- a/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloInputFormatIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloInputFormatIT.java
@@ -94,9 +94,9 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
    */
   @Test
   public void testGetSplits() throws Exception {
-    AccumuloClient conn = getAccumuloClient();
+    AccumuloClient client = getAccumuloClient();
     String table = getUniqueNames(1)[0];
-    conn.tableOperations().create(table);
+    client.tableOperations().create(table);
     insertData(table, currentTimeMillis());
 
     Job job = Job.getInstance();
@@ -107,11 +107,11 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
     TreeSet<Text> splitsToAdd = new TreeSet<>();
     for (int i = 0; i < 10000; i += 1000)
       splitsToAdd.add(new Text(String.format("%09d", i)));
-    conn.tableOperations().addSplits(table, splitsToAdd);
+    client.tableOperations().addSplits(table, splitsToAdd);
     sleepUninterruptibly(500, TimeUnit.MILLISECONDS); // wait for splits to be propagated
 
     // get splits without setting any range
-    Collection<Text> actualSplits = conn.tableOperations().listSplits(table);
+    Collection<Text> actualSplits = client.tableOperations().listSplits(table);
     List<InputSplit> splits = inputFormat.getSplits(job);
     assertEquals(actualSplits.size() + 1, splits.size()); // No ranges set on the job so it'll start
                                                           // with -inf
@@ -131,7 +131,7 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
       fail("An exception should have been thrown");
     } catch (IOException e) {}
 
-    conn.tableOperations().offline(table, true);
+    client.tableOperations().offline(table, true);
     splits = inputFormat.getSplits(job);
     assertEquals(actualSplits.size(), splits.size());
 
@@ -159,7 +159,7 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
       fail("An exception should have been thrown");
     } catch (IllegalArgumentException e) {}
 
-    conn.tableOperations().online(table, true);
+    client.tableOperations().online(table, true);
     AccumuloInputFormat.setOfflineTableScan(job, false);
 
     // test for resumption of success
@@ -187,7 +187,7 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
     AccumuloInputFormat.setLocalIterators(job, false);
 
     // Check we are getting back correct type pf split
-    conn.tableOperations().online(table);
+    client.tableOperations().online(table);
     splits = inputFormat.getSplits(job);
     for (InputSplit split : splits)
       assert (split instanceof BatchInputSplit);
diff --git a/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloRowInputFormatIT.java b/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloRowInputFormatIT.java
index a30a01a..7452a76 100644
--- a/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloRowInputFormatIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloRowInputFormatIT.java
@@ -180,12 +180,12 @@ public class AccumuloRowInputFormatIT extends AccumuloClusterHarness {
 
   @Test
   public void test() throws Exception {
-    final AccumuloClient conn = getAccumuloClient();
+    final AccumuloClient client = getAccumuloClient();
     String tableName = getUniqueNames(1)[0];
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
     BatchWriter writer = null;
     try {
-      writer = conn.createBatchWriter(tableName, new BatchWriterConfig());
+      writer = client.createBatchWriter(tableName, new BatchWriterConfig());
       insertList(writer, row1);
       insertList(writer, row2);
       insertList(writer, row3);
diff --git a/test/src/main/java/org/apache/accumulo/test/master/SuspendedTabletsIT.java b/test/src/main/java/org/apache/accumulo/test/master/SuspendedTabletsIT.java
index 9f9dae6..50b95ea 100644
--- a/test/src/main/java/org/apache/accumulo/test/master/SuspendedTabletsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/master/SuspendedTabletsIT.java
@@ -168,16 +168,16 @@ public class SuspendedTabletsIT extends ConfigurableMacBase {
 
     String tableName = getUniqueNames(1)[0];
 
-    AccumuloClient conn = ctx.getClient();
+    AccumuloClient client = ctx.getClient();
 
     // Create a table with a bunch of splits
     log.info("Creating table " + tableName);
-    conn.tableOperations().create(tableName);
+    client.tableOperations().create(tableName);
     SortedSet<Text> splitPoints = new TreeSet<>();
     for (int i = 1; i < TABLETS; ++i) {
       splitPoints.add(new Text("" + i));
     }
-    conn.tableOperations().addSplits(tableName, splitPoints);
+    client.tableOperations().addSplits(tableName, splitPoints);
 
     // Wait for all of the tablets to hosted ...
     log.info("Waiting on hosting and balance");
@@ -188,7 +188,7 @@ public class SuspendedTabletsIT extends ConfigurableMacBase {
     }
 
     // ... and balanced.
-    conn.instanceOperations().waitForBalance();
+    client.instanceOperations().waitForBalance();
     do {
       // Give at least another 5 seconds for migrations to finish up
       Thread.sleep(5000);
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/ContinuousIngest.java b/test/src/main/java/org/apache/accumulo/test/performance/ContinuousIngest.java
index 4020c11..4b97088 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/ContinuousIngest.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/ContinuousIngest.java
@@ -89,14 +89,14 @@ public class ContinuousIngest {
     if (opts.min < 0 || opts.max < 0 || opts.max <= opts.min) {
       throw new IllegalArgumentException("bad min and max");
     }
-    AccumuloClient conn = clientOpts.getClient();
+    AccumuloClient client = clientOpts.getClient();
 
-    if (!conn.tableOperations().exists(clientOpts.getTableName())) {
+    if (!client.tableOperations().exists(clientOpts.getTableName())) {
       throw new TableNotFoundException(null, clientOpts.getTableName(),
           "Consult the README and create the table before starting ingest.");
     }
 
-    BatchWriter bw = conn.createBatchWriter(clientOpts.getTableName(),
+    BatchWriter bw = client.createBatchWriter(clientOpts.getTableName(),
         bwOpts.getBatchWriterConfig());
     bw = Trace.wrapAll(bw, new CountSampler(1024));
 
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java b/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
index d8be949..722ddfc 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
@@ -215,13 +215,13 @@ public class CollectTabletStats {
 
       ArrayList<Test> tests = new ArrayList<>();
 
-      final AccumuloClient conn = opts.getClient();
+      final AccumuloClient client = opts.getClient();
 
       for (final KeyExtent ke : tabletsToTest) {
         Test test = new Test(ke) {
           @Override
           public int runTest() throws Exception {
-            return scanTablet(conn, opts.getTableName(), opts.auths, scanOpts.scanBatchSize,
+            return scanTablet(client, opts.getTableName(), opts.auths, scanOpts.scanBatchSize,
                 ke.getPrevEndRow(), ke.getEndRow(), columns);
           }
         };
@@ -233,13 +233,13 @@ public class CollectTabletStats {
     }
 
     for (final KeyExtent ke : tabletsToTest) {
-      final AccumuloClient conn = opts.getClient();
+      final AccumuloClient client = opts.getClient();
 
       threadPool.submit(new Runnable() {
         @Override
         public void run() {
           try {
-            calcTabletStats(conn, opts.getTableName(), opts.auths, scanOpts.scanBatchSize, ke,
+            calcTabletStats(client, opts.getTableName(), opts.auths, scanOpts.scanBatchSize, ke,
                 columns);
           } catch (Exception e) {
             log.error("Failed to calculate tablet stats.", e);
@@ -522,10 +522,10 @@ public class CollectTabletStats {
 
   }
 
-  private static int scanTablet(AccumuloClient conn, String table, Authorizations auths,
+  private static int scanTablet(AccumuloClient client, String table, Authorizations auths,
       int batchSize, Text prevEndRow, Text endRow, String[] columns) throws Exception {
 
-    try (Scanner scanner = conn.createScanner(table, auths)) {
+    try (Scanner scanner = client.createScanner(table, auths)) {
       scanner.setBatchSize(batchSize);
       scanner.setRange(new Range(prevEndRow, false, endRow, true));
 
@@ -543,12 +543,12 @@ public class CollectTabletStats {
     }
   }
 
-  private static void calcTabletStats(AccumuloClient conn, String table, Authorizations auths,
+  private static void calcTabletStats(AccumuloClient client, String table, Authorizations auths,
       int batchSize, KeyExtent ke, String[] columns) throws Exception {
 
     // long t1 = System.currentTimeMillis();
 
-    try (Scanner scanner = conn.createScanner(table, auths)) {
+    try (Scanner scanner = client.createScanner(table, auths)) {
       scanner.setBatchSize(batchSize);
       scanner.setRange(new Range(ke.getPrevEndRow(), false, ke.getEndRow(), true));
 
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java b/test/src/main/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java
index 1ae79c9..1dc0cff 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java
@@ -213,76 +213,76 @@ public class CyclicReplicationIT {
     }
 
     try {
-      AccumuloClient connMaster1 = master1Cluster.getAccumuloClient("root",
+      AccumuloClient clientMaster1 = master1Cluster.getAccumuloClient("root",
           new PasswordToken(password)),
-          connMaster2 = master2Cluster.getAccumuloClient("root", new PasswordToken(password));
+          clientMaster2 = master2Cluster.getAccumuloClient("root", new PasswordToken(password));
 
       String master1UserName = "master1", master1Password = "foo";
       String master2UserName = "master2", master2Password = "bar";
       String master1Table = master1Cluster.getInstanceName(),
           master2Table = master2Cluster.getInstanceName();
 
-      connMaster1.securityOperations().createLocalUser(master1UserName,
+      clientMaster1.securityOperations().createLocalUser(master1UserName,
           new PasswordToken(master1Password));
-      connMaster2.securityOperations().createLocalUser(master2UserName,
+      clientMaster2.securityOperations().createLocalUser(master2UserName,
           new PasswordToken(master2Password));
 
       // Configure the credentials we should use to authenticate ourselves to the peer for
       // replication
-      connMaster1.instanceOperations().setProperty(
+      clientMaster1.instanceOperations().setProperty(
           Property.REPLICATION_PEER_USER.getKey() + master2Cluster.getInstanceName(),
           master2UserName);
-      connMaster1.instanceOperations().setProperty(
+      clientMaster1.instanceOperations().setProperty(
           Property.REPLICATION_PEER_PASSWORD.getKey() + master2Cluster.getInstanceName(),
           master2Password);
 
-      connMaster2.instanceOperations().setProperty(
+      clientMaster2.instanceOperations().setProperty(
           Property.REPLICATION_PEER_USER.getKey() + master1Cluster.getInstanceName(),
           master1UserName);
-      connMaster2.instanceOperations().setProperty(
+      clientMaster2.instanceOperations().setProperty(
           Property.REPLICATION_PEER_PASSWORD.getKey() + master1Cluster.getInstanceName(),
           master1Password);
 
-      connMaster1.instanceOperations().setProperty(
+      clientMaster1.instanceOperations().setProperty(
           Property.REPLICATION_PEERS.getKey() + master2Cluster.getInstanceName(),
           ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
               AccumuloReplicaSystem.buildConfiguration(master2Cluster.getInstanceName(),
                   master2Cluster.getZooKeepers())));
 
-      connMaster2.instanceOperations().setProperty(
+      clientMaster2.instanceOperations().setProperty(
           Property.REPLICATION_PEERS.getKey() + master1Cluster.getInstanceName(),
           ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
               AccumuloReplicaSystem.buildConfiguration(master1Cluster.getInstanceName(),
                   master1Cluster.getZooKeepers())));
 
-      connMaster1.tableOperations().create(master1Table,
+      clientMaster1.tableOperations().create(master1Table,
           new NewTableConfiguration().withoutDefaultIterators());
-      String master1TableId = connMaster1.tableOperations().tableIdMap().get(master1Table);
+      String master1TableId = clientMaster1.tableOperations().tableIdMap().get(master1Table);
       assertNotNull(master1TableId);
 
-      connMaster2.tableOperations().create(master2Table,
+      clientMaster2.tableOperations().create(master2Table,
           new NewTableConfiguration().withoutDefaultIterators());
-      String master2TableId = connMaster2.tableOperations().tableIdMap().get(master2Table);
+      String master2TableId = clientMaster2.tableOperations().tableIdMap().get(master2Table);
       assertNotNull(master2TableId);
 
       // Replicate master1 in the master1 cluster to master2 in the master2 cluster
-      connMaster1.tableOperations().setProperty(master1Table, Property.TABLE_REPLICATION.getKey(),
+      clientMaster1.tableOperations().setProperty(master1Table, Property.TABLE_REPLICATION.getKey(),
           "true");
-      connMaster1.tableOperations().setProperty(master1Table,
+      clientMaster1.tableOperations().setProperty(master1Table,
           Property.TABLE_REPLICATION_TARGET.getKey() + master2Cluster.getInstanceName(),
           master2TableId);
 
      // Replicate master2 in the master2 cluster to master1 in the master1 cluster
-      connMaster2.tableOperations().setProperty(master2Table, Property.TABLE_REPLICATION.getKey(),
+      clientMaster2.tableOperations().setProperty(master2Table, Property.TABLE_REPLICATION.getKey(),
           "true");
-      connMaster2.tableOperations().setProperty(master2Table,
+      clientMaster2.tableOperations().setProperty(master2Table,
           Property.TABLE_REPLICATION_TARGET.getKey() + master1Cluster.getInstanceName(),
           master1TableId);
 
       // Give our replication user the ability to write to the respective table
-      connMaster1.securityOperations().grantTablePermission(master1UserName, master1Table,
+      clientMaster1.securityOperations().grantTablePermission(master1UserName, master1Table,
           TablePermission.WRITE);
-      connMaster2.securityOperations().grantTablePermission(master2UserName, master2Table,
+      clientMaster2.securityOperations().grantTablePermission(master2UserName, master2Table,
           TablePermission.WRITE);
 
       IteratorSetting summingCombiner = new IteratorSetting(50, SummingCombiner.class);
@@ -291,17 +291,17 @@ public class CyclicReplicationIT {
 
       // Set a combiner on both instances that will sum multiple values
       // We can use this to verify that the mutation was not sent multiple times
-      connMaster1.tableOperations().attachIterator(master1Table, summingCombiner);
-      connMaster2.tableOperations().attachIterator(master2Table, summingCombiner);
+      clientMaster1.tableOperations().attachIterator(master1Table, summingCombiner);
+      clientMaster2.tableOperations().attachIterator(master2Table, summingCombiner);
 
       // Write a single entry
-      BatchWriter bw = connMaster1.createBatchWriter(master1Table, new BatchWriterConfig());
+      BatchWriter bw = clientMaster1.createBatchWriter(master1Table, new BatchWriterConfig());
       Mutation m = new Mutation("row");
       m.put("count", "", "1");
       bw.addMutation(m);
       bw.close();
 
-      Set<String> files = connMaster1.replicationOperations().referencedFiles(master1Table);
+      Set<String> files = clientMaster1.replicationOperations().referencedFiles(master1Table);
 
       log.info("Found {} that need replication from master1", files);
 
@@ -319,23 +319,23 @@ public class CyclicReplicationIT {
 
       // Sanity check that the element is there on master1
       Entry<Key,Value> entry;
-      try (Scanner s = connMaster1.createScanner(master1Table, Authorizations.EMPTY)) {
+      try (Scanner s = clientMaster1.createScanner(master1Table, Authorizations.EMPTY)) {
         entry = Iterables.getOnlyElement(s);
         assertEquals("1", entry.getValue().toString());
 
         // Wait for this table to replicate
-        connMaster1.replicationOperations().drain(master1Table, files);
+        clientMaster1.replicationOperations().drain(master1Table, files);
 
         Thread.sleep(5000);
       }
 
       // Check that the element made it to master2 only once
-      try (Scanner s = connMaster2.createScanner(master2Table, Authorizations.EMPTY)) {
+      try (Scanner s = clientMaster2.createScanner(master2Table, Authorizations.EMPTY)) {
         entry = Iterables.getOnlyElement(s);
         assertEquals("1", entry.getValue().toString());
 
         // Wait for master2 to finish replicating it back
-        files = connMaster2.replicationOperations().referencedFiles(master2Table);
+        files = clientMaster2.replicationOperations().referencedFiles(master2Table);
 
         // Kill and restart the tserver to close the WAL on master2
         for (ProcessReference proc : master2Cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
@@ -349,17 +349,17 @@ public class CyclicReplicationIT {
       }
 
       // Check that the element made it to master2 only once
-      try (Scanner s = connMaster2.createScanner(master2Table, Authorizations.EMPTY)) {
+      try (Scanner s = clientMaster2.createScanner(master2Table, Authorizations.EMPTY)) {
         entry = Iterables.getOnlyElement(s);
         assertEquals("1", entry.getValue().toString());
 
-        connMaster2.replicationOperations().drain(master2Table, files);
+        clientMaster2.replicationOperations().drain(master2Table, files);
 
         Thread.sleep(5000);
       }
 
       // Verify that the entry wasn't sent back to master1
-      try (Scanner s = connMaster1.createScanner(master1Table, Authorizations.EMPTY)) {
+      try (Scanner s = clientMaster1.createScanner(master1Table, Authorizations.EMPTY)) {
         entry = Iterables.getOnlyElement(s);
         assertEquals("1", entry.getValue().toString());
       }
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/FinishedWorkUpdaterIT.java b/test/src/main/java/org/apache/accumulo/test/replication/FinishedWorkUpdaterIT.java
index cf8030a..d11c748 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/FinishedWorkUpdaterIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/FinishedWorkUpdaterIT.java
@@ -45,13 +45,13 @@ import com.google.common.collect.Iterables;
 
 public class FinishedWorkUpdaterIT extends ConfigurableMacBase {
 
-  private AccumuloClient conn;
+  private AccumuloClient client;
   private FinishedWorkUpdater updater;
 
   @Before
   public void configureUpdater() throws Exception {
-    conn = getClient();
-    updater = new FinishedWorkUpdater(conn);
+    client = getClient();
+    updater = new FinishedWorkUpdater(client);
   }
 
   @Test
@@ -61,11 +61,11 @@ public class FinishedWorkUpdaterIT extends ConfigurableMacBase {
 
   @Test
   public void recordsWithProgressUpdateBothTables() throws Exception {
-    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME,
+    client.securityOperations().grantTablePermission(client.whoami(), ReplicationTable.NAME,
         TablePermission.READ);
-    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME,
+    client.securityOperations().grantTablePermission(client.whoami(), ReplicationTable.NAME,
         TablePermission.WRITE);
-    ReplicationTable.setOnline(conn);
+    ReplicationTable.setOnline(client);
 
     String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
     Status stat = Status.newBuilder().setBegin(100).setEnd(200).setClosed(true)
@@ -73,7 +73,7 @@ public class FinishedWorkUpdaterIT extends ConfigurableMacBase {
     ReplicationTarget target = new ReplicationTarget("peer", "table1", Table.ID.of("1"));
 
     // Create a single work record for a file to some peer
-    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
+    BatchWriter bw = ReplicationTable.getBatchWriter(client);
     Mutation m = new Mutation(file);
     WorkSection.add(m, target.toText(), ProtobufUtil.toValue(stat));
     bw.addMutation(m);
@@ -81,7 +81,7 @@ public class FinishedWorkUpdaterIT extends ConfigurableMacBase {
 
     updater.run();
 
-    try (Scanner s = ReplicationTable.getScanner(conn)) {
+    try (Scanner s = ReplicationTable.getScanner(client)) {
       s.setRange(Range.exact(file));
       StatusSection.limit(s);
       Entry<Key,Value> entry = Iterables.getOnlyElement(s);
@@ -98,11 +98,11 @@ public class FinishedWorkUpdaterIT extends ConfigurableMacBase {
 
   @Test
   public void chooseMinimumBeginOffset() throws Exception {
-    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME,
+    client.securityOperations().grantTablePermission(client.whoami(), ReplicationTable.NAME,
         TablePermission.READ);
-    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME,
+    client.securityOperations().grantTablePermission(client.whoami(), ReplicationTable.NAME,
         TablePermission.WRITE);
-    ReplicationTable.setOnline(conn);
+    ReplicationTable.setOnline(client);
 
     String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
     Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true)
@@ -116,7 +116,7 @@ public class FinishedWorkUpdaterIT extends ConfigurableMacBase {
     ReplicationTarget target3 = new ReplicationTarget("peer3", "table3", Table.ID.of("1"));
 
     // Create a single work record for a file to some peer
-    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
+    BatchWriter bw = ReplicationTable.getBatchWriter(client);
     Mutation m = new Mutation(file);
     WorkSection.add(m, target1.toText(), ProtobufUtil.toValue(stat1));
     WorkSection.add(m, target2.toText(), ProtobufUtil.toValue(stat2));
@@ -126,7 +126,7 @@ public class FinishedWorkUpdaterIT extends ConfigurableMacBase {
 
     updater.run();
 
-    try (Scanner s = ReplicationTable.getScanner(conn)) {
+    try (Scanner s = ReplicationTable.getScanner(client)) {
       s.setRange(Range.exact(file));
       StatusSection.limit(s);
       Entry<Key,Value> entry = Iterables.getOnlyElement(s);
@@ -143,11 +143,11 @@ public class FinishedWorkUpdaterIT extends ConfigurableMacBase {
 
   @Test
   public void chooseMinimumBeginOffsetInfiniteEnd() throws Exception {
-    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME,
+    client.securityOperations().grantTablePermission(client.whoami(), ReplicationTable.NAME,
         TablePermission.READ);
-    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME,
+    client.securityOperations().grantTablePermission(client.whoami(), ReplicationTable.NAME,
         TablePermission.WRITE);
-    ReplicationTable.setOnline(conn);
+    ReplicationTable.setOnline(client);
 
     String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
     Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true)
@@ -161,7 +161,7 @@ public class FinishedWorkUpdaterIT extends ConfigurableMacBase {
     ReplicationTarget target3 = new ReplicationTarget("peer3", "table3", Table.ID.of("1"));
 
     // Create a single work record for a file to some peer
-    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
+    BatchWriter bw = ReplicationTable.getBatchWriter(client);
     Mutation m = new Mutation(file);
     WorkSection.add(m, target1.toText(), ProtobufUtil.toValue(stat1));
     WorkSection.add(m, target2.toText(), ProtobufUtil.toValue(stat2));
@@ -171,7 +171,7 @@ public class FinishedWorkUpdaterIT extends ConfigurableMacBase {
 
     updater.run();
 
-    try (Scanner s = ReplicationTable.getScanner(conn)) {
+    try (Scanner s = ReplicationTable.getScanner(client)) {
       s.setRange(Range.exact(file));
       StatusSection.limit(s);
       Entry<Key,Value> entry = Iterables.getOnlyElement(s);
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java b/test/src/main/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java
index d0394a1..d5fffd7 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java
@@ -106,8 +106,8 @@ public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacB
    */
   private Set<String> getWalsForTable(String tableName) throws Exception {
     final ServerContext context = getServerContext();
-    final AccumuloClient conn = context.getClient();
-    final String tableId = conn.tableOperations().tableIdMap().get(tableName);
+    final AccumuloClient client = context.getClient();
+    final String tableId = client.tableOperations().tableIdMap().get(tableName);
 
     assertNotNull("Could not determine table ID for " + tableName, tableId);
 
@@ -125,13 +125,13 @@ public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacB
    * Fetch all of the rfiles referenced by tablets in the metadata table for this table
    */
   private Set<String> getFilesForTable(String tableName) throws Exception {
-    final AccumuloClient conn = getClient();
-    final Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
+    final AccumuloClient client = getClient();
+    final Table.ID tableId = Table.ID.of(client.tableOperations().tableIdMap().get(tableName));
 
     assertNotNull("Could not determine table ID for " + tableName, tableId);
 
     Set<String> rfiles = new HashSet<>();
-    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+    try (Scanner s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
       Range r = MetadataSchema.TabletsSection.getRange(tableId);
       s.setRange(r);
       s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
@@ -153,13 +153,13 @@ public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacB
    * entries)
    */
   private Map<String,Status> getMetadataStatusForTable(String tableName) throws Exception {
-    final AccumuloClient conn = getClient();
-    final String tableId = conn.tableOperations().tableIdMap().get(tableName);
+    final AccumuloClient client = getClient();
+    final String tableId = client.tableOperations().tableIdMap().get(tableName);
 
     assertNotNull("Could not determine table ID for " + tableName, tableId);
 
     Map<String,Status> fileToStatus = new HashMap<>();
-    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+    try (Scanner s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
       Range r = MetadataSchema.ReplicationSection.getRange();
       s.setRange(r);
       s.fetchColumn(MetadataSchema.ReplicationSection.COLF, new Text(tableId));
@@ -178,19 +178,19 @@ public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacB
   @Test
   public void testActiveWalPrecludesClosing() throws Exception {
     final String table = getUniqueNames(1)[0];
-    final AccumuloClient conn = getClient();
+    final AccumuloClient client = getClient();
 
     // Bring the replication table online first and foremost
-    ReplicationTable.setOnline(conn);
+    ReplicationTable.setOnline(client);
 
     log.info("Creating {}", table);
-    conn.tableOperations().create(table);
+    client.tableOperations().create(table);
 
-    conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
+    client.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
 
     log.info("Writing a few mutations to the table");
 
-    BatchWriter bw = conn.createBatchWriter(table, null);
+    BatchWriter bw = client.createBatchWriter(table, null);
 
     byte[] empty = new byte[0];
     for (int i = 0; i < 5; i++) {
@@ -209,9 +209,9 @@ public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacB
     assertEquals("Expected to only find two WALs for the table", 2, wals.size());
 
     // Flush our test table to remove the WAL references in it
-    conn.tableOperations().flush(table, null, null, true);
+    client.tableOperations().flush(table, null, null, true);
     // Flush the metadata table too because it will have a reference to the WAL
-    conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
+    client.tableOperations().flush(MetadataTable.NAME, null, null, true);
 
     log.info("Waiting for replication table to come online");
 
@@ -234,7 +234,7 @@ public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacB
     log.info("Files for table before MajC: {}", filesForTable);
 
     // Issue a MajC to roll a new file in HDFS
-    conn.tableOperations().compact(table, null, null, false, true);
+    client.tableOperations().compact(table, null, null, false, true);
 
     Set<String> filesForTableAfterCompaction = getFilesForTable(table);
 
@@ -270,19 +270,19 @@ public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacB
     final String[] names = getUniqueNames(2);
     // `table` will be replicated, `otherTable` is only used to roll the WAL on the tserver
     final String table = names[0], otherTable = names[1];
-    final AccumuloClient conn = getClient();
+    final AccumuloClient client = getClient();
 
     // Bring the replication table online first and foremost
-    ReplicationTable.setOnline(conn);
+    ReplicationTable.setOnline(client);
 
     log.info("Creating {}", table);
-    conn.tableOperations().create(table);
+    client.tableOperations().create(table);
 
-    conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
+    client.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
 
     log.info("Writing a few mutations to the table");
 
-    BatchWriter bw = conn.createBatchWriter(table, null);
+    BatchWriter bw = client.createBatchWriter(table, null);
 
     byte[] empty = new byte[0];
     for (int i = 0; i < 5; i++) {
@@ -302,9 +302,9 @@ public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacB
     log.info("Compacting the table which will remove all WALs from the tablets");
 
     // Flush our test table to remove the WAL references in it
-    conn.tableOperations().flush(table, null, null, true);
+    client.tableOperations().flush(table, null, null, true);
     // Flush the metadata table too because it will have a reference to the WAL
-    conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
+    client.tableOperations().flush(MetadataTable.NAME, null, null, true);
 
     log.info("Fetching replication statuses from metadata table");
 
@@ -325,7 +325,7 @@ public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacB
     log.info("Files for table before MajC: {}", filesForTable);
 
     // Issue a MajC to roll a new file in HDFS
-    conn.tableOperations().compact(table, null, null, false, true);
+    client.tableOperations().compact(table, null, null, false, true);
 
     Set<String> filesForTableAfterCompaction = getFilesForTable(table);
 
@@ -362,8 +362,8 @@ public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacB
      * data that will exceed the 1.33% full threshold that the logger keeps track of
      */
 
-    conn.tableOperations().create(otherTable);
-    bw = conn.createBatchWriter(otherTable, null);
+    client.tableOperations().create(otherTable);
+    bw = client.createBatchWriter(otherTable, null);
     // 500k
     byte[] bigValue = new byte[1024 * 500];
     Arrays.fill(bigValue, (byte) 1);
@@ -379,12 +379,12 @@ public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacB
 
     bw.close();
 
-    conn.tableOperations().flush(otherTable, null, null, true);
+    client.tableOperations().flush(otherTable, null, null, true);
 
     // Get the tservers which the master deems as active
     final ClientContext context = getClientContext();
     List<String> tservers = MasterClient.execute(context,
-        client -> client.getActiveTservers(Tracer.traceInfo(), context.rpcCreds()));
+        cli -> cli.getActiveTservers(Tracer.traceInfo(), context.rpcCreds()));
 
     assertEquals("Expected only one active tservers", 1, tservers.size());
 
@@ -393,9 +393,8 @@ public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacB
     // Get the active WALs from that server
     log.info("Fetching active WALs from {}", tserver);
 
-    Client client = ThriftUtil.getTServerClient(tserver, context);
-    List<String> activeWalsForTserver = client.getActiveLogs(Tracer.traceInfo(),
-        context.rpcCreds());
+    Client cli = ThriftUtil.getTServerClient(tserver, context);
+    List<String> activeWalsForTserver = cli.getActiveLogs(Tracer.traceInfo(), context.rpcCreds());
 
     log.info("Active wals: {}", activeWalsForTserver);
 
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/KerberosReplicationIT.java b/test/src/main/java/org/apache/accumulo/test/replication/KerberosReplicationIT.java
index 09377ac..7995246 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/KerberosReplicationIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/KerberosReplicationIT.java
@@ -166,49 +166,51 @@ public class KerberosReplicationIT extends AccumuloITBase {
     ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
       log.info("testing {}", ugi);
       final KerberosToken token = new KerberosToken();
-      final AccumuloClient primaryConn = primary.getAccumuloClient(rootUser.getPrincipal(), token);
-      final AccumuloClient peerConn = peer.getAccumuloClient(rootUser.getPrincipal(), token);
+      final AccumuloClient primaryclient = primary.getAccumuloClient(rootUser.getPrincipal(),
+          token);
+      final AccumuloClient peerclient = peer.getAccumuloClient(rootUser.getPrincipal(), token);
 
       ClusterUser replicationUser = kdc.getClientPrincipal(0);
 
       // Create user for replication to the peer
-      peerConn.securityOperations().createLocalUser(replicationUser.getPrincipal(), null);
+      peerclient.securityOperations().createLocalUser(replicationUser.getPrincipal(), null);
 
-      primaryConn.instanceOperations().setProperty(
+      primaryclient.instanceOperations().setProperty(
           Property.REPLICATION_PEER_USER.getKey() + PEER_NAME, replicationUser.getPrincipal());
-      primaryConn.instanceOperations().setProperty(
+      primaryclient.instanceOperations().setProperty(
           Property.REPLICATION_PEER_KEYTAB.getKey() + PEER_NAME,
           replicationUser.getKeytab().getAbsolutePath());
 
       // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
-      primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + PEER_NAME,
+      primaryclient.instanceOperations().setProperty(
+          Property.REPLICATION_PEERS.getKey() + PEER_NAME,
           ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
-              AccumuloReplicaSystem.buildConfiguration(peerConn.info().getInstanceName(),
-                  peerConn.info().getZooKeepers())));
+              AccumuloReplicaSystem.buildConfiguration(peerclient.info().getInstanceName(),
+                  peerclient.info().getZooKeepers())));
 
       String primaryTable1 = "primary", peerTable1 = "peer";
 
       // Create tables
-      primaryConn.tableOperations().create(primaryTable1);
-      String masterTableId1 = primaryConn.tableOperations().tableIdMap().get(primaryTable1);
+      primaryclient.tableOperations().create(primaryTable1);
+      String masterTableId1 = primaryclient.tableOperations().tableIdMap().get(primaryTable1);
       assertNotNull(masterTableId1);
 
-      peerConn.tableOperations().create(peerTable1);
-      String peerTableId1 = peerConn.tableOperations().tableIdMap().get(peerTable1);
+      peerclient.tableOperations().create(peerTable1);
+      String peerTableId1 = peerclient.tableOperations().tableIdMap().get(peerTable1);
       assertNotNull(peerTableId1);
 
       // Grant write permission
-      peerConn.securityOperations().grantTablePermission(replicationUser.getPrincipal(), peerTable1,
-          TablePermission.WRITE);
+      peerclient.securityOperations().grantTablePermission(replicationUser.getPrincipal(),
+          peerTable1, TablePermission.WRITE);
 
       // Replicate this table to the peerClusterName in a table with the peerTableId table id
-      primaryConn.tableOperations().setProperty(primaryTable1, Property.TABLE_REPLICATION.getKey(),
-          "true");
-      primaryConn.tableOperations().setProperty(primaryTable1,
+      primaryclient.tableOperations().setProperty(primaryTable1,
+          Property.TABLE_REPLICATION.getKey(), "true");
+      primaryclient.tableOperations().setProperty(primaryTable1,
           Property.TABLE_REPLICATION_TARGET.getKey() + PEER_NAME, peerTableId1);
 
       // Write some data to table1
-      BatchWriter bw = primaryConn.createBatchWriter(primaryTable1, new BatchWriterConfig());
+      BatchWriter bw = primaryclient.createBatchWriter(primaryTable1, new BatchWriterConfig());
       long masterTable1Records = 0L;
       for (int rows = 0; rows < 2500; rows++) {
         Mutation m = new Mutation(primaryTable1 + rows);
@@ -224,7 +226,7 @@ public class KerberosReplicationIT extends AccumuloITBase {
 
       log.info("Wrote all data to primary cluster");
 
-      Set<String> filesFor1 = primaryConn.replicationOperations().referencedFiles(primaryTable1);
+      Set<String> filesFor1 = primaryclient.replicationOperations().referencedFiles(primaryTable1);
 
       // Restart the tserver to force a close on the WAL
       for (ProcessReference proc : primary.getProcesses().get(ServerType.TABLET_SERVER)) {
@@ -235,14 +237,14 @@ public class KerberosReplicationIT extends AccumuloITBase {
       log.info("Restarted the tserver");
 
       // Read the data -- the tserver is back up and running and tablets are assigned
-      Iterators.size(primaryConn.createScanner(primaryTable1, Authorizations.EMPTY).iterator());
+      Iterators.size(primaryclient.createScanner(primaryTable1, Authorizations.EMPTY).iterator());
 
       // Wait for both tables to be replicated
       log.info("Waiting for {} for {}", filesFor1, primaryTable1);
-      primaryConn.replicationOperations().drain(primaryTable1, filesFor1);
+      primaryclient.replicationOperations().drain(primaryTable1, filesFor1);
 
       long countTable = 0L;
-      for (Entry<Key,Value> entry : peerConn.createScanner(peerTable1, Authorizations.EMPTY)) {
+      for (Entry<Key,Value> entry : peerclient.createScanner(peerTable1, Authorizations.EMPTY)) {
         countTable++;
         assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " "
             + entry.getValue(), entry.getKey().getRow().toString().startsWith(primaryTable1));
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java b/test/src/main/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java
index 74ded1e..684a57e 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java
@@ -176,25 +176,26 @@ public class MultiInstanceReplicationIT extends ConfigurableMacBase {
     peerCluster.start();
 
     try {
-      final AccumuloClient connMaster = getClient();
-      final AccumuloClient connPeer = peerCluster.getAccumuloClient("root",
+      final AccumuloClient clientMaster = getClient();
+      final AccumuloClient clientPeer = peerCluster.getAccumuloClient("root",
           new PasswordToken(ROOT_PASSWORD));
 
-      ReplicationTable.setOnline(connMaster);
+      ReplicationTable.setOnline(clientMaster);
 
       String peerUserName = "peer", peerPassword = "foo";
 
       String peerClusterName = "peer";
 
-      connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
+      clientPeer.securityOperations().createLocalUser(peerUserName,
+          new PasswordToken(peerPassword));
 
-      connMaster.instanceOperations()
+      clientMaster.instanceOperations()
           .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
-      connMaster.instanceOperations()
+      clientMaster.instanceOperations()
           .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
 
       // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
-      connMaster.instanceOperations().setProperty(
+      clientMaster.instanceOperations().setProperty(
           Property.REPLICATION_PEERS.getKey() + peerClusterName,
           ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
               AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(),
@@ -202,25 +203,25 @@ public class MultiInstanceReplicationIT extends ConfigurableMacBase {
 
       final String masterTable = "master", peerTable = "peer";
 
-      connMaster.tableOperations().create(masterTable);
-      String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
+      clientMaster.tableOperations().create(masterTable);
+      String masterTableId = clientMaster.tableOperations().tableIdMap().get(masterTable);
       assertNotNull(masterTableId);
 
-      connPeer.tableOperations().create(peerTable);
-      String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
+      clientPeer.tableOperations().create(peerTable);
+      String peerTableId = clientPeer.tableOperations().tableIdMap().get(peerTable);
       assertNotNull(peerTableId);
 
-      connPeer.securityOperations().grantTablePermission(peerUserName, peerTable,
+      clientPeer.securityOperations().grantTablePermission(peerUserName, peerTable,
           TablePermission.WRITE);
 
       // Replicate this table to the peerClusterName in a table with the peerTableId table id
-      connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(),
+      clientMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(),
           "true");
-      connMaster.tableOperations().setProperty(masterTable,
+      clientMaster.tableOperations().setProperty(masterTable,
           Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);
 
       // Write some data to table1
-      BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
+      BatchWriter bw = clientMaster.createBatchWriter(masterTable, new BatchWriterConfig());
       for (int rows = 0; rows < 5000; rows++) {
         Mutation m = new Mutation(Integer.toString(rows));
         for (int cols = 0; cols < 100; cols++) {
@@ -234,7 +235,7 @@ public class MultiInstanceReplicationIT extends ConfigurableMacBase {
 
       log.info("Wrote all data to master cluster");
 
-      final Set<String> filesNeedingReplication = connMaster.replicationOperations()
+      final Set<String> filesNeedingReplication = clientMaster.replicationOperations()
           .referencedFiles(masterTable);
 
       log.info("Files to replicate: " + filesNeedingReplication);
@@ -245,17 +246,17 @@ public class MultiInstanceReplicationIT extends ConfigurableMacBase {
       cluster.exec(TabletServer.class);
 
       log.info("TabletServer restarted");
-      Iterators.size(ReplicationTable.getScanner(connMaster).iterator());
+      Iterators.size(ReplicationTable.getScanner(clientMaster).iterator());
       log.info("TabletServer is online");
 
-      while (!ReplicationTable.isOnline(connMaster)) {
+      while (!ReplicationTable.isOnline(clientMaster)) {
         log.info("Replication table still offline, waiting");
         Thread.sleep(5000);
       }
 
       log.info("");
       log.info("Fetching metadata records:");
-      for (Entry<Key,Value> kv : connMaster.createScanner(MetadataTable.NAME,
+      for (Entry<Key,Value> kv : clientMaster.createScanner(MetadataTable.NAME,
           Authorizations.EMPTY)) {
         if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
           log.info("{} {}", kv.getKey().toStringNoTruncate(),
@@ -267,14 +268,14 @@ public class MultiInstanceReplicationIT extends ConfigurableMacBase {
 
       log.info("");
       log.info("Fetching replication records:");
-      for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
+      for (Entry<Key,Value> kv : ReplicationTable.getScanner(clientMaster)) {
         log.info("{} {}", kv.getKey().toStringNoTruncate(),
             ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
       }
 
       Future<Boolean> future = executor.submit(() -> {
         long then = System.currentTimeMillis();
-        connMaster.replicationOperations().drain(masterTable, filesNeedingReplication);
+        clientMaster.replicationOperations().drain(masterTable, filesNeedingReplication);
         long now = System.currentTimeMillis();
         log.info("Drain completed in " + (now - then) + "ms");
         return true;
@@ -293,7 +294,7 @@ public class MultiInstanceReplicationIT extends ConfigurableMacBase {
 
       log.info("");
       log.info("Fetching metadata records:");
-      for (Entry<Key,Value> kv : connMaster.createScanner(MetadataTable.NAME,
+      for (Entry<Key,Value> kv : clientMaster.createScanner(MetadataTable.NAME,
           Authorizations.EMPTY)) {
         if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
           log.info("{} {}", kv.getKey().toStringNoTruncate(),
@@ -305,13 +306,13 @@ public class MultiInstanceReplicationIT extends ConfigurableMacBase {
 
       log.info("");
       log.info("Fetching replication records:");
-      for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
+      for (Entry<Key,Value> kv : ReplicationTable.getScanner(clientMaster)) {
         log.info("{} {}", kv.getKey().toStringNoTruncate(),
             ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
       }
 
-      try (Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY);
-          Scanner peer = connPeer.createScanner(peerTable, Authorizations.EMPTY)) {
+      try (Scanner master = clientMaster.createScanner(masterTable, Authorizations.EMPTY);
+          Scanner peer = clientPeer.createScanner(peerTable, Authorizations.EMPTY)) {
         Iterator<Entry<Key,Value>> masterIter = master.iterator(), peerIter = peer.iterator();
         Entry<Key,Value> masterEntry = null, peerEntry = null;
         while (masterIter.hasNext() && peerIter.hasNext()) {
@@ -350,23 +351,24 @@ public class MultiInstanceReplicationIT extends ConfigurableMacBase {
     peer1Cluster.start();
 
     try {
-      AccumuloClient connMaster = getClient();
-      AccumuloClient connPeer = peer1Cluster.getAccumuloClient("root",
+      AccumuloClient clientMaster = getClient();
+      AccumuloClient clientPeer = peer1Cluster.getAccumuloClient("root",
           new PasswordToken(ROOT_PASSWORD));
 
       String peerClusterName = "peer";
       String peerUserName = "peer", peerPassword = "foo";
 
       // Create local user
-      connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
+      clientPeer.securityOperations().createLocalUser(peerUserName,
+          new PasswordToken(peerPassword));
... 3155 lines suppressed ...


Mime
View raw message