accumulo-commits mailing list archives

From: mwa...@apache.org
Subject: [accumulo] branch master updated: Deprecated Instance, ZooKeeperInstance & HdfsZooInstance (#579)
Date: Tue, 07 Aug 2018 17:07:29 GMT
This is an automated email from the ASF dual-hosted git repository.

mwalch pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/master by this push:
     new 15754af  Deprecated Instance, ZooKeeperInstance & HdfsZooInstance (#579)
15754af is described below

commit 15754af7711efa73243299a94e9da4e4eae5fd83
Author: Mike Walch <mwalch@apache.org>
AuthorDate: Tue Aug 7 13:07:26 2018 -0400

    Deprecated Instance, ZooKeeperInstance & HdfsZooInstance (#579)
    
    * Renamed AccumuloServerContext to ServerContext
    * Created ServerInfo class
---
 .../core/client/mapred/AbstractInputFormat.java    |   5 +-
 .../core/client/mapred/AccumuloOutputFormat.java   |  11 +-
 .../core/client/mapreduce/AbstractInputFormat.java |   5 +-
 .../client/mapreduce/AccumuloOutputFormat.java     |  11 +-
 .../mapreduce/lib/impl/ConfiguratorBase.java       |  14 +-
 .../mapreduce/lib/impl/InputConfigurator.java      |  17 --
 .../org/apache/accumulo/core/cli/ClientOpts.java   |  13 --
 .../org/apache/accumulo/core/client/Connector.java |   2 +
 .../org/apache/accumulo/core/client/Instance.java  |   3 +
 .../core/client/MutationsRejectedException.java    |  40 +++-
 .../core/client/TableOfflineException.java         |   5 +-
 .../accumulo/core/client/ZooKeeperInstance.java    |   7 +-
 .../core/client/impl/ClientConfConverter.java      |  11 --
 .../accumulo/core/client/impl/ClientContext.java   | 215 +++++++++++++++------
 .../core/client/impl/ClientInfoFactory.java        |   8 -
 .../core/client/impl/ConditionalWriterImpl.java    |   8 +-
 .../accumulo/core/client/impl/ConnectorImpl.java   |  10 +-
 .../accumulo/core/client/impl/Credentials.java     |  10 +-
 .../core/client/impl/DelegationTokenImpl.java      |   7 +-
 .../core/client/impl/NamespaceOperationsImpl.java  |   5 +-
 .../accumulo/core/client/impl/Namespaces.java      |  41 ++--
 .../core/client/impl/ReplicationClient.java        |   7 +-
 .../core/client/impl/RootTabletLocator.java        |   9 +-
 .../core/client/impl/SecurityOperationsImpl.java   |   2 +-
 .../core/client/impl/SyncingTabletLocator.java     |   5 +-
 .../apache/accumulo/core/client/impl/TableMap.java |  10 +-
 .../core/client/impl/TableOperationsImpl.java      |   4 +-
 .../apache/accumulo/core/client/impl/Tables.java   | 122 ++++--------
 .../accumulo/core/client/impl/TabletLocator.java   |  10 +-
 .../core/client/impl/TabletLocatorImpl.java        |   3 +-
 .../impl/TabletServerBatchReaderIterator.java      |   2 +-
 .../core/client/impl/TabletServerBatchWriter.java  |   4 +-
 .../accumulo/core/client/impl/ThriftScanner.java   |   4 +-
 .../core/client/impl/ZookeeperLockChecker.java     |  11 +-
 .../core/metadata/MetadataLocationObtainer.java    |   4 +-
 .../core/replication/ReplicationTable.java         |   3 +-
 .../accumulo/core/rpc/SaslConnectionParams.java    |   8 +
 .../accumulo/core/trace/DistributedTrace.java      |  12 --
 .../org/apache/accumulo/core/util/MonitorUtil.java |  13 +-
 .../apache/accumulo/core/zookeeper/ZooUtil.java    |   5 -
 .../core/client/ZooKeeperInstanceTest.java         |  15 +-
 .../core/client/impl/RootTabletLocatorTest.java    |  17 +-
 .../core/client/impl/TableOperationsImplTest.java  |  22 +--
 .../core/client/impl/TabletLocatorImplTest.java    |  92 +++------
 .../core/client/impl/ZookeeperLockCheckerTest.java |  17 +-
 .../core/metadata/MetadataServicerTest.java        |   5 +-
 .../accumulo/core/security/CredentialsTest.java    |  18 +-
 .../apache/accumulo/core/util/MonitorUtilTest.java |  13 +-
 .../org/apache/accumulo/plugin/it/PluginIT.java    |  21 +-
 .../apache/accumulo/cluster/AccumuloCluster.java   |   6 +
 .../standalone/StandaloneAccumuloCluster.java      |   6 +
 .../accumulo/minicluster/MiniAccumuloCluster.java  |  16 ++
 .../accumulo/minicluster/MiniAccumuloInstance.java |  10 +-
 .../minicluster/impl/MiniAccumuloClusterImpl.java  |   6 +
 .../org/apache/accumulo/proxy/ProxyServer.java     |   8 +-
 .../java/org/apache/accumulo/server/Accumulo.java  |   9 +-
 ...cumuloServerContext.java => ServerContext.java} | 137 +++++++++----
 .../org/apache/accumulo/server/ServerInfo.java     | 156 +++++++++++++++
 .../accumulo/server/cli/ClientOnDefaultTable.java  |  21 +-
 .../accumulo/server/cli/ClientOnRequiredTable.java |  21 +-
 .../org/apache/accumulo/server/cli/ClientOpts.java |  18 +-
 .../server/client/ClientServiceHandler.java        |  18 +-
 .../accumulo/server/client/HdfsZooInstance.java    |  29 ++-
 .../accumulo/server/conf/ConfigSanityCheck.java    |   4 +-
 .../accumulo/server/conf/NamespaceConfWatcher.java |  15 +-
 .../server/conf/NamespaceConfiguration.java        |  17 +-
 .../server/conf/ServerConfigurationFactory.java    |  22 +--
 .../accumulo/server/conf/TableConfWatcher.java     |  15 +-
 .../accumulo/server/conf/TableConfiguration.java   |  21 +-
 .../server/conf/TableParentConfiguration.java      |   9 +-
 .../server/conf/ZooConfigurationFactory.java       |  27 +--
 .../server/constraints/MetadataConstraints.java    |   4 +-
 .../accumulo/server/fs/PerTableVolumeChooser.java  |   4 +-
 .../accumulo/server/fs/PreferredVolumeChooser.java |   4 +-
 .../org/apache/accumulo/server/fs/VolumeUtil.java  |   6 +-
 .../apache/accumulo/server/init/Initialize.java    |  23 +--
 .../balancer/HostRegexTableLoadBalancer.java       |  10 +-
 .../server/master/balancer/RegexGroupBalancer.java |   6 +-
 .../server/master/balancer/TableLoadBalancer.java  |   4 +-
 .../server/master/balancer/TabletBalancer.java     |  11 +-
 .../server/master/state/MetaDataStateStore.java    |   6 +-
 .../server/master/state/RootTabletStateStore.java  |   4 +-
 .../server/master/state/TabletStateStore.java      |  12 +-
 .../accumulo/server/master/state/ZooStore.java     |   5 +-
 .../accumulo/server/problems/ProblemReport.java    |  34 ++--
 .../server/problems/ProblemReportingIterator.java  |   6 +-
 .../accumulo/server/problems/ProblemReports.java   |  16 +-
 .../server/replication/ReplicationUtil.java        |  12 +-
 .../apache/accumulo/server/rpc/TServerUtils.java   |   4 +-
 .../server/security/AuditedSecurityOperation.java  |  10 +-
 .../server/security/SecurityOperation.java         |  10 +-
 .../server/security/SystemCredentials.java         |  19 +-
 .../AuthenticationTokenSecretManager.java          |  15 +-
 .../accumulo/server/tables/TableManager.java       |  51 +++--
 .../server/tablets/UniqueNameAllocator.java        |   4 +-
 .../tabletserver/LargestFirstMemoryManager.java    |   7 +-
 .../org/apache/accumulo/server/util/Admin.java     |  23 +--
 .../apache/accumulo/server/util/ChangeSecret.java  |  51 +++--
 .../accumulo/server/util/CleanZookeeper.java       |  15 +-
 .../accumulo/server/util/FindOfflineTablets.java   |   8 +-
 .../java/org/apache/accumulo/server/util/Info.java |  11 +-
 .../accumulo/server/util/ListVolumesUsed.java      |   8 +-
 .../accumulo/server/util/LoginProperties.java      |   5 +-
 .../accumulo/server/util/MetadataTableUtil.java    |  26 ++-
 .../accumulo/server/util/NamespacePropUtil.java    |   5 +-
 .../accumulo/server/util/RandomizeVolumes.java     |  10 +-
 .../accumulo/server/util/SystemPropUtil.java       |   7 +-
 .../apache/accumulo/server/util/TablePropUtil.java |   5 +-
 .../accumulo/server/util/TabletServerLocks.java    |  10 +-
 .../apache/accumulo/server/util/ZooKeeperMain.java |  11 +-
 .../server/zookeeper/TransactionWatcher.java       |  30 ++-
 ...rverContextTest.java => ServerContextTest.java} |   9 +-
 .../accumulo/server/client/BulkImporterTest.java   |   3 +-
 .../server/conf/NamespaceConfigurationTest.java    |  24 +--
 .../conf/ServerConfigurationFactoryTest.java       |  21 +-
 .../server/conf/TableConfigurationTest.java        |  21 +-
 .../server/conf/ZooConfigurationFactoryTest.java   |  26 +--
 .../BaseHostRegexTableLoadBalancerTest.java        |  79 +++-----
 ...tRegexTableLoadBalancerReconfigurationTest.java |  14 +-
 .../balancer/HostRegexTableLoadBalancerTest.java   |  58 ++++--
 .../master/balancer/TableLoadBalancerTest.java     |  45 +++--
 .../server/problems/ProblemReportTest.java         |  17 +-
 .../server/replication/ReplicationUtilTest.java    |   6 +-
 .../server/rpc/SaslDigestCallbackHandlerTest.java  |  25 +--
 .../server/security/SystemCredentialsTest.java     |  21 +-
 .../AuthenticationTokenSecretManagerTest.java      |  54 ++----
 .../ZooAuthenticationKeyWatcherTest.java           |  60 +++---
 .../org/apache/accumulo/server/util/AdminTest.java |  12 +-
 .../accumulo/server/util/TServerUtilsTest.java     |  86 ++++-----
 .../accumulo/gc/GarbageCollectWriteAheadLogs.java  |  10 +-
 .../apache/accumulo/gc/SimpleGarbageCollector.java |  77 ++++----
 .../replication/CloseWriteAheadLogReferences.java  |   6 +-
 .../gc/GarbageCollectWriteAheadLogsTest.java       |  12 +-
 .../accumulo/gc/SimpleGarbageCollectorTest.java    |  51 ++---
 server/master/src/main/findbugs/exclude-filter.xml |   2 +-
 .../apache/accumulo/master/FateServiceHandler.java |  38 ++--
 .../java/org/apache/accumulo/master/Master.java    | 108 +++++------
 .../master/MasterClientServiceHandler.java         |  18 +-
 .../apache/accumulo/master/TabletGroupWatcher.java |  12 +-
 .../master/metrics/Metrics2ReplicationMetrics.java |   4 +-
 .../master/metrics/ReplicationMetrics.java         |   4 +-
 .../replication/MasterReplicationCoordinator.java  |   7 +-
 .../master/replication/ReplicationDriver.java      |   2 +-
 .../master/replication/SequentialWorkAssigner.java |   2 +-
 .../master/replication/UnorderedWorkAssigner.java  |   2 +-
 .../accumulo/master/replication/WorkMaker.java     |   8 +-
 .../apache/accumulo/master/state/MergeStats.java   |   4 +-
 .../apache/accumulo/master/state/SetGoalState.java |  13 +-
 .../master/tableOps/CancelCompactions.java         |   8 +-
 .../apache/accumulo/master/tableOps/CleanUp.java   |  10 +-
 .../accumulo/master/tableOps/CloneMetadata.java    |   6 +-
 .../accumulo/master/tableOps/ClonePermissions.java |  10 +-
 .../accumulo/master/tableOps/CloneTable.java       |   2 +-
 .../accumulo/master/tableOps/CloneZookeeper.java   |   6 +-
 .../accumulo/master/tableOps/CompactRange.java     |   8 +-
 .../accumulo/master/tableOps/CompactionDriver.java |  11 +-
 .../accumulo/master/tableOps/CreateNamespace.java  |   2 +-
 .../accumulo/master/tableOps/CreateTable.java      |   2 +-
 .../master/tableOps/ImportPopulateZookeeper.java   |  12 +-
 .../master/tableOps/ImportSetupPermissions.java    |  10 +-
 .../accumulo/master/tableOps/ImportTable.java      |   4 +-
 .../accumulo/master/tableOps/NamespaceCleanUp.java |   5 +-
 .../accumulo/master/tableOps/PopulateMetadata.java |   4 +-
 .../master/tableOps/PopulateMetadataTable.java     |   2 +-
 .../master/tableOps/PopulateZookeeper.java         |  10 +-
 .../tableOps/PopulateZookeeperWithNamespace.java   |   6 +-
 .../accumulo/master/tableOps/RenameNamespace.java  |   5 +-
 .../accumulo/master/tableOps/RenameTable.java      |  11 +-
 .../master/tableOps/SetupNamespacePermissions.java |   4 +-
 .../accumulo/master/tableOps/SetupPermissions.java |  12 +-
 .../org/apache/accumulo/master/tableOps/Utils.java |  24 +--
 .../accumulo/master/tableOps/WriteExportFiles.java |  19 +-
 .../master/tableOps/bulkVer1/BulkImport.java       |   6 +-
 .../tableOps/bulkVer1/CleanUpBulkImport.java       |   4 +-
 .../master/tableOps/bulkVer1/LoadFiles.java        |   7 +-
 .../master/tableOps/bulkVer2/BulkImportMove.java   |   2 +-
 .../tableOps/bulkVer2/CleanUpBulkImport.java       |   4 +-
 .../master/tableOps/bulkVer2/LoadFiles.java        |  10 +-
 .../master/tableOps/bulkVer2/PrepBulkImport.java   |   6 +-
 .../master/tserverOps/ShutdownTServer.java         |   4 +-
 .../org/apache/accumulo/master/util/FateAdmin.java |  15 +-
 .../metrics/Metrics2ReplicationMetricsTest.java    |   3 +
 .../MasterReplicationCoordinatorTest.java          |  31 ++-
 .../replication/SequentialWorkAssignerTest.java    |   9 +-
 .../replication/UnorderedWorkAssignerTest.java     |   9 +-
 .../java/org/apache/accumulo/monitor/Monitor.java  |  47 ++---
 .../monitor/rest/trace/TracesResource.java         |   4 +-
 .../rest/tservers/TabletServerResource.java        |   2 +-
 .../monitor/util/AccumuloMonitorAppender.java      |  12 +-
 .../org/apache/accumulo/monitor/view/WebViews.java |   2 +-
 .../org/apache/accumulo/monitor/it/WebViewsIT.java |  13 +-
 .../org/apache/accumulo/tracer/TraceServer.java    |  23 +--
 .../org/apache/accumulo/tserver/FileManager.java   |  12 +-
 .../org/apache/accumulo/tserver/TabletServer.java  | 145 +++++++-------
 .../tserver/TabletServerResourceManager.java       |  12 +-
 .../TooManyDeletesCompactionStrategy.java          |   3 +-
 .../accumulo/tserver/log/TabletServerLogger.java   |   2 +-
 .../tserver/replication/AccumuloReplicaSystem.java |  29 +--
 .../replication/ReplicationServicerHandler.java    |   9 +-
 .../apache/accumulo/tserver/tablet/Compactor.java  |   6 +-
 .../accumulo/tserver/tablet/DatafileManager.java   |  10 +-
 .../accumulo/tserver/tablet/MinorCompactor.java    |  13 +-
 .../org/apache/accumulo/tserver/tablet/Tablet.java |  52 +++--
 .../apache/accumulo/tserver/InMemoryMapTest.java   |   2 -
 .../tserver/LargestFirstMemoryManagerTest.java     |  16 +-
 .../main/java/org/apache/accumulo/shell/Shell.java |   5 -
 .../accumulo/shell/commands/FateCommand.java       |  20 +-
 .../accumulo/shell/commands/ListBulkCommand.java   |   8 +-
 .../accumulo/harness/AccumuloClusterHarness.java   |   6 +
 .../accumulo/test/BadDeleteMarkersCreatedIT.java   |   9 +-
 .../apache/accumulo/test/BatchWriterIterator.java  |   2 +-
 .../accumulo/test/DetectDeadTabletServersIT.java   |   2 +-
 .../org/apache/accumulo/test/GetMasterStats.java   |   9 +-
 .../test/MasterRepairsDualAssignmentIT.java        |   2 +-
 .../accumulo/test/MetaConstraintRetryIT.java       |   2 +-
 .../accumulo/test/MultiTableBatchWriterIT.java     |   2 +-
 .../apache/accumulo/test/QueryMetadataTable.java   |   4 +-
 .../accumulo/test/TableConfigurationUpdateIT.java  |   8 +-
 .../apache/accumulo/test/TableOperationsIT.java    |   2 +-
 .../ThriftServerBindsBeforeZooKeeperLockIT.java    |   2 +-
 .../org/apache/accumulo/test/TotalQueuedIT.java    |   7 +-
 .../apache/accumulo/test/TransportCachingIT.java   |  29 +--
 .../java/org/apache/accumulo/test/UnusedWALIT.java |   2 -
 .../accumulo/test/UserCompactionStrategyIT.java    |   2 +-
 .../java/org/apache/accumulo/test/VolumeIT.java    |   6 +-
 .../org/apache/accumulo/test/WrongTabletTest.java  |  14 +-
 .../functional/BalanceAfterCommsFailureIT.java     |   2 +-
 .../BalanceInPresenceOfOfflineTableIT.java         |  10 +-
 .../accumulo/test/functional/BulkFileIT.java       |   4 +-
 .../accumulo/test/functional/BulkLoadIT.java       |   3 +-
 .../test/functional/ConcurrentDeleteTableIT.java   |   4 +-
 .../test/functional/ConfigurableMacBase.java       |   7 +-
 .../test/functional/DynamicThreadPoolsIT.java      |   5 +-
 .../accumulo/test/functional/FateStarvationIT.java |   2 +-
 .../test/functional/FunctionalTestUtils.java       |  17 +-
 .../test/functional/MasterAssignmentIT.java        |   2 +-
 .../test/functional/MetadataMaxFilesIT.java        |   2 +-
 .../accumulo/test/functional/MonitorSslIT.java     |   2 +-
 .../accumulo/test/functional/ReadWriteIT.java      |   2 +-
 .../apache/accumulo/test/functional/RenameIT.java  |   2 +-
 .../test/functional/SimpleBalancerFairnessIT.java  |   4 +-
 .../accumulo/test/functional/SplitRecoveryIT.java  |  23 +--
 .../functional/TabletStateChangeIteratorIT.java    |   2 +-
 .../accumulo/test/functional/ZombieTServer.java    |  12 +-
 .../CloseWriteAheadLogReferencesIT.java            |  27 +--
 .../apache/accumulo/test/master/MergeStateIT.java  |   4 +-
 .../accumulo/test/master/SuspendedTabletsIT.java   |   2 +-
 .../accumulo/test/performance/NullTserver.java     |  19 +-
 .../test/performance/scan/CollectTabletStats.java  |  10 +-
 ...GarbageCollectorCommunicatesWithTServersIT.java |   4 +-
 .../replication/MultiTserverReplicationIT.java     |   4 +-
 .../accumulo/test/replication/ReplicationIT.java   |   2 -
 .../replication/ReplicationOperationsImplIT.java   |   9 +-
 .../test/server/security/SystemCredentialsIT.java  |  94 +--------
 254 files changed, 1993 insertions(+), 2117 deletions(-)
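
The deprecation notes in this commit steer client code away from Instance and ZooKeeperInstance toward Connector.builder() and ClientInfo. A minimal before/after sketch of that migration follows; the builder chain (forInstance, usingPassword) is an assumption about the in-progress 2.0 client API and is not taken from this diff, so treat it as illustrative only.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class ConnectExample {

  // Pre-2.0 pattern, deprecated by this commit (compiles with deprecation warnings):
  static Connector oldWay(String instanceName, String zooKeepers, String user, String pass)
      throws Exception {
    org.apache.accumulo.core.client.Instance inst =
        new org.apache.accumulo.core.client.ZooKeeperInstance(instanceName, zooKeepers);
    return inst.getConnector(user, new PasswordToken(pass));
  }

  // Replacement direction named in the @deprecated javadoc; the method names after
  // builder() are assumed, not shown in this diff:
  static Connector newWay(String instanceName, String zooKeepers, String user, String pass)
      throws Exception {
    return Connector.builder().forInstance(instanceName, zooKeepers)
        .usingPassword(user, pass).build();
  }
}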

diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
index c4d08be..3aef5fe 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
@@ -36,7 +36,6 @@ import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.ClientSideIteratorScanner;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.IsolatedScanner;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.Scanner;
@@ -293,8 +292,10 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
    *          the Hadoop context for the configured job
    * @return an Accumulo instance
    * @since 1.5.0
+   * @deprecated since 2.0.0, Use {@link #getClientInfo(JobConf)} instead
    */
-  protected static Instance getInstance(JobConf job) {
+  @Deprecated
+  protected static org.apache.accumulo.core.client.Instance getInstance(JobConf job) {
     return InputConfigurator.getInstance(CLASS, job);
   }
 
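In both the mapred and mapreduce packages, getInstance(...) on the input and output formats is now deprecated in favor of getClientInfo(...). Since getInstance(JobConf) is protected static, and the javadoc points at getClientInfo(JobConf) in the same class (assumed to follow the same protected static pattern), a job would typically reach the replacement through a subclass. A minimal, hypothetical sketch:

import org.apache.accumulo.core.client.ClientInfo;
import org.apache.accumulo.core.client.mapred.AccumuloInputFormat;
import org.apache.hadoop.mapred.JobConf;

// Hypothetical subclass, used only to reach the protected static accessor.
public class ClientInfoAwareInputFormat extends AccumuloInputFormat {
  static ClientInfo clientInfo(JobConf job) {
    // Previously: org.apache.accumulo.core.client.Instance inst = getInstance(job);
    return getClientInfo(job); // replacement named in the @deprecated javadoc
  }
}
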
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java
index 67221b1..39d787d 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java
@@ -28,12 +28,10 @@ import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.MultiTableBatchWriter;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
 import org.apache.accumulo.core.client.admin.SecurityOperations;
 import org.apache.accumulo.core.client.impl.AuthenticationTokenIdentifier;
@@ -223,7 +221,7 @@ public class AccumuloOutputFormat implements OutputFormat<Text,Mutation> {
   }
 
   /**
-   * Configures a {@link ZooKeeperInstance} for this job.
+   * Configures a {@link org.apache.accumulo.core.client.ZooKeeperInstance} for this job.
    *
    * @param job
    *          the Hadoop job instance to be configured
@@ -240,14 +238,17 @@ public class AccumuloOutputFormat implements OutputFormat<Text,Mutation> {
   }
 
   /**
-   * Initializes an Accumulo {@link Instance} based on the configuration.
+   * Initializes an Accumulo {@link org.apache.accumulo.core.client.Instance} based on the
+   * configuration.
    *
    * @param job
    *          the Hadoop context for the configured job
    * @return an Accumulo instance
    * @since 1.5.0
+   * @deprecated since 2.0.0; Use {@link #getClientInfo(JobConf)} instead
    */
-  protected static Instance getInstance(JobConf job) {
+  @Deprecated
+  protected static org.apache.accumulo.core.client.Instance getInstance(JobConf job) {
     return OutputConfigurator.getInstance(CLASS, job);
   }
 
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
index 7bd4546..5366c61 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
@@ -36,7 +36,6 @@ import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.ClientSideIteratorScanner;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.IsolatedScanner;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.Scanner;
@@ -297,8 +296,10 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
    *          the Hadoop context for the configured job
    * @return an Accumulo instance
    * @since 1.5.0
+   * @deprecated since 2.0.0, use {@link #getClientInfo(JobContext)} instead
    */
-  protected static Instance getInstance(JobContext context) {
+  @Deprecated
+  protected static org.apache.accumulo.core.client.Instance getInstance(JobContext context) {
     return InputConfigurator.getInstance(CLASS, context.getConfiguration());
   }
 
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
index b878b03..acb46d1 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
@@ -28,12 +28,10 @@ import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.MultiTableBatchWriter;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
 import org.apache.accumulo.core.client.admin.SecurityOperations;
 import org.apache.accumulo.core.client.impl.AuthenticationTokenIdentifier;
@@ -226,7 +224,7 @@ public class AccumuloOutputFormat extends OutputFormat<Text,Mutation> {
   }
 
   /**
-   * Configures a {@link ZooKeeperInstance} for this job.
+   * Configures a {@link org.apache.accumulo.core.client.ZooKeeperInstance} for this job.
    *
    * @param job
    *          the Hadoop job instance to be configured
@@ -243,14 +241,17 @@ public class AccumuloOutputFormat extends OutputFormat<Text,Mutation> {
   }
 
   /**
-   * Initializes an Accumulo {@link Instance} based on the configuration.
+   * Initializes an Accumulo {@link org.apache.accumulo.core.client.Instance} based on the
+   * configuration.
    *
    * @param context
    *          the Hadoop context for the configured job
    * @return an Accumulo instance
    * @since 1.5.0
+   * @deprecated since 2.0.0; Use {@link #getClientInfo(JobContext)} instead.
    */
-  protected static Instance getInstance(JobContext context) {
+  @Deprecated
+  protected static org.apache.accumulo.core.client.Instance getInstance(JobContext context) {
     return OutputConfigurator.getInstance(CLASS, context.getConfiguration());
   }
 
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
index 438f755..165f53e 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
@@ -34,8 +34,6 @@ import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
 import org.apache.accumulo.core.client.impl.AuthenticationTokenIdentifier;
 import org.apache.accumulo.core.client.impl.ClientConfConverter;
@@ -66,7 +64,7 @@ public class ConfiguratorBase {
   protected static final Logger log = Logger.getLogger(ConfiguratorBase.class);
 
   /**
-   * Configuration keys for {@link Instance#getConnector(String, AuthenticationToken)}.
+   * Specifies that connection info was configured
    *
    * @since 1.6.0
    */
@@ -299,7 +297,7 @@ public class ConfiguratorBase {
   }
 
   /**
-   * Configures a {@link ZooKeeperInstance} for this job.
+   * Configures a {@link org.apache.accumulo.core.client.ZooKeeperInstance} for this job.
    *
    * @param implementingClass
    *          the class whose name will be used as a prefix for the property configuration key
@@ -323,7 +321,8 @@ public class ConfiguratorBase {
   }
 
   /**
-   * Initializes an Accumulo {@link Instance} based on the configuration.
+   * Initializes an Accumulo {@link org.apache.accumulo.core.client.Instance} based on the
+   * configuration.
    *
    * @param implementingClass
    *          the class whose name will be used as a prefix for the property configuration key
@@ -331,8 +330,11 @@ public class ConfiguratorBase {
    *          the Hadoop configuration object to configure
    * @return an Accumulo instance
    * @since 1.6.0
+   * @deprecated since 2.0.0, replaced by {@link #getClientInfo(Class, Configuration)}
    */
-  public static Instance getInstance(Class<?> implementingClass, Configuration conf) {
+  @Deprecated
+  public static org.apache.accumulo.core.client.Instance getInstance(Class<?> implementingClass,
+      Configuration conf) {
     return getConnector(implementingClass, conf).getInstance();
   }
 
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
index a6913ef..7361c00 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
@@ -43,7 +43,6 @@ import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.ClientSideIteratorScanner;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.IsolatedScanner;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.RowIterator;
@@ -729,22 +728,6 @@ public class InputConfigurator extends ConfiguratorBase {
   }
 
   /**
-   * Validates and extracts an {@link Instance} from the configuration
-   *
-   * @param implementingClass
-   *          the class whose name will be used as a prefix for the property configuration key
-   * @param conf
-   *          the Hadoop configuration object to configure
-   * @since 1.7.0
-   */
-  public static Instance validateInstance(Class<?> implementingClass, Configuration conf)
-      throws IOException {
-    if (!isConnectorInfoSet(implementingClass, conf))
-      throw new IOException("Input info has not been set.");
-    return getInstance(implementingClass, conf);
-  }
-
-  /**
    * Validates that the user has permissions on the requested tables
    *
    * @param implementingClass
diff --git a/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java b/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
index a867f6d..2511b99 100644
--- a/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
+++ b/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
@@ -29,7 +29,6 @@ import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.conf.ClientProperty;
 import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
@@ -160,20 +159,8 @@ public class ClientOpts extends Help {
 
   private ClientInfo cachedInfo = null;
   private Connector cachedConnector = null;
-  protected Instance cachedInstance = null;
   private Properties cachedProps = null;
 
-  synchronized public Instance getInstance() {
-    if (cachedInstance == null) {
-      try {
-        cachedInstance = getConnector().getInstance();
-      } catch (AccumuloSecurityException | AccumuloException e) {
-        throw new IllegalStateException(e);
-      }
-    }
-    return cachedInstance;
-  }
-
   public String getPrincipal() {
     return getClientInfo().getPrincipal();
   }
diff --git a/core/src/main/java/org/apache/accumulo/core/client/Connector.java b/core/src/main/java/org/apache/accumulo/core/client/Connector.java
index c85da31..78b9e8d 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/Connector.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/Connector.java
@@ -285,7 +285,9 @@ public abstract class Connector {
    * Accessor method for internal instance object.
    *
    * @return the internal instance object
+   * @deprecated since 2.0.0, use {@link #info()} instead
    */
+  @Deprecated
   public abstract Instance getInstance();
 
   /**
diff --git a/core/src/main/java/org/apache/accumulo/core/client/Instance.java b/core/src/main/java/org/apache/accumulo/core/client/Instance.java
index 8581546..f05243c 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/Instance.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/Instance.java
@@ -24,7 +24,10 @@ import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 /**
  * This class represents the information a client needs to know to connect to an instance of
  * accumulo.
+ *
+ * @deprecated since 2.0.0, use {@link Connector#builder()} and {@link ClientInfo} instead
  */
+@Deprecated
 public interface Instance {
   /**
    * Returns the location of the tablet server that is serving the root tablet.
diff --git a/core/src/main/java/org/apache/accumulo/core/client/MutationsRejectedException.java b/core/src/main/java/org/apache/accumulo/core/client/MutationsRejectedException.java
index 178e7cb..0d8259d 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/MutationsRejectedException.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/MutationsRejectedException.java
@@ -24,6 +24,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.security.SecurityErrorCode;
@@ -53,12 +54,44 @@ public class MutationsRejectedException extends AccumuloException {
    *          number of unknown errors
    *
    * @since 1.7.0
+   * @deprecated since 2.0.0, replaced by
+   *             {@link #MutationsRejectedException(ClientInfo, List, Map, Collection, int, Throwable)}
    */
+  @Deprecated
   public MutationsRejectedException(Instance instance, List<ConstraintViolationSummary> cvsList,
       Map<TabletId,Set<SecurityErrorCode>> hashMap, Collection<String> serverSideErrors,
       int unknownErrors, Throwable cause) {
+    super(
+        "# constraint violations : " + cvsList.size() + "  security codes: " + hashMap.toString()
+            + "  # server errors " + serverSideErrors.size() + " # exceptions " + unknownErrors,
+        cause);
+    this.cvsl = cvsList;
+    this.af = hashMap;
+    this.es = serverSideErrors;
+    this.unknownErrors = unknownErrors;
+  }
+
+  /**
+   * Creates Mutations rejected exception
+   *
+   * @param info
+   *          Client info
+   * @param cvsList
+   *          list of constraint violations
+   * @param hashMap
+   *          authorization failures
+   * @param serverSideErrors
+   *          server side errors
+   * @param unknownErrors
+   *          number of unknown errors
+   *
+   * @since 2.0.0
+   */
+  public MutationsRejectedException(ClientInfo info, List<ConstraintViolationSummary> cvsList,
+      Map<TabletId,Set<SecurityErrorCode>> hashMap, Collection<String> serverSideErrors,
+      int unknownErrors, Throwable cause) {
     super("# constraint violations : " + cvsList.size() + "  security codes: "
-        + format(hashMap, instance) + "  # server errors " + serverSideErrors.size()
+        + format(hashMap, new ClientContext(info)) + "  # server errors " + serverSideErrors.size()
         + " # exceptions " + unknownErrors, cause);
     this.cvsl = cvsList;
     this.af = hashMap;
@@ -66,12 +99,13 @@ public class MutationsRejectedException extends AccumuloException {
     this.unknownErrors = unknownErrors;
   }
 
-  private static String format(Map<TabletId,Set<SecurityErrorCode>> hashMap, Instance instance) {
+  private static String format(Map<TabletId,Set<SecurityErrorCode>> hashMap,
+      ClientContext context) {
     Map<String,Set<SecurityErrorCode>> result = new HashMap<>();
 
     for (Entry<TabletId,Set<SecurityErrorCode>> entry : hashMap.entrySet()) {
       TabletId tabletId = entry.getKey();
-      String tableInfo = Tables.getPrintableTableInfoFromId(instance,
+      String tableInfo = Tables.getPrintableTableInfoFromId(context,
           Table.ID.of(tabletId.getTableId().toString()));
 
       if (!result.containsKey(tableInfo)) {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/TableOfflineException.java b/core/src/main/java/org/apache/accumulo/core/client/TableOfflineException.java
index 33f6576..951274e 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/TableOfflineException.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/TableOfflineException.java
@@ -16,9 +16,6 @@
  */
 package org.apache.accumulo.core.client;
 
-import org.apache.accumulo.core.client.impl.Table;
-import org.apache.accumulo.core.client.impl.Tables;
-
 public class TableOfflineException extends RuntimeException {
 
   private static final long serialVersionUID = 1L;
@@ -28,7 +25,7 @@ public class TableOfflineException extends RuntimeException {
    */
   @Deprecated
   public TableOfflineException(Instance instance, String tableId) {
-    super(Tables.getTableOfflineMsg(instance, Table.ID.of(tableId)));
+    super("Table with ID (" + tableId + ") is offline");
   }
 
   public TableOfflineException(String msg) {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java b/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java
index 60e9326..cc2d0fe 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java
@@ -56,7 +56,10 @@ import org.slf4j.LoggerFactory;
  *
  * If you do not know the instance names then run accumulo
  * org.apache.accumulo.server.util.ListInstances on an accumulo server.
+ *
+ * @deprecated since 2.0.0, Use {@link Connector#builder()} instead
  */
+@Deprecated
 public class ZooKeeperInstance implements Instance {
 
   private static final Logger log = LoggerFactory.getLogger(ZooKeeperInstance.class);
@@ -145,7 +148,7 @@ public class ZooKeeperInstance implements Instance {
 
   @Override
   public List<String> getMasterLocations() {
-    String masterLocPath = ZooUtil.getRoot(this) + Constants.ZMASTER_LOCK;
+    String masterLocPath = ZooUtil.getRoot(getInstanceID()) + Constants.ZMASTER_LOCK;
 
     OpTimer timer = null;
 
@@ -172,7 +175,7 @@ public class ZooKeeperInstance implements Instance {
 
   @Override
   public String getRootTabletLocation() {
-    String zRootLocPath = ZooUtil.getRoot(this) + RootTable.ZROOT_TABLET_LOCATION;
+    String zRootLocPath = ZooUtil.getRoot(getInstanceID()) + RootTable.ZROOT_TABLET_LOCATION;
 
     OpTimer timer = null;
 
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ClientConfConverter.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ClientConfConverter.java
index abef062..1b4c530 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ClientConfConverter.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ClientConfConverter.java
@@ -24,7 +24,6 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.function.Predicate;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.ClientProperty;
 import org.apache.accumulo.core.conf.CredentialProviderFactoryShim;
@@ -135,16 +134,6 @@ public class ClientConfConverter {
     return props;
   }
 
-  public static Properties toProperties(AccumuloConfiguration config, Instance instance,
-      Credentials credentials) {
-    Properties properties = toProperties(toClientConf(config));
-    properties.setProperty(ClientProperty.INSTANCE_NAME.getKey(), instance.getInstanceName());
-    properties.setProperty(ClientProperty.INSTANCE_ZOOKEEPERS.getKey(), instance.getZooKeepers());
-    properties.setProperty(ClientProperty.AUTH_PRINCIPAL.getKey(), credentials.getPrincipal());
-    ClientProperty.setAuthenticationToken(properties, credentials.getToken());
-    return properties;
-  }
-
   public static Properties toProperties(AccumuloConfiguration config) {
     return toProperties(toClientConf(config));
   }
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ClientContext.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ClientContext.java
index 99d5b72..58b6067 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ClientContext.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ClientContext.java
@@ -17,24 +17,33 @@
 package org.apache.accumulo.core.client.impl;
 
 import static com.google.common.base.Preconditions.checkArgument;
+import static java.nio.charset.StandardCharsets.UTF_8;
 
+import java.util.Collections;
 import java.util.List;
+import java.util.Properties;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.ClientProperty;
 import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.rpc.SaslConnectionParams;
 import org.apache.accumulo.core.rpc.SslConnectionParams;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.OpTimer;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.ZooCache;
+import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Suppliers;
 
@@ -49,8 +58,12 @@ import com.google.common.base.Suppliers;
  */
 public class ClientContext {
 
+  private static final Logger log = LoggerFactory.getLogger(ClientContext.class);
+
   private ClientInfo info;
-  private Instance inst;
+  private String instanceId = null;
+  private final ZooCache zooCache;
+
   private Credentials creds;
   private BatchWriterConfig batchWriterConfig;
   private AccumuloConfiguration serverConf;
@@ -69,57 +82,69 @@ public class ClientContext {
   }
 
   public ClientContext(ClientInfo info) {
-    this(info, ClientInfoFactory.getInstance(info), ClientInfoFactory.getCredentials(info),
-        ClientConfConverter.toAccumuloConf(info.getProperties()));
-  }
-
-  /**
-   * Instantiate a client context from an existing {@link AccumuloConfiguration}. This is primarily
-   * intended for subclasses and testing.
-   */
-  public ClientContext(Instance instance, Credentials credentials,
-      AccumuloConfiguration serverConf) {
-    this(null, instance, credentials, serverConf);
+    this(info, ClientConfConverter.toAccumuloConf(info.getProperties()));
   }
 
-  public ClientContext(ClientInfo info, Instance instance, Credentials credentials,
-      AccumuloConfiguration serverConf) {
+  public ClientContext(ClientInfo info, AccumuloConfiguration serverConf) {
     this.info = info;
-    inst = instance;
-    creds = credentials;
+    zooCache = new ZooCacheFactory().getZooCache(info.getZooKeepers(),
+        info.getZooKeepersSessionTimeOut());
     this.serverConf = serverConf;
-    saslSupplier = () -> {
-      // Use the clientProps if we have it
-      if (info != null) {
-        if (!ClientProperty.SASL_ENABLED.getBoolean(info.getProperties())) {
-          return null;
-        }
-        return new SaslConnectionParams(info.getProperties(), getCredentials().getToken());
-      }
-      AccumuloConfiguration conf = getConfiguration();
-      if (!conf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
-        return null;
-      }
-      return new SaslConnectionParams(conf, getCredentials().getToken());
-    };
-
     timeoutSupplier = memoizeWithExpiration(
         () -> getConfiguration().getTimeInMillis(Property.GENERAL_RPC_TIMEOUT));
     sslSupplier = memoizeWithExpiration(() -> SslConnectionParams.forClient(getConfiguration()));
-    saslSupplier = memoizeWithExpiration(saslSupplier);
+    saslSupplier = memoizeWithExpiration(
+        () -> SaslConnectionParams.from(getConfiguration(), getCredentials().getToken()));
   }
 
   /**
    * Retrieve the instance used to construct this context
+   *
+   * @deprecated since 2.0.0
    */
-  public Instance getInstance() {
-    return inst;
+  @Deprecated
+  public org.apache.accumulo.core.client.Instance getDeprecatedInstance() {
+    final ClientContext context = this;
+    return new org.apache.accumulo.core.client.Instance() {
+      @Override
+      public String getRootTabletLocation() {
+        return context.getRootTabletLocation();
+      }
+
+      @Override
+      public List<String> getMasterLocations() {
+        return context.getMasterLocations();
+      }
+
+      @Override
+      public String getInstanceID() {
+        return context.getInstanceID();
+      }
+
+      @Override
+      public String getInstanceName() {
+        return context.getInstanceName();
+      }
+
+      @Override
+      public String getZooKeepers() {
+        return context.getZooKeepers();
+      }
+
+      @Override
+      public int getZooKeepersSessionTimeOut() {
+        return context.getZooKeepersSessionTimeOut();
+      }
+
+      @Override
+      public Connector getConnector(String principal, AuthenticationToken token)
+          throws AccumuloException, AccumuloSecurityException {
+        return context.getConnector().changeUser(principal, token);
+      }
+    };
   }
 
   public ClientInfo getClientInfo() {
-    if (info == null) {
-      info = new ClientInfoImpl(ClientConfConverter.toProperties(serverConf, inst, creds));
-    }
     return info;
   }
 
@@ -127,9 +152,24 @@ public class ClientContext {
    * Retrieve the credentials used to construct this context
    */
   public synchronized Credentials getCredentials() {
+    if (creds == null) {
+      creds = new Credentials(info.getPrincipal(), info.getAuthenticationToken());
+    }
     return creds;
   }
 
+  public String getPrincipal() {
+    return getCredentials().getPrincipal();
+  }
+
+  public AuthenticationToken getAuthenticationToken() {
+    return getCredentials().getToken();
+  }
+
+  public Properties getProperties() {
+    return info.getProperties();
+  }
+
   /**
    * Update the credentials in the current context after changing the current user's password or
    * other auth token
@@ -171,16 +211,9 @@ public class ClientContext {
   /**
    * Retrieve a connector
    */
-  public Connector getConnector() throws AccumuloException, AccumuloSecurityException {
-    // avoid making more connectors than necessary
+  public synchronized Connector getConnector() throws AccumuloException, AccumuloSecurityException {
     if (conn == null) {
-      if (getInstance() instanceof ZooKeeperInstance) {
-        // reuse existing context
-        conn = new ConnectorImpl(this);
-      } else {
-        Credentials c = getCredentials();
-        conn = getInstance().getConnector(c.getPrincipal(), c.getToken());
-      }
+      conn = new ConnectorImpl(this);
     }
     return conn;
   }
@@ -201,7 +234,7 @@ public class ClientContext {
     }
 
     if (rpcCreds == null) {
-      rpcCreds = getCredentials().toThrift(getInstance());
+      rpcCreds = getCredentials().toThrift(getInstanceID());
     }
 
     return rpcCreds;
@@ -213,7 +246,30 @@ public class ClientContext {
    * @return location in "hostname:port" form
    */
   public String getRootTabletLocation() {
-    return inst.getRootTabletLocation();
+    String zRootLocPath = getZooKeeperRoot() + RootTable.ZROOT_TABLET_LOCATION;
+
+    OpTimer timer = null;
+
+    if (log.isTraceEnabled()) {
+      log.trace("tid={} Looking up root tablet location in zookeeper.",
+          Thread.currentThread().getId());
+      timer = new OpTimer().start();
+    }
+
+    byte[] loc = zooCache.get(zRootLocPath);
+
+    if (timer != null) {
+      timer.stop();
+      log.trace("tid={} Found root tablet at {} in {}", Thread.currentThread().getId(),
+          (loc == null ? "null" : new String(loc, UTF_8)),
+          String.format("%.3f secs", timer.scale(TimeUnit.SECONDS)));
+    }
+
+    if (loc == null) {
+      return null;
+    }
+
+    return new String(loc, UTF_8).split("\\|")[0];
   }
 
   /**
@@ -222,7 +278,29 @@ public class ClientContext {
    * @return a list of locations in "hostname:port" form
    */
   public List<String> getMasterLocations() {
-    return inst.getMasterLocations();
+    String masterLocPath = getZooKeeperRoot() + Constants.ZMASTER_LOCK;
+
+    OpTimer timer = null;
+
+    if (log.isTraceEnabled()) {
+      log.trace("tid={} Looking up master location in zookeeper.", Thread.currentThread().getId());
+      timer = new OpTimer().start();
+    }
+
+    byte[] loc = ZooUtil.getLockData(zooCache, masterLocPath);
+
+    if (timer != null) {
+      timer.stop();
+      log.trace("tid={} Found master at {} in {}", Thread.currentThread().getId(),
+          (loc == null ? "null" : new String(loc, UTF_8)),
+          String.format("%.3f secs", timer.scale(TimeUnit.SECONDS)));
+    }
+
+    if (loc == null) {
+      return Collections.emptyList();
+    }
+
+    return Collections.singletonList(new String(loc, UTF_8));
   }
 
   /**
@@ -231,7 +309,32 @@ public class ClientContext {
    * @return a UUID
    */
   public String getInstanceID() {
-    return inst.getInstanceID();
+    final String instanceName = info.getInstanceName();
+    if (instanceId == null) {
+      // want the instance id to be stable for the life of this instance object,
+      // so only get it once
+      String instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + instanceName;
+      byte[] iidb = zooCache.get(instanceNamePath);
+      if (iidb == null) {
+        throw new RuntimeException(
+            "Instance name " + instanceName + " does not exist in zookeeper. "
+                + "Run \"accumulo org.apache.accumulo.server.util.ListInstances\" to see a list.");
+      }
+      instanceId = new String(iidb, UTF_8);
+    }
+
+    if (zooCache.get(Constants.ZROOT + "/" + instanceId) == null) {
+      if (instanceName == null)
+        throw new RuntimeException("Instance id " + instanceId + " does not exist in zookeeper");
+      throw new RuntimeException("Instance id " + instanceId + " pointed to by the name "
+          + instanceName + " does not exist in zookeeper");
+    }
+
+    return instanceId;
+  }
+
+  public String getZooKeeperRoot() {
+    return ZooUtil.getRoot(getInstanceID());
   }
 
   /**
@@ -240,7 +343,7 @@ public class ClientContext {
    * @return current instance name
    */
   public String getInstanceName() {
-    return inst.getInstanceName();
+    return info.getInstanceName();
   }
 
   /**
@@ -249,7 +352,7 @@ public class ClientContext {
    * @return the zookeeper servers this instance is using in "hostname:port" form
    */
   public String getZooKeepers() {
-    return inst.getZooKeepers();
+    return info.getZooKeepers();
   }
 
   /**
@@ -258,6 +361,6 @@ public class ClientContext {
    * @return the configured timeout to connect to zookeeper
    */
   public int getZooKeepersSessionTimeOut() {
-    return inst.getZooKeepersSessionTimeOut();
+    return info.getZooKeepersSessionTimeOut();
   }
 }
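With these changes, ClientContext is built from a ClientInfo and owns its own ZooCache, so instance metadata and the Connector come straight from the context rather than from a wrapped Instance. A minimal sketch using only the constructor and accessors shown above, assuming a ClientInfo is already in hand:

import org.apache.accumulo.core.client.ClientInfo;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.impl.ClientContext;

public class ContextExample {
  static void describe(ClientInfo info) throws Exception {
    ClientContext context = new ClientContext(info); // new single-argument constructor
    String instanceId = context.getInstanceID();     // resolved via ZooCache, then cached
    String zkRoot = context.getZooKeeperRoot();      // ZooUtil.getRoot(instanceId)
    Connector conn = context.getConnector();         // now always a ConnectorImpl over this context
    System.out.println(instanceId + " " + zkRoot + " " + conn.whoami());
  }
}
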
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ClientInfoFactory.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ClientInfoFactory.java
index c12e416..cfcb192 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ClientInfoFactory.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ClientInfoFactory.java
@@ -24,8 +24,6 @@ import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Durability;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.conf.ClientProperty;
 
 /**
@@ -46,12 +44,6 @@ public class ClientInfoFactory {
     return new ConnectorImpl(new ClientContext(info));
   }
 
-  public static Instance getInstance(ClientInfo info) {
-    @SuppressWarnings("deprecation")
-    Instance i = new ZooKeeperInstance(ClientConfConverter.toClientConf(info.getProperties()));
-    return i;
-  }
-
   public static Credentials getCredentials(ClientInfo info) {
     return new Credentials(info.getPrincipal(), info.getAuthenticationToken());
   }
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
index 928d2b0..6378ec7 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConditionalWriterImpl.java
@@ -631,12 +631,12 @@ class ConditionalWriterImpl implements ConditionalWriter {
           Tables.getPrintableTableInfoFromId(context, tableId), tse);
       queueException(location, cmidToCm, ase);
     } catch (TTransportException e) {
-      locator.invalidateCache(context.getInstance(), location.toString());
+      locator.invalidateCache(context, location.toString());
       invalidateSession(location, mutations, cmidToCm, sessionId);
     } catch (TApplicationException tae) {
       queueException(location, cmidToCm, new AccumuloServerException(location.toString(), tae));
     } catch (TException e) {
-      locator.invalidateCache(context.getInstance(), location.toString());
+      locator.invalidateCache(context, location.toString());
       invalidateSession(location, mutations, cmidToCm, sessionId);
     } catch (Exception e) {
       queueException(location, cmidToCm, e);
@@ -700,7 +700,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
         // ACCUMULO-1152 added a tserver lock check to the tablet location cache, so this
         // invalidation prevents future attempts to contact the
         // tserver even its gone zombie and is still running w/o a lock
-        locator.invalidateCache(context.getInstance(), location.toString());
+        locator.invalidateCache(context, location.toString());
         return;
       }
 
@@ -713,7 +713,7 @@ class ConditionalWriterImpl implements ConditionalWriter {
       } catch (TApplicationException tae) {
         throw new AccumuloServerException(location.toString(), tae);
       } catch (TException e) {
-        locator.invalidateCache(context.getInstance(), location.toString());
+        locator.invalidateCache(context, location.toString());
       }
 
       if ((System.currentTimeMillis() - startTime) + sleepTime > timeout)
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
index eac7532..d6ebfcb 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
@@ -35,7 +35,6 @@ import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.ConditionalWriter;
 import org.apache.accumulo.core.client.ConditionalWriterConfig;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.MultiTableBatchWriter;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
@@ -56,6 +55,7 @@ public class ConnectorImpl extends Connector {
   private static final String SYSTEM_TOKEN_NAME = "org.apache.accumulo.server.security."
       + "SystemCredentials$SystemToken";
   private final ClientContext context;
+  private final String instanceID;
   private SecurityOperations secops = null;
   private TableOperationsImpl tableops = null;
   private NamespaceOperations namespaceops = null;
@@ -72,6 +72,7 @@ public class ConnectorImpl extends Connector {
           SecurityErrorCode.TOKEN_EXPIRED);
 
     this.context = context;
+    instanceID = context.getInstanceID();
 
     // Skip fail fast for system services; string literal for class name, to avoid dependency on
     // server jar
@@ -96,8 +97,9 @@ public class ConnectorImpl extends Connector {
   }
 
   @Override
-  public Instance getInstance() {
-    return context.getInstance();
+  @Deprecated
+  public org.apache.accumulo.core.client.Instance getInstance() {
+    return context.getDeprecatedInstance();
   }
 
   @Override
@@ -217,7 +219,7 @@ public class ConnectorImpl extends Connector {
 
   @Override
   public String getInstanceID() {
-    return getInstance().getInstanceID();
+    return instanceID;
   }
 
   @Override
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/Credentials.java b/core/src/main/java/org/apache/accumulo/core/client/impl/Credentials.java
index b0772ca..4e46cbb 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/Credentials.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/Credentials.java
@@ -23,7 +23,6 @@ import java.util.Base64;
 
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
@@ -84,16 +83,15 @@ public class Credentials {
    * a non-destroyable version of the {@link AuthenticationToken}, so this should be used just
    * before placing on the wire, and references to it should be tightly controlled.
    *
-   * @param instance
-   *          client instance
+   * @param instanceID
+   *          Accumulo instance ID
    * @return Thrift credentials
    * @throws RuntimeException
    *           if the authentication token has been destroyed (expired)
    */
-  public TCredentials toThrift(Instance instance) {
+  public TCredentials toThrift(String instanceID) {
     TCredentials tCreds = new TCredentials(getPrincipal(), getToken().getClass().getName(),
-        ByteBuffer.wrap(AuthenticationTokenSerializer.serialize(getToken())),
-        instance.getInstanceID());
+        ByteBuffer.wrap(AuthenticationTokenSerializer.serialize(getToken())), instanceID);
     if (getToken().isDestroyed())
       throw new RuntimeException("Token has been destroyed",
           new AccumuloSecurityException(getPrincipal(), SecurityErrorCode.TOKEN_EXPIRED));
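
A hedged sketch of the new call-site shape for toThrift, assuming a ClientContext named context, a principal String named principal, and an AuthenticationToken named token are already available (imports omitted):

    Credentials creds = new Credentials(principal, token);
    // The Thrift credentials now take the instance ID string instead of an Instance.
    TCredentials tcreds = creds.toThrift(context.getInstanceID());
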
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/DelegationTokenImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/DelegationTokenImpl.java
index f9d1111..59a4829 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/DelegationTokenImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/DelegationTokenImpl.java
@@ -24,7 +24,6 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.Set;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.security.tokens.DelegationToken;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.hadoop.io.Text;
@@ -54,15 +53,15 @@ public class DelegationTokenImpl extends PasswordToken implements DelegationToke
     this.identifier = identifier;
   }
 
-  public DelegationTokenImpl(Instance instance, UserGroupInformation user,
+  public DelegationTokenImpl(String instanceID, UserGroupInformation user,
       AuthenticationTokenIdentifier identifier) {
-    requireNonNull(instance);
+    requireNonNull(instanceID);
     requireNonNull(user);
     requireNonNull(identifier);
 
     Credentials creds = user.getCredentials();
     Token<? extends TokenIdentifier> token = creds
-        .getToken(new Text(SERVICE_NAME + "-" + instance.getInstanceID()));
+        .getToken(new Text(SERVICE_NAME + "-" + instanceID));
     if (null == token) {
       throw new IllegalArgumentException(
           "Did not find Accumulo delegation token in provided UserGroupInformation");
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/NamespaceOperationsImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/NamespaceOperationsImpl.java
index c397a0f..916c6c7 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/NamespaceOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/NamespaceOperationsImpl.java
@@ -76,8 +76,7 @@ public class NamespaceOperationsImpl extends NamespaceOperationsHelper {
       timer = new OpTimer().start();
     }
 
-    TreeSet<String> namespaces = new TreeSet<>(
-        Namespaces.getNameToIdMap(context.getInstance()).keySet());
+    TreeSet<String> namespaces = new TreeSet<>(Namespaces.getNameToIdMap(context).keySet());
 
     if (timer != null) {
       timer.stop();
@@ -212,7 +211,7 @@ public class NamespaceOperationsImpl extends NamespaceOperationsHelper {
 
   @Override
   public Map<String,String> namespaceIdMap() {
-    return Namespaces.getNameToIdMap(context.getInstance()).entrySet().stream()
+    return Namespaces.getNameToIdMap(context).entrySet().stream()
         .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().canonicalID(), (v1, v2) -> {
           throw new RuntimeException(String.format("Duplicate key for values %s and %s", v1, v2));
         }, TreeMap::new));
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/Namespaces.java b/core/src/main/java/org/apache/accumulo/core/client/impl/Namespaces.java
index d2a6f21..3e50ed9 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/Namespaces.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/Namespaces.java
@@ -27,7 +27,6 @@ import java.util.TreeMap;
 import java.util.function.BiConsumer;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.NamespaceNotFoundException;
 import org.apache.accumulo.core.util.Validator;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
@@ -79,15 +78,15 @@ public class Namespaces {
     }
   };
 
-  private static ZooCache getZooCache(Instance instance) {
-    return new ZooCacheFactory().getZooCache(instance.getZooKeepers(),
-        instance.getZooKeepersSessionTimeOut());
+  private static ZooCache getZooCache(ClientContext context) {
+    return new ZooCacheFactory().getZooCache(context.getZooKeepers(),
+        context.getZooKeepersSessionTimeOut());
   }
 
   public static boolean exists(ClientContext context, Namespace.ID namespaceId) {
-    ZooCache zc = getZooCache(context.getInstance());
+    ZooCache zc = getZooCache(context);
     List<String> namespaceIds = zc
-        .getChildren(ZooUtil.getRoot(context.getInstance()) + Constants.ZNAMESPACES);
+        .getChildren(ZooUtil.getRoot(context.getInstanceID()) + Constants.ZNAMESPACES);
     return namespaceIds.contains(namespaceId.canonicalID());
   }
 
@@ -103,7 +102,7 @@ public class Namespaces {
 
   public static List<String> getTableNames(ClientContext context, Namespace.ID namespaceId)
       throws NamespaceNotFoundException {
-    String namespace = getNamespaceName(context.getInstance(), namespaceId);
+    String namespace = getNamespaceName(context, namespaceId);
     List<String> names = new LinkedList<>();
     for (String name : Tables.getNameToIdMap(context).keySet())
       if (namespace.equals(Tables.qualify(name).getFirst()))
@@ -115,12 +114,13 @@ public class Namespaces {
    * Gets all the namespaces from ZK. The first arg (t) the BiConsumer accepts is the ID and the
    * second (u) is the namespaceName.
    */
-  private static void getAllNamespaces(Instance instance, BiConsumer<String,String> biConsumer) {
-    final ZooCache zc = getZooCache(instance);
-    List<String> namespaceIds = zc.getChildren(ZooUtil.getRoot(instance) + Constants.ZNAMESPACES);
+  private static void getAllNamespaces(ClientContext context,
+      BiConsumer<String,String> biConsumer) {
+    final ZooCache zc = getZooCache(context);
+    List<String> namespaceIds = zc.getChildren(context.getZooKeeperRoot() + Constants.ZNAMESPACES);
     for (String id : namespaceIds) {
-      byte[] path = zc.get(
-          ZooUtil.getRoot(instance) + Constants.ZNAMESPACES + "/" + id + Constants.ZNAMESPACE_NAME);
+      byte[] path = zc.get(context.getZooKeeperRoot() + Constants.ZNAMESPACES + "/" + id
+          + Constants.ZNAMESPACE_NAME);
       if (path != null) {
         biConsumer.accept(id, new String(path, UTF_8));
       }
@@ -132,16 +132,16 @@ public class Namespaces {
    */
   public static SortedMap<Namespace.ID,String> getIdToNameMap(ClientContext context) {
     SortedMap<Namespace.ID,String> idMap = new TreeMap<>();
-    getAllNamespaces(context.getInstance(), (id, name) -> idMap.put(Namespace.ID.of(id), name));
+    getAllNamespaces(context, (id, name) -> idMap.put(Namespace.ID.of(id), name));
     return idMap;
   }
 
   /**
    * Return sorted map with key = namespaceName, value = ID
    */
-  public static SortedMap<String,Namespace.ID> getNameToIdMap(Instance instance) {
+  public static SortedMap<String,Namespace.ID> getNameToIdMap(ClientContext context) {
     SortedMap<String,Namespace.ID> nameMap = new TreeMap<>();
-    getAllNamespaces(instance, (id, name) -> nameMap.put(name, Namespace.ID.of(id)));
+    getAllNamespaces(context, (id, name) -> nameMap.put(name, Namespace.ID.of(id)));
     return nameMap;
   }
 
@@ -151,7 +151,7 @@ public class Namespaces {
   public static Namespace.ID getNamespaceId(ClientContext context, String namespaceName)
       throws NamespaceNotFoundException {
     final ArrayList<Namespace.ID> singleId = new ArrayList<>(1);
-    getAllNamespaces(context.getInstance(), (id, name) -> {
+    getAllNamespaces(context, (id, name) -> {
       if (name.equals(namespaceName))
         singleId.add(Namespace.ID.of(id));
     });
@@ -187,14 +187,9 @@ public class Namespaces {
    */
   public static String getNamespaceName(ClientContext context, Namespace.ID namespaceId)
       throws NamespaceNotFoundException {
-    return getNamespaceName(context.getInstance(), namespaceId);
-  }
-
-  public static String getNamespaceName(Instance instance, Namespace.ID namespaceId)
-      throws NamespaceNotFoundException {
     String name;
-    ZooCache zc = getZooCache(instance);
-    byte[] path = zc.get(ZooUtil.getRoot(instance) + Constants.ZNAMESPACES + "/"
+    ZooCache zc = getZooCache(context);
+    byte[] path = zc.get(context.getZooKeeperRoot() + Constants.ZNAMESPACES + "/"
         + namespaceId.canonicalID() + Constants.ZNAMESPACE_NAME);
     if (path != null)
       name = new String(path, UTF_8);
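
A minimal sketch of the context-based Namespaces lookups shown above, assuming a ClientContext named context; imports (Map, SortedMap, the impl classes) are omitted for brevity:

    static void printNamespaces(ClientContext context) throws NamespaceNotFoundException {
      // Name -> ID map, now resolved through the client context instead of an Instance.
      for (Map.Entry<String,Namespace.ID> e : Namespaces.getNameToIdMap(context).entrySet()) {
        // The ID -> name lookup also takes the context now.
        System.out.println(e.getKey() + " -> "
            + Namespaces.getNamespaceName(context, e.getValue()));
      }
    }
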
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ReplicationClient.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ReplicationClient.java
index d33296a..3226b28 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ReplicationClient.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ReplicationClient.java
@@ -24,13 +24,11 @@ import java.util.List;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 import org.apache.accumulo.core.replication.thrift.ReplicationCoordinator;
 import org.apache.accumulo.core.replication.thrift.ReplicationServicer;
 import org.apache.accumulo.core.rpc.ThriftUtil;
 import org.apache.accumulo.core.util.HostAndPort;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
 import org.apache.thrift.TServiceClient;
 import org.apache.thrift.transport.TTransportException;
@@ -69,7 +67,6 @@ public class ReplicationClient {
   }
 
   public static ReplicationCoordinator.Client getCoordinatorConnection(ClientContext context) {
-    Instance instance = context.getInstance();
     List<String> locations = context.getMasterLocations();
 
     if (locations.size() == 0) {
@@ -85,11 +82,11 @@ public class ReplicationClient {
       return null;
     }
 
-    String zkPath = ZooUtil.getRoot(instance) + Constants.ZMASTER_REPLICATION_COORDINATOR_ADDR;
+    String zkPath = context.getZooKeeperRoot() + Constants.ZMASTER_REPLICATION_COORDINATOR_ADDR;
     String replCoordinatorAddr;
 
     log.debug("Using ZooKeeper quorum at {} with path {} to find peer Master information",
-        instance.getZooKeepers(), zkPath);
+        context.getZooKeepers(), zkPath);
 
     // Get the coordinator port for the master we're trying to connect to
     try {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/RootTabletLocator.java b/core/src/main/java/org/apache/accumulo/core/client/impl/RootTabletLocator.java
index 132261c..78ec6d8 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/RootTabletLocator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/RootTabletLocator.java
@@ -27,7 +27,6 @@ import java.util.concurrent.TimeUnit;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.TabletLocatorImpl.TabletServerLockChecker;
 import org.apache.accumulo.core.data.Mutation;
@@ -95,10 +94,10 @@ public class RootTabletLocator extends TabletLocator {
   public void invalidateCache(Collection<KeyExtent> keySet) {}
 
   @Override
-  public void invalidateCache(Instance instance, String server) {
-    ZooCache zooCache = zcf.getZooCache(instance.getZooKeepers(),
-        instance.getZooKeepersSessionTimeOut());
-    String root = ZooUtil.getRoot(instance) + Constants.ZTSERVERS;
+  public void invalidateCache(ClientContext context, String server) {
+    ZooCache zooCache = zcf.getZooCache(context.getZooKeepers(),
+        context.getZooKeepersSessionTimeOut());
+    String root = ZooUtil.getRoot(context.getInstanceID()) + Constants.ZTSERVERS;
     zooCache.clear(root + "/" + server);
   }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/SecurityOperationsImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/SecurityOperationsImpl.java
index 6c3a774..b2c1ed7 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/SecurityOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/SecurityOperationsImpl.java
@@ -148,7 +148,7 @@ public class SecurityOperationsImpl implements SecurityOperations {
     checkArgument(token != null, "token is null");
     final Credentials toAuth = new Credentials(principal, token);
     return execute(client -> client.authenticateUser(Tracer.traceInfo(), context.rpcCreds(),
-        toAuth.toThrift(context.getInstance())));
+        toAuth.toThrift(context.getInstanceID())));
   }
 
   @Override
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/SyncingTabletLocator.java b/core/src/main/java/org/apache/accumulo/core/client/impl/SyncingTabletLocator.java
index 24e088a..18c61cd 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/SyncingTabletLocator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/SyncingTabletLocator.java
@@ -23,7 +23,6 @@ import java.util.concurrent.Callable;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
@@ -107,7 +106,7 @@ public class SyncingTabletLocator extends TabletLocator {
   }
 
   @Override
-  public void invalidateCache(Instance instance, String server) {
-    syncLocator().invalidateCache(instance, server);
+  public void invalidateCache(ClientContext context, String server) {
+    syncLocator().invalidateCache(context, server);
   }
 }
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TableMap.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TableMap.java
index 027807f..f2098e5 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TableMap.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TableMap.java
@@ -24,9 +24,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.NamespaceNotFoundException;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -42,14 +40,14 @@ public class TableMap {
   private final Map<String,Table.ID> tableNameToIdMap;
   private final Map<Table.ID,String> tableIdToNameMap;
 
-  public TableMap(Instance instance, ZooCache zooCache) {
-    List<String> tableIds = zooCache.getChildren(ZooUtil.getRoot(instance) + Constants.ZTABLES);
+  public TableMap(ClientContext context, ZooCache zooCache) {
+    List<String> tableIds = zooCache.getChildren(context.getZooKeeperRoot() + Constants.ZTABLES);
     Map<Namespace.ID,String> namespaceIdToNameMap = new HashMap<>();
     ImmutableMap.Builder<String,Table.ID> tableNameToIdBuilder = new ImmutableMap.Builder<>();
     ImmutableMap.Builder<Table.ID,String> tableIdToNameBuilder = new ImmutableMap.Builder<>();
     // use StringBuilder to construct zPath string efficiently across many tables
     StringBuilder zPathBuilder = new StringBuilder();
-    zPathBuilder.append(ZooUtil.getRoot(instance)).append(Constants.ZTABLES).append("/");
+    zPathBuilder.append(context.getZooKeeperRoot()).append(Constants.ZTABLES).append("/");
     int prefixLength = zPathBuilder.length();
 
     for (String tableIdStr : tableIds) {
@@ -71,7 +69,7 @@ public class TableMap {
           try {
             namespaceName = namespaceIdToNameMap.get(namespaceId);
             if (namespaceName == null) {
-              namespaceName = Namespaces.getNamespaceName(instance, namespaceId);
+              namespaceName = Namespaces.getNamespaceName(context, namespaceId);
               namespaceIdToNameMap.put(namespaceId, namespaceName);
             }
           } catch (NamespaceNotFoundException e) {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java
index 1f62817..2ca0ad6 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java
@@ -568,7 +568,7 @@ public class TableOperationsImpl extends TableOperationsHelper {
         } catch (TApplicationException tae) {
           throw new AccumuloServerException(address.toString(), tae);
         } catch (TTransportException e) {
-          tabLocator.invalidateCache(context.getInstance(), tl.tablet_location);
+          tabLocator.invalidateCache(context, tl.tablet_location);
           continue;
         } catch (ThriftSecurityException e) {
           Tables.clearCache(context);
@@ -586,7 +586,7 @@ public class TableOperationsImpl extends TableOperationsHelper {
           tabLocator.invalidateCache(tl.tablet_extent);
           continue;
         } catch (TException e) {
-          tabLocator.invalidateCache(context.getInstance(), tl.tablet_location);
+          tabLocator.invalidateCache(context, tl.tablet_location);
           continue;
         }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/Tables.java b/core/src/main/java/org/apache/accumulo/core/client/impl/Tables.java
index c9a4e93..ddf77d8 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/Tables.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/Tables.java
@@ -26,7 +26,6 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.NamespaceNotFoundException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.master.state.tables.TableState;
@@ -67,21 +66,21 @@ public class Tables {
   }
 
   /**
-   * Return the cached ZooCache for provided instance. ZooCache is initially created with a watcher
+   * Return the cached ZooCache for provided context. ZooCache is initially created with a watcher
    * that will clear the TableMap cache for that instance when WatchedEvent occurs.
    */
-  private static ZooCache getZooCache(final Instance instance) {
+  private static ZooCache getZooCache(final ClientContext context) {
     SecurityManager sm = System.getSecurityManager();
     if (sm != null) {
       sm.checkPermission(TABLES_PERMISSION);
     }
 
-    final String uuid = instance.getInstanceID();
+    final String uuid = context.getInstanceID();
 
     try {
       return instanceToZooCache.get(uuid, () -> {
-        final String zks = instance.getZooKeepers();
-        final int timeOut = instance.getZooKeepersSessionTimeOut();
+        final String zks = context.getZooKeepers();
+        final int timeOut = context.getZooKeepersSessionTimeOut();
         return new ZooCacheFactory().getZooCache(zks, timeOut,
             watchedEvent -> instanceToMapCache.invalidate(uuid));
       });
@@ -99,11 +98,11 @@ public class Tables {
     if (tableId == null) {
       // maybe the table exist, but the cache was not updated yet... so try to clear the cache and
       // check again
-      clearCache(context.getInstance());
+      clearCache(context);
       tableId = getNameToIdMap(context).get(tableName);
       if (tableId == null) {
         String namespace = qualify(tableName).getFirst();
-        if (Namespaces.getNameToIdMap(context.getInstance()).containsKey(namespace))
+        if (Namespaces.getNameToIdMap(context).containsKey(namespace))
           throw new TableNotFoundException(null, tableName, null);
         else
           throw new NamespaceNotFoundException(null, namespace, null);
@@ -114,26 +113,17 @@ public class Tables {
 
   public static String getTableName(ClientContext context, Table.ID tableId)
       throws TableNotFoundException {
-    return getTableName(context.getInstance(), tableId);
-  }
-
-  public static String getTableName(Instance instance, Table.ID tableId)
-      throws TableNotFoundException {
-    String tableName = getIdToNameMap(instance).get(tableId);
+    String tableName = getIdToNameMap(context).get(tableId);
     if (tableName == null)
       throw new TableNotFoundException(tableId.canonicalID(), null, null);
     return tableName;
   }
 
   public static String getTableOfflineMsg(ClientContext context, Table.ID tableId) {
-    return getTableOfflineMsg(context.getInstance(), tableId);
-  }
-
-  public static String getTableOfflineMsg(Instance instance, Table.ID tableId) {
     if (tableId == null)
       return "Table <unknown table> is offline";
     try {
-      String tableName = Tables.getTableName(instance, tableId);
+      String tableName = Tables.getTableName(context, tableId);
       return "Table " + tableName + " (" + tableId.canonicalID() + ") is offline";
     } catch (TableNotFoundException e) {
       return "Table <unknown table> (" + tableId.canonicalID() + ") is offline";
@@ -141,27 +131,23 @@ public class Tables {
   }
 
   public static Map<String,Table.ID> getNameToIdMap(ClientContext context) {
-    return getTableMap(context.getInstance()).getNameToIdMap();
+    return getTableMap(context).getNameToIdMap();
   }
 
   public static Map<Table.ID,String> getIdToNameMap(ClientContext context) {
-    return getIdToNameMap(context.getInstance());
-  }
-
-  public static Map<Table.ID,String> getIdToNameMap(Instance instance) {
-    return getTableMap(instance).getIdtoNameMap();
+    return getTableMap(context).getIdtoNameMap();
   }
 
   /**
    * Get the TableMap from the cache. A new one will be populated when needed. Cache is cleared
-   * manually by calling {@link #clearCache(Instance)} or automatically cleared by ZooCache watcher
-   * created in {@link #getZooCache(Instance)}. See ACCUMULO-4778.
+   * manually by calling {@link #clearCache(ClientContext)} or automatically cleared by ZooCache
+   * watcher created in {@link #getZooCache(ClientContext)}. See ACCUMULO-4778.
    */
-  private static TableMap getTableMap(final Instance instance) {
+  private static TableMap getTableMap(final ClientContext context) {
     TableMap map;
     try {
-      map = instanceToMapCache.get(instance.getInstanceID(),
-          () -> new TableMap(instance, getZooCache(instance)));
+      map = instanceToMapCache.get(context.getInstanceID(),
+          () -> new TableMap(context, getZooCache(context)));
     } catch (ExecutionException e) {
       throw new RuntimeException(e);
     }
@@ -169,47 +155,35 @@ public class Tables {
   }
 
   public static boolean exists(ClientContext context, Table.ID tableId) {
-    return exists(context.getInstance(), tableId);
-  }
-
-  public static boolean exists(Instance instance, Table.ID tableId) {
-    ZooCache zc = getZooCache(instance);
-    List<String> tableIds = zc.getChildren(ZooUtil.getRoot(instance) + Constants.ZTABLES);
+    ZooCache zc = getZooCache(context);
+    List<String> tableIds = zc.getChildren(context.getZooKeeperRoot() + Constants.ZTABLES);
     return tableIds.contains(tableId.canonicalID());
   }
 
   public static void clearCache(ClientContext context) {
-    clearCache(context.getInstance());
-  }
-
-  public static void clearCache(Instance instance) {
-    getZooCache(instance).clear(ZooUtil.getRoot(instance) + Constants.ZTABLES);
-    getZooCache(instance).clear(ZooUtil.getRoot(instance) + Constants.ZNAMESPACES);
-    instanceToMapCache.invalidate(instance.getInstanceID());
+    getZooCache(context).clear(ZooUtil.getRoot(context.getInstanceID()) + Constants.ZTABLES);
+    getZooCache(context).clear(ZooUtil.getRoot(context.getInstanceID()) + Constants.ZNAMESPACES);
+    instanceToMapCache.invalidate(context.getInstanceID());
   }
 
   /**
    * Clears the zoo cache from instance/root/{PATH}
    *
-   * @param instance
-   *          The Accumulo Instance
+   * @param context
+   *          The Accumulo client context
    * @param zooPath
    *          A zookeeper path
    */
-  public static void clearCacheByPath(Instance instance, final String zooPath) {
+  public static void clearCacheByPath(ClientContext context, final String zooPath) {
     String thePath = zooPath.startsWith("/") ? zooPath : "/" + zooPath;
-    getZooCache(instance).clear(ZooUtil.getRoot(instance) + thePath);
-    instanceToMapCache.invalidate(instance.getInstanceID());
+    getZooCache(context).clear(context.getZooKeeperRoot() + thePath);
+    instanceToMapCache.invalidate(context.getInstanceID());
   }
 
   public static String getPrintableTableInfoFromId(ClientContext context, Table.ID tableId) {
-    return getPrintableTableInfoFromId(context.getInstance(), tableId);
-  }
-
-  public static String getPrintableTableInfoFromId(Instance instance, Table.ID tableId) {
     String tableName = null;
     try {
-      tableName = getTableName(instance, tableId);
+      tableName = getTableName(context, tableId);
     } catch (TableNotFoundException e) {
       // handled in the string formatting
     }
@@ -229,41 +203,32 @@ public class Tables {
   }
 
   public static TableState getTableState(ClientContext context, Table.ID tableId) {
-    return getTableState(context.getInstance(), tableId);
-  }
-
-  public static TableState getTableState(Instance instance, Table.ID tableId) {
-    return getTableState(instance, tableId, false);
-  }
-
-  public static TableState getTableState(ClientContext context, Table.ID tableId,
-      boolean clearCachedState) {
-    return getTableState(context.getInstance(), tableId, clearCachedState);
+    return getTableState(context, tableId, false);
   }
 
   /**
    * Get the current state of the table using the tableid. The boolean clearCache, if true will
    * clear the table state in zookeeper before fetching the state. Added with ACCUMULO-4574.
    *
-   * @param instance
-   *          the Accumulo instance.
+   * @param context
+   *          the Accumulo client context
    * @param tableId
    *          the table id
    * @param clearCachedState
    *          if true clear the table state in zookeeper before checking status
    * @return the table state.
    */
-  public static TableState getTableState(Instance instance, Table.ID tableId,
+  public static TableState getTableState(ClientContext context, Table.ID tableId,
       boolean clearCachedState) {
 
-    String statePath = ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId.canonicalID()
+    String statePath = context.getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId.canonicalID()
         + Constants.ZTABLE_STATE;
 
     if (clearCachedState) {
-      Tables.clearCacheByPath(instance, statePath);
+      Tables.clearCacheByPath(context, statePath);
     }
 
-    ZooCache zc = getZooCache(instance);
+    ZooCache zc = getZooCache(context);
     byte[] state = zc.get(statePath);
     if (state == null)
       return TableState.UNKNOWN;
@@ -300,30 +265,25 @@ public class Tables {
     return new Pair<>(defaultNamespace, tableName);
   }
 
-  public static Namespace.ID getNamespaceId(ClientContext context, Table.ID tableId)
-      throws TableNotFoundException {
-    return getNamespaceId(context.getInstance(), tableId);
-  }
-
   /**
    * Returns the namespace id for a given table ID.
    *
-   * @param instance
-   *          The Accumulo Instance
+   * @param context
+   *          The Accumulo client context
    * @param tableId
    *          The tableId
    * @return The namespace id which this table resides in.
    * @throws IllegalArgumentException
    *           if the table doesn't exist in ZooKeeper
    */
-  public static Namespace.ID getNamespaceId(Instance instance, Table.ID tableId)
+  public static Namespace.ID getNamespaceId(ClientContext context, Table.ID tableId)
       throws TableNotFoundException {
-    checkArgument(instance != null, "instance is null");
+    checkArgument(context != null, "context is null");
     checkArgument(tableId != null, "tableId is null");
 
-    ZooCache zc = getZooCache(instance);
-    byte[] n = zc.get(
-        ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_NAMESPACE);
+    ZooCache zc = getZooCache(context);
+    byte[] n = zc.get(context.getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId
+        + Constants.ZTABLE_NAMESPACE);
 
     // We might get null out of ZooCache if this tableID doesn't exist
     if (null == n) {
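
A sketch of the consolidated Tables API once the Instance overloads are gone, assuming a ClientContext named context and a Table.ID named tableId (imports omitted):

    // Table state lookups now go through the client context.
    TableState state = Tables.getTableState(context, tableId);

    // Pass true to clear the cached state node in ZooKeeper before reading it.
    TableState fresh = Tables.getTableState(context, tableId, true);

    // Drop the cached table and namespace listings for this instance.
    Tables.clearCache(context);
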
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocator.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocator.java
index 80c7609..7c075f3 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocator.java
@@ -28,7 +28,6 @@ import java.util.WeakHashMap;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
@@ -73,7 +72,7 @@ public abstract class TabletLocator {
   /**
    * Invalidate all metadata entries that point to server
    */
-  public abstract void invalidateCache(Instance instance, String server);
+  public abstract void invalidateCache(ClientContext context, String server);
 
   private static class LocatorKey {
     String instanceId;
@@ -112,20 +111,19 @@ public abstract class TabletLocator {
   }
 
   public static synchronized TabletLocator getLocator(ClientContext context, Table.ID tableId) {
-    Instance instance = context.getInstance();
     LocatorKey key = new LocatorKey(context.getInstanceID(), tableId);
     TabletLocator tl = locators.get(key);
     if (tl == null) {
       MetadataLocationObtainer mlo = new MetadataLocationObtainer();
 
       if (RootTable.ID.equals(tableId)) {
-        tl = new RootTabletLocator(new ZookeeperLockChecker(instance));
+        tl = new RootTabletLocator(new ZookeeperLockChecker(context));
       } else if (MetadataTable.ID.equals(tableId)) {
         tl = new TabletLocatorImpl(MetadataTable.ID, getLocator(context, RootTable.ID), mlo,
-            new ZookeeperLockChecker(instance));
+            new ZookeeperLockChecker(context));
       } else {
         tl = new TabletLocatorImpl(tableId, getLocator(context, MetadataTable.ID), mlo,
-            new ZookeeperLockChecker(instance));
+            new ZookeeperLockChecker(context));
       }
       locators.put(key, tl);
     }
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocatorImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocatorImpl.java
index 746ae5d..62b025b 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocatorImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocatorImpl.java
@@ -37,7 +37,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
@@ -400,7 +399,7 @@ public class TabletLocatorImpl extends TabletLocator {
   }
 
   @Override
-  public void invalidateCache(Instance instance, String server) {
+  public void invalidateCache(ClientContext context, String server) {
     int invalidatedCount = 0;
 
     wLock.lock();
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
index dd10167..7438fd1 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
@@ -376,7 +376,7 @@ public class TabletServerBatchReaderIterator implements Iterator<Entry<Key,Value
             failures.putAll(unscanned);
           }
 
-          locator.invalidateCache(context.getInstance(), tsLocation);
+          locator.invalidateCache(context, tsLocation);
         }
         log.debug("IOException thrown", e);
       } catch (AccumuloSecurityException e) {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java
index 989f343..425d8f0 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java
@@ -568,7 +568,7 @@ public class TabletServerBatchWriter {
         af.put(new TabletIdImpl(entry.getKey()), codes);
       }
 
-      throw new MutationsRejectedException(context.getInstance(), cvsList, af, serverSideErrors,
+      throw new MutationsRejectedException(context.getClientInfo(), cvsList, af, serverSideErrors,
           unknownErrors, lastUnknownError);
     }
   }
@@ -900,7 +900,7 @@ public class TabletServerBatchWriter {
             tables.add(ke.getTableId());
 
           for (Table.ID table : tables)
-            getLocator(table).invalidateCache(context.getInstance(), location);
+            getLocator(table).invalidateCache(context, location);
 
           failedMutations.add(location, tsm);
         } finally {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftScanner.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftScanner.java
index 69a278e..4d33447 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftScanner.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftScanner.java
@@ -382,8 +382,8 @@ public class ThriftScanner {
 
           sleepMillis = pause(sleepMillis, maxSleepTime);
         } catch (TException e) {
-          TabletLocator.getLocator(context, scanState.tableId)
-              .invalidateCache(context.getInstance(), loc.tablet_location);
+          TabletLocator.getLocator(context, scanState.tableId).invalidateCache(context,
+              loc.tablet_location);
           error = "Scan failed, thrift error " + e.getClass().getName() + "  " + e.getMessage()
               + " " + loc;
           if (!error.equals(lastError))
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ZookeeperLockChecker.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ZookeeperLockChecker.java
index 19df080..c6d3e7f 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ZookeeperLockChecker.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ZookeeperLockChecker.java
@@ -17,7 +17,6 @@
 package org.apache.accumulo.core.client.impl;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.TabletLocatorImpl.TabletServerLockChecker;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
@@ -30,13 +29,13 @@ public class ZookeeperLockChecker implements TabletServerLockChecker {
   private final ZooCache zc;
   private final String root;
 
-  ZookeeperLockChecker(Instance instance) {
-    this(instance, new ZooCacheFactory());
+  ZookeeperLockChecker(ClientContext context) {
+    this(context, new ZooCacheFactory());
   }
 
-  ZookeeperLockChecker(Instance instance, ZooCacheFactory zcf) {
-    zc = zcf.getZooCache(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut());
-    this.root = ZooUtil.getRoot(instance) + Constants.ZTSERVERS;
+  ZookeeperLockChecker(ClientContext context, ZooCacheFactory zcf) {
+    zc = zcf.getZooCache(context.getZooKeepers(), context.getZooKeepersSessionTimeOut());
+    this.root = ZooUtil.getRoot(context.getInstanceID()) + Constants.ZTSERVERS;
   }
 
   @Override
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java b/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java
index e71712b..b0f9ee3 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java
@@ -145,7 +145,7 @@ public class MetadataLocationObtainer implements TabletLocationObtainer {
     } catch (AccumuloException e) {
       if (log.isTraceEnabled())
         log.trace("{} lookup failed", src.tablet_extent.getTableId(), e);
-      parent.invalidateCache(context.getInstance(), src.tablet_location);
+      parent.invalidateCache(context, src.tablet_location);
     }
 
     return null;
@@ -210,7 +210,7 @@ public class MetadataLocationObtainer implements TabletLocationObtainer {
       }
     } catch (IOException e) {
       log.trace("lookupTablets failed server={}", tserver, e);
-      parent.invalidateCache(context.getInstance(), tserver);
+      parent.invalidateCache(context, tserver);
     } catch (AccumuloServerException e) {
       log.trace("lookupTablets failed server={}", tserver, e);
       throw e;
diff --git a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTable.java b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTable.java
index 488c6e3..1b1536a 100644
--- a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTable.java
@@ -29,6 +29,7 @@ import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.TableOfflineException;
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.Namespace;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.client.impl.Tables;
@@ -89,7 +90,7 @@ public class ReplicationTable {
   }
 
   public static boolean isOnline(Connector conn) {
-    return TableState.ONLINE == Tables.getTableState(conn.getInstance(), ID);
+    return TableState.ONLINE == Tables.getTableState(new ClientContext(conn.info()), ID);
   }
 
   public static void setOnline(Connector conn) throws AccumuloSecurityException, AccumuloException {
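
The isOnline change above shows the bridge from a public Connector to the impl-level ClientContext; a hedged sketch of the same pattern, assuming a Connector named conn and that ReplicationTable.ID is visible to the caller:

    static boolean replicationTableOnline(Connector conn) {
      // Wrap the connector's client info in a context usable by the impl helpers.
      ClientContext context = new ClientContext(conn.info());
      return TableState.ONLINE == Tables.getTableState(context, ReplicationTable.ID);
    }

Most callers can simply use ReplicationTable.isOnline(conn), which performs the same wrapping internally.
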
diff --git a/core/src/main/java/org/apache/accumulo/core/rpc/SaslConnectionParams.java b/core/src/main/java/org/apache/accumulo/core/rpc/SaslConnectionParams.java
index 13300f1..ade379c 100644
--- a/core/src/main/java/org/apache/accumulo/core/rpc/SaslConnectionParams.java
+++ b/core/src/main/java/org/apache/accumulo/core/rpc/SaslConnectionParams.java
@@ -33,6 +33,7 @@ import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.KerberosToken;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.ClientProperty;
+import org.apache.accumulo.core.conf.Property;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
@@ -139,6 +140,13 @@ public class SaslConnectionParams {
     updateFromToken(token);
   }
 
+  public static SaslConnectionParams from(AccumuloConfiguration config, AuthenticationToken token) {
+    if (!config.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
+      return null;
+    }
+    return new SaslConnectionParams(config, token);
+  }
+
   protected void updateFromToken(AuthenticationToken token) {
     if (token instanceof KerberosToken) {
       mechanism = SaslMechanism.GSSAPI;
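
A sketch of the new factory's intended use, assuming an AccumuloConfiguration named config and an AuthenticationToken named token are in scope; the null return signals that SASL RPC is not enabled:

    SaslConnectionParams sasl = SaslConnectionParams.from(config, token);
    if (sasl == null) {
      // SASL RPC is disabled for this instance; proceed with plain transports.
    }
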
diff --git a/core/src/main/java/org/apache/accumulo/core/trace/DistributedTrace.java b/core/src/main/java/org/apache/accumulo/core/trace/DistributedTrace.java
index 73e3f3d..3a79f9d 100644
--- a/core/src/main/java/org/apache/accumulo/core/trace/DistributedTrace.java
+++ b/core/src/main/java/org/apache/accumulo/core/trace/DistributedTrace.java
@@ -22,18 +22,15 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.ClientProperty;
 import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.fate.zookeeper.ZooReader;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.htrace.HTraceConfiguration;
 import org.apache.htrace.SpanReceiver;
 import org.apache.htrace.SpanReceiverBuilder;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -55,15 +52,6 @@ public class DistributedTrace {
   private static final HashSet<SpanReceiver> receivers = new HashSet<>();
 
   /**
-   * @deprecated since 1.7, use {@link DistributedTrace#enable(String, String, Properties)} instead
-   */
-  @Deprecated
-  public static void enable(Instance instance, ZooReader zoo, String application, String address)
-      throws IOException, KeeperException, InterruptedException {
-    enable(address, application);
-  }
-
-  /**
    * Enable tracing by setting up SpanReceivers for the current process.
    */
   public static void enable() {
diff --git a/core/src/main/java/org/apache/accumulo/core/util/MonitorUtil.java b/core/src/main/java/org/apache/accumulo/core/util/MonitorUtil.java
index 862d70a..b846e27 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/MonitorUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/MonitorUtil.java
@@ -19,8 +19,7 @@ package org.apache.accumulo.core.util;
 import static java.nio.charset.StandardCharsets.UTF_8;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException.NoNodeException;
@@ -28,15 +27,17 @@ import org.apache.zookeeper.KeeperException.NoNodeException;
 import com.google.common.annotations.VisibleForTesting;
 
 public class MonitorUtil {
-  public static String getLocation(Instance instance) throws KeeperException, InterruptedException {
-    return getLocation(new ZooReader(instance.getZooKeepers(), 30000), instance);
+
+  public static String getLocation(ClientContext context)
+      throws KeeperException, InterruptedException {
+    return getLocation(new ZooReader(context.getZooKeepers(), 30000), context);
   }
 
   @VisibleForTesting
-  static String getLocation(ZooReader zr, Instance instance)
+  static String getLocation(ZooReader zr, ClientContext context)
       throws KeeperException, InterruptedException {
     try {
-      byte[] loc = zr.getData(ZooUtil.getRoot(instance) + Constants.ZMONITOR_HTTP_ADDR, null);
+      byte[] loc = zr.getData(context.getZooKeeperRoot() + Constants.ZMONITOR_HTTP_ADDR, null);
       return loc == null ? null : new String(loc, UTF_8);
     } catch (NoNodeException e) {
       // If there's no node advertising the monitor, there's no monitor.
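
A minimal sketch of the updated monitor lookup, assuming a ClientContext named context (imports omitted):

    static String monitorHttpAddress(ClientContext context)
        throws KeeperException, InterruptedException {
      // Returns the monitor's advertised HTTP address, or null if none is registered.
      return MonitorUtil.getLocation(context);
    }
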
diff --git a/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java b/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java
index 18c24fc..61f6716 100644
--- a/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooUtil.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.net.UnknownHostException;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.volume.VolumeConfiguration;
@@ -36,10 +35,6 @@ public class ZooUtil extends org.apache.accumulo.fate.zookeeper.ZooUtil {
 
   private static final Logger log = LoggerFactory.getLogger(ZooUtil.class);
 
-  public static String getRoot(final Instance instance) {
-    return getRoot(instance.getInstanceID());
-  }
-
   public static String getRoot(final String instanceId) {
     return Constants.ZROOT + "/" + instanceId;
   }
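
With the Instance overload removed, the ZooKeeper root is built from the instance ID string or taken from the context; a small sketch, assuming a ClientContext named context:

    // Build the instance's ZooKeeper root from its ID string...
    String root = ZooUtil.getRoot(context.getInstanceID());
    // ...or take it directly from the context, as the hunks above now do.
    String sameRoot = context.getZooKeeperRoot();
    String tserversPath = root + Constants.ZTSERVERS;
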
diff --git a/core/src/test/java/org/apache/accumulo/core/client/ZooKeeperInstanceTest.java b/core/src/test/java/org/apache/accumulo/core/client/ZooKeeperInstanceTest.java
index 877ddb9..6e388d2 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/ZooKeeperInstanceTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/ZooKeeperInstanceTest.java
@@ -32,6 +32,7 @@ import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
 
+@Deprecated
 public class ZooKeeperInstanceTest {
   private static final UUID IID = UUID.randomUUID();
   private static final String IID_STRING = IID.toString();
@@ -39,16 +40,11 @@ public class ZooKeeperInstanceTest {
   private ZooCache zc;
   private ZooKeeperInstance zki;
 
-  @SuppressWarnings("deprecation")
   private static org.apache.accumulo.core.client.ClientConfiguration.ClientProperty INSTANCE_ID = org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.INSTANCE_ID;
-  @SuppressWarnings("deprecation")
   private static org.apache.accumulo.core.client.ClientConfiguration.ClientProperty INSTANCE_NAME = org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.INSTANCE_NAME;
-  @SuppressWarnings("deprecation")
   private static org.apache.accumulo.core.client.ClientConfiguration.ClientProperty INSTANCE_ZK_HOST = org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.INSTANCE_ZK_HOST;
-  @SuppressWarnings("deprecation")
   private static org.apache.accumulo.core.client.ClientConfiguration.ClientProperty INSTANCE_ZK_TIMEOUT = org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.INSTANCE_ZK_TIMEOUT;
 
-  @SuppressWarnings("deprecation")
   private void mockIdConstruction(ClientConfiguration config) {
     expect(config.get(INSTANCE_ID)).andReturn(IID_STRING);
     expect(config.get(INSTANCE_NAME)).andReturn(null);
@@ -56,7 +52,6 @@ public class ZooKeeperInstanceTest {
     expect(config.get(INSTANCE_ZK_TIMEOUT)).andReturn("30");
   }
 
-  @SuppressWarnings("deprecation")
   private void mockNameConstruction(ClientConfiguration config) {
     expect(config.get(INSTANCE_ID)).andReturn(null);
     expect(config.get(INSTANCE_NAME)).andReturn("instance");
@@ -66,7 +61,6 @@ public class ZooKeeperInstanceTest {
 
   @Before
   public void setUp() {
-    @SuppressWarnings("deprecation")
     ClientConfiguration config = createMock(ClientConfiguration.class);
     mockNameConstruction(config);
     replay(config);
@@ -82,7 +76,6 @@ public class ZooKeeperInstanceTest {
   }
 
   @Test(expected = IllegalArgumentException.class)
-  @SuppressWarnings("deprecation")
   public void testInvalidConstruction() {
     ClientConfiguration config = createMock(ClientConfiguration.class);
     expect(config.get(INSTANCE_ID)).andReturn(IID_STRING);
@@ -92,7 +85,6 @@ public class ZooKeeperInstanceTest {
   }
 
   @Test(expected = IllegalArgumentException.class)
-  @SuppressWarnings("deprecation")
   public void testInvalidConstruction2() {
     ClientConfiguration config = createMock(ClientConfiguration.class);
     expect(config.get(INSTANCE_ID)).andReturn(null);
@@ -121,7 +113,6 @@ public class ZooKeeperInstanceTest {
 
   @Test
   public void testGetInstanceID_Direct() {
-    @SuppressWarnings("deprecation")
     ClientConfiguration config = createMock(ClientConfiguration.class);
     mockIdConstruction(config);
     replay(config);
@@ -133,7 +124,6 @@ public class ZooKeeperInstanceTest {
 
   @Test(expected = RuntimeException.class)
   public void testGetInstanceID_NoMapping() {
-    @SuppressWarnings("deprecation")
     ClientConfiguration config = createMock(ClientConfiguration.class);
     expect(zc.get(Constants.ZROOT + Constants.ZINSTANCES + "/instance")).andReturn(null);
     replay(zc);
@@ -152,7 +142,6 @@ public class ZooKeeperInstanceTest {
 
   @Test(expected = RuntimeException.class)
   public void testGetInstanceID_IDMissingForID() {
-    @SuppressWarnings("deprecation")
     ClientConfiguration config = createMock(ClientConfiguration.class);
     mockIdConstruction(config);
     replay(config);
@@ -164,7 +153,6 @@ public class ZooKeeperInstanceTest {
 
   @Test
   public void testGetInstanceName() {
-    @SuppressWarnings("deprecation")
     ClientConfiguration config = createMock(ClientConfiguration.class);
     mockIdConstruction(config);
     replay(config);
@@ -192,7 +180,6 @@ public class ZooKeeperInstanceTest {
         .andReturn(IID_STRING.getBytes(UTF_8));
     expect(zc.get(Constants.ZROOT + "/" + IID_STRING)).andReturn("yup".getBytes());
     replay(zc, factory);
-    @SuppressWarnings("deprecation")
     ClientConfiguration cfg = ClientConfiguration.loadDefault().withInstance(instanceName)
         .withZkHosts(zookeepers);
     ZooKeeperInstance zki = new ZooKeeperInstance(cfg, factory);
diff --git a/core/src/test/java/org/apache/accumulo/core/client/impl/RootTabletLocatorTest.java b/core/src/test/java/org/apache/accumulo/core/client/impl/RootTabletLocatorTest.java
index b55e004..dec510f 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/impl/RootTabletLocatorTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/impl/RootTabletLocatorTest.java
@@ -22,7 +22,6 @@ import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.TabletLocatorImpl.TabletServerLockChecker;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
@@ -31,7 +30,7 @@ import org.junit.Before;
 import org.junit.Test;
 
 public class RootTabletLocatorTest {
-  private Instance instance;
+  private ClientContext context;
   private TabletServerLockChecker lockChecker;
   private ZooCacheFactory zcf;
   private ZooCache zc;
@@ -39,11 +38,11 @@ public class RootTabletLocatorTest {
 
   @Before
   public void setUp() {
-    instance = createMock(Instance.class);
-    expect(instance.getInstanceID()).andReturn("iid").anyTimes();
-    expect(instance.getZooKeepers()).andReturn("zk1").anyTimes();
-    expect(instance.getZooKeepersSessionTimeOut()).andReturn(30000).anyTimes();
-    replay(instance);
+    context = createMock(ClientContext.class);
+    expect(context.getInstanceID()).andReturn("iid").anyTimes();
+    expect(context.getZooKeepers()).andReturn("zk1").anyTimes();
+    expect(context.getZooKeepersSessionTimeOut()).andReturn(30000).anyTimes();
+    replay(context);
     lockChecker = createMock(TabletServerLockChecker.class);
     zcf = createMock(ZooCacheFactory.class);
     zc = createMock(ZooCache.class);
@@ -54,9 +53,9 @@ public class RootTabletLocatorTest {
   public void testInvalidateCache_Server() {
     expect(zcf.getZooCache("zk1", 30000)).andReturn(zc);
     replay(zcf);
-    zc.clear(ZooUtil.getRoot(instance) + Constants.ZTSERVERS + "/server");
+    zc.clear(ZooUtil.getRoot(context.getInstanceID()) + Constants.ZTSERVERS + "/server");
     replay(zc);
-    rtl.invalidateCache(instance, "server");
+    rtl.invalidateCache(context, "server");
     verify(zc);
   }
 }
diff --git a/core/src/test/java/org/apache/accumulo/core/client/impl/TableOperationsImplTest.java b/core/src/test/java/org/apache/accumulo/core/client/impl/TableOperationsImplTest.java
index c5b7e84..8ae302a 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/impl/TableOperationsImplTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/impl/TableOperationsImplTest.java
@@ -16,13 +16,10 @@
  */
 package org.apache.accumulo.core.client.impl;
 
-import java.util.Properties;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.impl.KeyExtent;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema;
@@ -35,11 +32,8 @@ public class TableOperationsImplTest {
   @Test
   public void waitForStoreTransitionScannerConfiguredCorrectly() throws Exception {
     final String tableName = "metadata";
-    Instance instance = EasyMock.createMock(Instance.class);
-    Credentials credentials = EasyMock.createMock(Credentials.class);
+    ClientContext context = EasyMock.createMock(ClientContext.class);
 
-    ClientContext context = new ClientContext(new ClientInfoImpl(new Properties(), null), instance,
-        credentials, null);
     TableOperationsImpl topsImpl = new TableOperationsImpl(context);
 
     Connector connector = EasyMock.createMock(Connector.class);
@@ -47,15 +41,7 @@ public class TableOperationsImplTest {
 
     Range range = new KeyExtent(Table.ID.of("1"), null, null).toMetadataRange();
 
-    String user = "root";
-    PasswordToken token = new PasswordToken("password");
-
-    // Credentials expectations
-    EasyMock.expect(credentials.getPrincipal()).andReturn(user);
-    EasyMock.expect(credentials.getToken()).andReturn(token);
-
-    // Create the connector and scanner
-    EasyMock.expect(instance.getConnector(user, token)).andReturn(connector);
+    EasyMock.expect(context.getConnector()).andReturn(connector);
     EasyMock.expect(connector.createScanner(tableName, Authorizations.EMPTY)).andReturn(scanner);
 
     // Fetch the columns on the scanner
@@ -79,11 +65,11 @@ public class TableOperationsImplTest {
     EasyMock.expect(scanner.getBatchSize()).andReturn(1000);
     EasyMock.expect(scanner.getReadaheadThreshold()).andReturn(100L);
 
-    EasyMock.replay(instance, credentials, connector, scanner);
+    EasyMock.replay(context, connector, scanner);
 
     topsImpl.createMetadataScanner(tableName, range);
 
-    EasyMock.verify(instance, credentials, connector, scanner);
+    EasyMock.verify(context, connector, scanner);
   }
 
 }
diff --git a/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java b/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
index a55b54b..d73a4c0 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
@@ -16,6 +16,7 @@
  */
 package org.apache.accumulo.core.client.impl;
 
+import static org.easymock.EasyMock.replay;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -29,21 +30,16 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.Properties;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
 
-import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.TabletLocator.TabletLocation;
 import org.apache.accumulo.core.client.impl.TabletLocator.TabletLocations;
 import org.apache.accumulo.core.client.impl.TabletLocator.TabletServerMutations;
 import org.apache.accumulo.core.client.impl.TabletLocatorImpl.TabletLocationObtainer;
 import org.apache.accumulo.core.client.impl.TabletLocatorImpl.TabletServerLockChecker;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.PartialKey;
@@ -55,6 +51,7 @@ import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.hadoop.io.Text;
+import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -168,14 +165,14 @@ public class TabletLocatorImplTest {
     return createLocators(tservers, "tserver1", "tserver2", table, data);
   }
 
-  private TestInstance testInstance;
   private ClientContext context;
 
   @Before
   public void setUp() {
-    testInstance = new TestInstance("instance1", "tserver1");
-    context = new ClientContext(new ClientInfoImpl(new Properties(), null), testInstance,
-        new Credentials("test", null), null);
+    context = EasyMock.createMock(ClientContext.class);
+    EasyMock.expect(context.getRootTabletLocation()).andReturn("tserver1").anyTimes();
+    EasyMock.expect(context.getInstanceID()).andReturn("instance1").anyTimes();
+    replay(context);
   }
 
   private void runTest(Text tableName, List<Range> ranges, TabletLocatorImpl tab1TabletCache,
@@ -404,57 +401,6 @@ public class TabletLocatorImplTest {
     runTest(mc, nke("0", null, "s"), nkes(nke("0", "g", null), nke("0", "r", "g")));
   }
 
-  static class TestInstance implements Instance {
-
-    private final String iid;
-    private String rtl;
-
-    public TestInstance(String iid, String rtl) {
-      this.iid = iid;
-      this.rtl = rtl;
-    }
-
-    @Override
-    public String getInstanceID() {
-      return iid;
-    }
-
-    @Override
-    public String getInstanceName() {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public List<String> getMasterLocations() {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public String getRootTabletLocation() {
-      return rtl;
-    }
-
-    @Override
-    public String getZooKeepers() {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public int getZooKeepersSessionTimeOut() {
-      throw new UnsupportedOperationException();
-    }
-
-    public void setRootTabletLocation(String rtl) {
-      this.rtl = rtl;
-    }
-
-    @Override
-    public Connector getConnector(String principal, AuthenticationToken token)
-        throws AccumuloException, AccumuloSecurityException {
-      throw new UnsupportedOperationException();
-    }
-  }
-
   static class TServers {
     private final Map<String,Map<KeyExtent,SortedMap<Key,Value>>> tservers = new HashMap<>();
   }
@@ -477,7 +423,7 @@ public class TabletLocatorImplTest {
       Map<KeyExtent,SortedMap<Key,Value>> tablets = tservers.get(src.tablet_location);
 
       if (tablets == null) {
-        parent.invalidateCache(context.getInstance(), src.tablet_location);
+        parent.invalidateCache(context, src.tablet_location);
         return null;
       }
 
@@ -508,7 +454,7 @@ public class TabletLocatorImplTest {
       Map<KeyExtent,SortedMap<Key,Value>> tablets = tservers.get(tserver);
 
       if (tablets == null) {
-        parent.invalidateCache(context.getInstance(), tserver);
+        parent.invalidateCache(context, tserver);
         return list;
       }
 
@@ -574,7 +520,7 @@ public class TabletLocatorImplTest {
     }
 
     @Override
-    public void invalidateCache(Instance instance, String server) {}
+    public void invalidateCache(ClientContext context, String server) {}
 
   }
 
@@ -730,7 +676,7 @@ public class TabletLocatorImplTest {
 
     // simulate a server failure
     setLocation(tservers, "tserver2", MTE, tab1e21, "tserver9");
-    tab1TabletCache.invalidateCache(context.getInstance(), "tserver8");
+    tab1TabletCache.invalidateCache(context, "tserver8");
     locateTabletTest(tab1TabletCache, "r1", tab1e22, "tserver6");
     locateTabletTest(tab1TabletCache, "h", tab1e21, "tserver9");
     locateTabletTest(tab1TabletCache, "a", tab1e1, "tserver4");
@@ -738,15 +684,21 @@ public class TabletLocatorImplTest {
     // simulate all servers failing
     deleteServer(tservers, "tserver1");
     deleteServer(tservers, "tserver2");
-    tab1TabletCache.invalidateCache(context.getInstance(), "tserver4");
-    tab1TabletCache.invalidateCache(context.getInstance(), "tserver6");
-    tab1TabletCache.invalidateCache(context.getInstance(), "tserver9");
+    tab1TabletCache.invalidateCache(context, "tserver4");
+    tab1TabletCache.invalidateCache(context, "tserver6");
+    tab1TabletCache.invalidateCache(context, "tserver9");
 
     locateTabletTest(tab1TabletCache, "r1", null, null);
     locateTabletTest(tab1TabletCache, "h", null, null);
     locateTabletTest(tab1TabletCache, "a", null, null);
 
-    testInstance.setRootTabletLocation("tserver4");
+    EasyMock.verify(context);
+
+    context = EasyMock.createMock(ClientContext.class);
+    EasyMock.expect(context.getInstanceID()).andReturn("instance1").anyTimes();
+    EasyMock.expect(context.getRootTabletLocation()).andReturn("tserver4").anyTimes();
+    replay(context);
+
     setLocation(tservers, "tserver4", RTE, MTE, "tserver5");
     setLocation(tservers, "tserver5", MTE, tab1e1, "tserver1");
     setLocation(tservers, "tserver5", MTE, tab1e21, "tserver2");
@@ -777,7 +729,7 @@ public class TabletLocatorImplTest {
 
     // simulate metadata and regular server down and the reassigned
     deleteServer(tservers, "tserver5");
-    tab1TabletCache.invalidateCache(context.getInstance(), "tserver7");
+    tab1TabletCache.invalidateCache(context, "tserver7");
     locateTabletTest(tab1TabletCache, "a", null, null);
     locateTabletTest(tab1TabletCache, "h", tab1e21, "tserver8");
     locateTabletTest(tab1TabletCache, "r", tab1e22, "tserver9");
@@ -789,7 +741,7 @@ public class TabletLocatorImplTest {
     locateTabletTest(tab1TabletCache, "a", tab1e1, "tserver7");
     locateTabletTest(tab1TabletCache, "h", tab1e21, "tserver8");
     locateTabletTest(tab1TabletCache, "r", tab1e22, "tserver9");
-    tab1TabletCache.invalidateCache(context.getInstance(), "tserver7");
+    tab1TabletCache.invalidateCache(context, "tserver7");
     setLocation(tservers, "tserver10", mte1, tab1e1, "tserver2");
     locateTabletTest(tab1TabletCache, "a", tab1e1, "tserver2");
     locateTabletTest(tab1TabletCache, "h", tab1e21, "tserver8");
diff --git a/core/src/test/java/org/apache/accumulo/core/client/impl/ZookeeperLockCheckerTest.java b/core/src/test/java/org/apache/accumulo/core/client/impl/ZookeeperLockCheckerTest.java
index 8e2e79c..e2544ed 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/impl/ZookeeperLockCheckerTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/impl/ZookeeperLockCheckerTest.java
@@ -22,7 +22,6 @@ import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
@@ -30,28 +29,28 @@ import org.junit.Before;
 import org.junit.Test;
 
 public class ZookeeperLockCheckerTest {
-  private Instance instance;
+  private ClientContext context;
   private ZooCacheFactory zcf;
   private ZooCache zc;
   private ZookeeperLockChecker zklc;
 
   @Before
   public void setUp() {
-    instance = createMock(Instance.class);
-    expect(instance.getInstanceID()).andReturn("iid").anyTimes();
-    expect(instance.getZooKeepers()).andReturn("zk1").anyTimes();
-    expect(instance.getZooKeepersSessionTimeOut()).andReturn(30000).anyTimes();
-    replay(instance);
+    context = createMock(ClientContext.class);
+    expect(context.getInstanceID()).andReturn("iid").anyTimes();
+    expect(context.getZooKeepers()).andReturn("zk1").anyTimes();
+    expect(context.getZooKeepersSessionTimeOut()).andReturn(30000).anyTimes();
+    replay(context);
     zcf = createMock(ZooCacheFactory.class);
     zc = createMock(ZooCache.class);
     expect(zcf.getZooCache("zk1", 30000)).andReturn(zc);
     replay(zcf);
-    zklc = new ZookeeperLockChecker(instance, zcf);
+    zklc = new ZookeeperLockChecker(context, zcf);
   }
 
   @Test
   public void testInvalidateCache() {
-    zc.clear(ZooUtil.getRoot(instance) + Constants.ZTSERVERS + "/server");
+    zc.clear(ZooUtil.getRoot("iid") + Constants.ZTSERVERS + "/server");
     replay(zc);
     zklc.invalidateCache("server");
     verify(zc);
diff --git a/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java b/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java
index 6640f34..03828e4 100644
--- a/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java
@@ -26,7 +26,6 @@ import java.util.HashMap;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.admin.TableOperations;
@@ -53,13 +52,11 @@ public class MetadataServicerTest {
 
     context = EasyMock.createMock(ClientContext.class);
     Connector conn = EasyMock.createMock(Connector.class);
-    Instance inst = EasyMock.createMock(Instance.class);
     TableOperations tableOps = EasyMock.createMock(TableOperations.class);
     EasyMock.expect(tableOps.tableIdMap()).andReturn(tableNameToIdMap).anyTimes();
     EasyMock.expect(conn.tableOperations()).andReturn(tableOps).anyTimes();
-    EasyMock.expect(context.getInstance()).andReturn(inst).anyTimes();
     EasyMock.expect(context.getConnector()).andReturn(conn).anyTimes();
-    EasyMock.replay(context, conn, inst, tableOps);
+    EasyMock.replay(context, conn, tableOps);
   }
 
   @Test
diff --git a/core/src/test/java/org/apache/accumulo/core/security/CredentialsTest.java b/core/src/test/java/org/apache/accumulo/core/security/CredentialsTest.java
index 6709651..f0a72ab 100644
--- a/core/src/test/java/org/apache/accumulo/core/security/CredentialsTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/security/CredentialsTest.java
@@ -27,15 +27,12 @@ import static org.junit.Assert.fail;
 import javax.security.auth.DestroyFailedException;
 
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Credentials;
 import org.apache.accumulo.core.client.security.SecurityErrorCode;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
 import org.apache.accumulo.core.client.security.tokens.NullToken;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.easymock.EasyMock;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
@@ -45,20 +42,13 @@ public class CredentialsTest {
   @Rule
   public TestName test = new TestName();
 
-  private Instance inst;
-
-  @Before
-  public void setupInstance() {
-    inst = EasyMock.createMock(Instance.class);
-    EasyMock.expect(inst.getInstanceID()).andReturn(test.getMethodName()).anyTimes();
-    EasyMock.replay(inst);
-  }
+  private String instanceID = test.getMethodName();
 
   @Test
   public void testToThrift() throws DestroyFailedException {
     // verify thrift serialization
     Credentials creds = new Credentials("test", new PasswordToken("testing"));
-    TCredentials tCreds = creds.toThrift(inst);
+    TCredentials tCreds = creds.toThrift(instanceID);
     assertEquals("test", tCreds.getPrincipal());
     assertEquals(PasswordToken.class.getName(), tCreds.getTokenClassName());
     assertArrayEquals(AuthenticationTokenSerializer.serialize(new PasswordToken("testing")),
@@ -67,7 +57,7 @@ public class CredentialsTest {
     // verify that we can't serialize if it's destroyed
     creds.getToken().destroy();
     try {
-      creds.toThrift(inst);
+      creds.toThrift(instanceID);
       fail();
     } catch (Exception e) {
       assertTrue(e instanceof RuntimeException);
@@ -80,7 +70,7 @@ public class CredentialsTest {
   @Test
   public void roundtripThrift() throws DestroyFailedException {
     Credentials creds = new Credentials("test", new PasswordToken("testing"));
-    TCredentials tCreds = creds.toThrift(inst);
+    TCredentials tCreds = creds.toThrift(instanceID);
     Credentials roundtrip = Credentials.fromThrift(tCreds);
     assertEquals("Roundtrip through thirft changed credentials equality", creds, roundtrip);
   }
diff --git a/core/src/test/java/org/apache/accumulo/core/util/MonitorUtilTest.java b/core/src/test/java/org/apache/accumulo/core/util/MonitorUtilTest.java
index 0eccb2b..932e698 100644
--- a/core/src/test/java/org/apache/accumulo/core/util/MonitorUtilTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/util/MonitorUtilTest.java
@@ -22,8 +22,7 @@ import static org.easymock.EasyMock.replay;
 import static org.junit.Assert.assertNull;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
 import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.junit.Test;
@@ -35,12 +34,12 @@ public class MonitorUtilTest {
     final String instanceId = "12345";
 
     ZooReader zr = mock(ZooReader.class);
-    Instance mockInstance = mock(Instance.class);
-    expect(mockInstance.getInstanceID()).andReturn(instanceId);
-    expect(zr.getData(ZooUtil.getRoot(instanceId) + Constants.ZMONITOR_HTTP_ADDR, null))
+    ClientContext context = mock(ClientContext.class);
+    expect(context.getZooKeeperRoot()).andReturn("/root/");
+    expect(zr.getData("/root/" + Constants.ZMONITOR_HTTP_ADDR, null))
         .andThrow(new NoNodeException());
 
-    replay(zr, mockInstance);
-    assertNull(MonitorUtil.getLocation(zr, mockInstance));
+    replay(zr, context);
+    assertNull(MonitorUtil.getLocation(zr, context));
   }
 }
diff --git a/maven-plugin/src/it/plugin-test/src/test/java/org/apache/accumulo/plugin/it/PluginIT.java b/maven-plugin/src/it/plugin-test/src/test/java/org/apache/accumulo/plugin/it/PluginIT.java
index e9518bf..8caa29a 100644
--- a/maven-plugin/src/it/plugin-test/src/test/java/org/apache/accumulo/plugin/it/PluginIT.java
+++ b/maven-plugin/src/it/plugin-test/src/test/java/org/apache/accumulo/plugin/it/PluginIT.java
@@ -27,8 +27,8 @@ import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableExistsException;
@@ -38,27 +38,26 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.MiniAccumuloInstance;
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.plugin.CustomFilter;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class PluginIT {
 
-  private static Instance instance;
+  private static ClientInfo info;
   private static Connector connector;
 
   @BeforeClass
   public static void setUp() throws Exception {
     String instanceName = "plugin-it-instance";
-    instance = new MiniAccumuloInstance(instanceName, new File("target/accumulo-maven-plugin/" + instanceName));
-    connector = instance.getConnector("root", new PasswordToken("ITSecret"));
+    info = MiniAccumuloCluster.getClientInfo(new File("target/accumulo-maven-plugin/" + instanceName));
+    connector = Connector.builder().usingClientInfo(info).build();
   }
 
   @Test
-  public void testInstanceConnection() {
-    assertTrue(instance != null);
-    assertTrue(instance instanceof MiniAccumuloInstance);
+  public void testConnection() {
+    assertTrue(info != null);
     assertTrue(connector != null);
     assertTrue(connector instanceof Connector);
   }
@@ -68,7 +67,7 @@ public class PluginIT {
     String tableName = "testCreateTable";
     connector.tableOperations().create(tableName);
     assertTrue(connector.tableOperations().exists(tableName));
-    assertTrue(new File("target/accumulo-maven-plugin/" + instance.getInstanceName() + "/testCreateTablePassed").createNewFile());
+    assertTrue(new File("target/accumulo-maven-plugin/" + info.getInstanceName() + "/testCreateTablePassed").createNewFile());
   }
 
   @Test
@@ -90,7 +89,7 @@ public class PluginIT {
       assertEquals("V", entry.getValue().toString());
     }
     assertEquals(1, count);
-    assertTrue(new File("target/accumulo-maven-plugin/" + instance.getInstanceName() + "/testWriteToTablePassed").createNewFile());
+    assertTrue(new File("target/accumulo-maven-plugin/" + info.getInstanceName() + "/testWriteToTablePassed").createNewFile());
   }
 
   @Test
@@ -131,7 +130,7 @@ public class PluginIT {
       assertEquals("denied", entry.getKey().getColumnFamily().toString());
     }
     assertEquals(2, count);
-    assertTrue(new File("target/accumulo-maven-plugin/" + instance.getInstanceName() + "/testCheckIteratorPassed").createNewFile());
+    assertTrue(new File("target/accumulo-maven-plugin/" + info.getInstanceName() + "/testCheckIteratorPassed").createNewFile());
   }
 
 }
diff --git a/minicluster/src/main/java/org/apache/accumulo/cluster/AccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/cluster/AccumuloCluster.java
index 41545f3..8fdda83 100644
--- a/minicluster/src/main/java/org/apache/accumulo/cluster/AccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/cluster/AccumuloCluster.java
@@ -24,6 +24,7 @@ import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
@@ -47,6 +48,11 @@ public interface AccumuloCluster {
   String getZooKeepers();
 
   /**
+   * @return ServerContext
+   */
+  ServerContext getServerContext();
+
+  /**
    * Utility method to get a connector to the cluster.
    */
   Connector getConnector(String user, AuthenticationToken token)
diff --git a/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneAccumuloCluster.java
index c82ab3b..786f0be 100644
--- a/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneAccumuloCluster.java
@@ -37,6 +37,7 @@ import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.master.thrift.MasterGoalState;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -126,6 +127,11 @@ public class StandaloneAccumuloCluster implements AccumuloCluster {
   }
 
   @Override
+  public ServerContext getServerContext() {
+    return new ServerContext(getClientInfo());
+  }
+
+  @Override
   public Connector getConnector(String user, AuthenticationToken token)
       throws AccumuloException, AccumuloSecurityException {
     return Connector.builder().forInstance(getInstanceName(), getZooKeepers())
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
index 60448a0..d2d234d 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
@@ -29,6 +29,8 @@ import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
 import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 
+import com.google.common.base.Preconditions;
+
 /**
  * A utility class that will create Zookeeper and Accumulo processes that write all of their data to
  * a single local directory. This class makes it easy to test code against a real Accumulo instance.
@@ -136,4 +138,18 @@ public class MiniAccumuloCluster {
   public ClientInfo getClientInfo() {
     return impl.getClientInfo();
   }
+
+  /**
+   * Construct a {@link ClientInfo} using a {@link MiniAccumuloCluster} directory
+   *
+   * @param directory
+   *          MiniAccumuloCluster directory
+   * @return {@link ClientInfo} for that directory
+   * @since 2.0.0
+   */
+  public static ClientInfo getClientInfo(File directory) {
+    File clientProps = new File(new File(directory, "conf"), "accumulo-client.properties");
+    Preconditions.checkArgument(clientProps.exists());
+    return Connector.builder().usingProperties(clientProps.getAbsolutePath()).info();
+  }
 }
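
The new static helper above replaces the MiniAccumuloInstance entry point deprecated in the next file. A rough usage sketch, assuming a MiniAccumuloCluster has already written accumulo-client.properties (with usable credentials) under the placeholder directory:

import java.io.File;

import org.apache.accumulo.core.client.ClientInfo;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.minicluster.MiniAccumuloCluster;

class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    // "target/mini-accumulo" is a placeholder for a running cluster's directory.
    ClientInfo info = MiniAccumuloCluster.getClientInfo(new File("target/mini-accumulo"));
    Connector conn = Connector.builder().usingClientInfo(info).build();
    System.out.println("connected to " + info.getInstanceName() + " as " + conn.whoami());
  }
}
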
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloInstance.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloInstance.java
index 2a5a772..afe5b4b 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloInstance.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloInstance.java
@@ -20,19 +20,19 @@ import java.io.File;
 import java.io.FileNotFoundException;
 import java.net.MalformedURLException;
 
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.hadoop.conf.Configuration;
 
 /**
  * @since 1.6.0
+ * @deprecated since 2.0.0, use {@link MiniAccumuloCluster#getClientInfo(File)} instead
  */
-public class MiniAccumuloInstance extends ZooKeeperInstance {
+@Deprecated
+public class MiniAccumuloInstance extends org.apache.accumulo.core.client.ZooKeeperInstance {
 
   /**
-   * Construct an {@link Instance} entry point to Accumulo using a {@link MiniAccumuloCluster}
-   * directory
+   * Construct an {@link org.apache.accumulo.core.client.Instance} entry point to Accumulo using a
+   * {@link MiniAccumuloCluster} directory
    */
   @SuppressWarnings("deprecation")
   public MiniAccumuloInstance(String instanceName, File directory) throws FileNotFoundException {
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
index b60bf77..e9d8864 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
@@ -81,6 +81,7 @@ import org.apache.accumulo.master.state.SetGoalState;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.minicluster.ServerType;
 import org.apache.accumulo.server.Accumulo;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.init.Initialize;
@@ -712,6 +713,11 @@ public class MiniAccumuloClusterImpl implements AccumuloCluster {
     return config.getZooKeepers();
   }
 
+  @Override
+  public ServerContext getServerContext() {
+    return new ServerContext(getClientInfo());
+  }
+
   /**
    * Stops Accumulo and Zookeeper processes. If stop is not called, there is a shutdown hook that is
    * set up to kill the processes. However, it's probably best to call stop in a finally block as soon
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index 93c9d49..abe5965 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -46,7 +46,6 @@ import org.apache.accumulo.core.client.ConditionalWriter;
 import org.apache.accumulo.core.client.ConditionalWriter.Result;
 import org.apache.accumulo.core.client.ConditionalWriterConfig;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.NamespaceExistsException;
@@ -56,7 +55,6 @@ import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.ScannerBase;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.admin.ActiveCompaction;
 import org.apache.accumulo.core.client.admin.ActiveScan;
 import org.apache.accumulo.core.client.admin.CompactionConfig;
@@ -132,7 +130,8 @@ public class ProxyServer implements AccumuloProxy.Iface {
   public static final Logger logger = LoggerFactory.getLogger(ProxyServer.class);
   public static final String RPC_ACCUMULO_PRINCIPAL_MISMATCH_MSG = "RPC"
       + " principal did not match requested Accumulo principal";
-  protected Instance instance;
+  @SuppressWarnings("deprecation")
+  protected org.apache.accumulo.core.client.Instance instance;
 
   protected Class<? extends AuthenticationToken> tokenClass;
 
@@ -191,7 +190,8 @@ public class ProxyServer implements AccumuloProxy.Iface {
   public ProxyServer(Properties props) {
 
     @SuppressWarnings("deprecation")
-    Instance i = new ZooKeeperInstance(ClientConfConverter.toClientConf(props));
+    org.apache.accumulo.core.client.Instance i = new org.apache.accumulo.core.client.ZooKeeperInstance(
+        ClientConfConverter.toClientConf(props));
     instance = i;
 
     try {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
index 7f3920b..e352809 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
@@ -31,16 +31,13 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.util.AddressUtil;
 import org.apache.accumulo.core.volume.Volume;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.ReadOnlyStore;
 import org.apache.accumulo.fate.ReadOnlyTStore;
 import org.apache.accumulo.fate.ZooStore;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.util.time.SimpleTimer;
@@ -109,12 +106,12 @@ public class Accumulo {
     return ServerConstants.getInstanceIdLocation(v);
   }
 
-  public static void init(VolumeManager fs, Instance instance,
+  public static void init(VolumeManager fs, String instanceID,
       ServerConfigurationFactory serverConfig, String application) throws IOException {
     final AccumuloConfiguration conf = serverConfig.getSystemConfiguration();
 
     log.info("{} starting", application);
-    log.info("Instance {}", instance.getInstanceID());
+    log.info("Instance {}", instanceID);
     int dataVersion = Accumulo.getAccumuloPersistentVersion(fs);
     log.info("Data Version {}", dataVersion);
     Accumulo.waitForZookeeperAndHdfs(fs);
@@ -267,7 +264,7 @@ public class Accumulo {
   public static void abortIfFateTransactions() {
     try {
       final ReadOnlyTStore<Accumulo> fate = new ReadOnlyStore<>(
-          new ZooStore<>(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZFATE,
+          new ZooStore<>(ServerContext.getInstance().getZooKeeperRoot() + Constants.ZFATE,
               ZooReaderWriter.getInstance()));
       if (!(fate.list().isEmpty())) {
         throw new AccumuloException("Aborting upgrade because there are"
diff --git a/server/base/src/main/java/org/apache/accumulo/server/AccumuloServerContext.java b/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
similarity index 58%
rename from server/base/src/main/java/org/apache/accumulo/server/AccumuloServerContext.java
rename to server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
index 3d37852..0111794 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/AccumuloServerContext.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
@@ -19,64 +19,132 @@ package org.apache.accumulo.server;
 import static com.google.common.base.Preconditions.checkArgument;
 
 import java.io.IOException;
+import java.util.Objects;
 
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.ConnectorImpl;
-import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.conf.SiteConfiguration;
 import org.apache.accumulo.core.rpc.SslConnectionParams;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.core.trace.DistributedTrace;
 import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.metrics.MetricsSystemHelper;
 import org.apache.accumulo.server.rpc.SaslServerConnectionParams;
 import org.apache.accumulo.server.rpc.ThriftServerType;
 import org.apache.accumulo.server.security.SecurityUtil;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.security.delegation.AuthenticationTokenSecretManager;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Provides a server context for Accumulo server components that operate with the system credentials
  * and have access to the system files and configuration.
  */
-public class AccumuloServerContext extends ClientContext {
+public class ServerContext extends ClientContext {
 
-  private final ServerConfigurationFactory confFactory;
+  private static final Logger log = LoggerFactory.getLogger(ServerContext.class);
+
+  private static ServerContext serverContextInstance = null;
+
+  private final ServerInfo info;
+  private ServerConfigurationFactory serverConfFactory = null;
+  private String applicationName = null;
+  private String applicationClassName = null;
+  private String hostname = null;
   private AuthenticationTokenSecretManager secretManager;
 
-  /**
-   * Construct a server context from the server's configuration
-   */
-  public AccumuloServerContext(Instance instance, ServerConfigurationFactory confFactory) {
-    this(instance, confFactory, null);
+  public ServerContext(ServerInfo info) {
+    super(info, SiteConfiguration.getInstance());
+    this.info = info;
   }
 
-  /**
-   * Construct a server context from the server's configuration
-   */
-  private AccumuloServerContext(Instance instance, ServerConfigurationFactory confFactory,
-      AuthenticationTokenSecretManager secretManager) {
-    super(instance, SystemCredentials.get(instance), confFactory.getSystemConfiguration());
-    this.confFactory = confFactory;
-    this.secretManager = secretManager;
+  public ServerContext(String instanceName, String zooKeepers, int zooKeepersSessionTimeOut) {
+    this(new ServerInfo(instanceName, zooKeepers, zooKeepersSessionTimeOut));
+  }
+
+  public ServerContext(ClientInfo info) {
+    this(new ServerInfo(info));
+  }
+
+  public ServerContext(ClientContext context) {
+    this(new ServerInfo(context.getClientInfo()));
+  }
+
+  synchronized public static ServerContext getInstance() {
+    if (serverContextInstance == null) {
+      serverContextInstance = new ServerContext(new ServerInfo());
+    }
+    return serverContextInstance;
+  }
+
+  public void setupServer(String appName, String appClassName, String hostname) {
+    applicationName = appName;
+    applicationClassName = appClassName;
+    this.hostname = hostname;
+    SecurityUtil.serverLogin(SiteConfiguration.getInstance());
+    log.info("Version " + Constants.VERSION);
+    log.info("Instance " + info.getInstanceID());
+    try {
+      Accumulo.init(getVolumeManager(), getInstanceID(), getServerConfFactory(), applicationName);
+    } catch (IOException e) {
+      throw new IllegalStateException(e);
+    }
+    MetricsSystemHelper.configure(applicationClassName);
+    DistributedTrace.enable(hostname, applicationName,
+        getServerConfFactory().getSystemConfiguration());
     if (null != getSaslParams()) {
       // Server-side "client" check to make sure we're logged in as a user we expect to be
       enforceKerberosLogin();
     }
   }
 
+  public void teardownServer() {
+    DistributedTrace.disable();
+  }
+
+  public String getApplicationName() {
+    Objects.requireNonNull(applicationName);
+    return applicationName;
+  }
+
+  public String getApplicationClassName() {
+    Objects.requireNonNull(applicationClassName);
+    return applicationClassName;
+  }
+
+  public String getHostname() {
+    Objects.requireNonNull(hostname);
+    return hostname;
+  }
+
+  public synchronized ServerConfigurationFactory getServerConfFactory() {
+    if (serverConfFactory == null) {
+      serverConfFactory = new ServerConfigurationFactory(this);
+    }
+    return serverConfFactory;
+  }
+
+  @Override
+  public AccumuloConfiguration getConfiguration() {
+    return getServerConfFactory().getSystemConfiguration();
+  }
+
   /**
    * A "client-side" assertion for servers to validate that they are logged in as the expected user,
    * per the configuration, before performing any RPC
    */
   // Should be private, but package-protected so EasyMock will work
   void enforceKerberosLogin() {
-    final AccumuloConfiguration conf = confFactory.getSiteConfiguration();
+    final AccumuloConfiguration conf = getServerConfFactory().getSiteConfiguration();
     // Unwrap _HOST into the FQDN to make the kerberos principal we'll compare against
     final String kerberosPrincipal = SecurityUtil
         .getServerPrincipal(conf.get(Property.GENERAL_KERBEROS_PRINCIPAL));
@@ -94,11 +162,8 @@ public class AccumuloServerContext extends ClientContext {
         "Expected login user to be " + kerberosPrincipal + " but was " + loginUser.getUserName());
   }
 
-  /**
-   * Retrieve the configuration factory used to construct this context
-   */
-  public ServerConfigurationFactory getServerConfigurationFactory() {
-    return confFactory;
+  public VolumeManager getVolumeManager() {
+    return info.getVolumeManager();
   }
 
   /**
@@ -110,7 +175,7 @@ public class AccumuloServerContext extends ClientContext {
 
   @Override
   public SaslServerConnectionParams getSaslParams() {
-    AccumuloConfiguration conf = getServerConfigurationFactory().getSiteConfiguration();
+    AccumuloConfiguration conf = getServerConfFactory().getSiteConfiguration();
     if (!conf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
       return null;
     }
@@ -153,20 +218,16 @@ public class AccumuloServerContext extends ClientContext {
     return secretManager;
   }
 
-  // Need to override this from ClientContext to ensure that HdfsZooInstance doesn't "downcast"
-  // the AccumuloServerContext into a ClientContext (via the copy-constructor on ClientContext)
   @Override
-  public Connector getConnector() throws AccumuloException, AccumuloSecurityException {
-    // avoid making more connectors than necessary
+  public synchronized Connector getConnector() throws AccumuloException, AccumuloSecurityException {
     if (conn == null) {
-      if (getInstance() instanceof ZooKeeperInstance || getInstance() instanceof HdfsZooInstance) {
-        // reuse existing context
-        conn = new ConnectorImpl(this);
-      } else {
-        Credentials c = getCredentials();
-        conn = getInstance().getConnector(c.getPrincipal(), c.getToken());
-      }
+      conn = new ConnectorImpl(this);
     }
     return conn;
   }
+
+  public Connector getConnector(String principal, AuthenticationToken token)
+      throws AccumuloSecurityException, AccumuloException {
+    return Connector.builder().usingClientInfo(info).usingToken(principal, token).build();
+  }
 }
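
Taken together, the renamed class gives server code a single entry point where AccumuloServerContext previously needed an Instance plus system credentials. A sketch of the new construction paths, with placeholder connection details; note that getInstance() only works on a node where the server's site configuration and HDFS instance id are available:

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.server.ServerContext;

class ServerContextSketch {
  public static void main(String[] args) throws Exception {
    // Explicit construction from an instance name and ZooKeeper quorum (placeholders).
    ServerContext explicit = new ServerContext("dev", "localhost:2181", 30_000);

    // Process-wide context derived from the local server configuration.
    ServerContext shared = ServerContext.getInstance();

    // getConnector() now always wraps this context in a ConnectorImpl.
    Connector conn = shared.getConnector();
    System.out.println(conn.whoami() + " @ " + explicit.getZooKeeperRoot());
  }
}
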
diff --git a/server/base/src/main/java/org/apache/accumulo/server/ServerInfo.java b/server/base/src/main/java/org/apache/accumulo/server/ServerInfo.java
new file mode 100644
index 0000000..aa607ea
--- /dev/null
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerInfo.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.IOException;
+import java.util.Properties;
+import java.util.UUID;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.ClientInfo;
+import org.apache.accumulo.core.client.impl.ClientConfConverter;
+import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.client.impl.InstanceOperationsImpl;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.ClientProperty;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.conf.SiteConfiguration;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.ZooCache;
+import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManagerImpl;
+import org.apache.accumulo.server.security.SystemCredentials;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ServerInfo implements ClientInfo {
+
+  private static final Logger log = LoggerFactory.getLogger(ServerInfo.class);
+
+  private String instanceID;
+  private String instanceName;
+  private String zooKeepers;
+  private int zooKeepersSessionTimeOut;
+  private String zooKeeperRoot;
+  private VolumeManager volumeManager;
+  private ZooCache zooCache;
+
+  public ServerInfo(ClientInfo info) {
+    this(info.getInstanceName(), info.getZooKeepers(), info.getZooKeepersSessionTimeOut());
+  }
+
+  public ServerInfo(String instanceName, String zooKeepers, int zooKeepersSessionTimeOut) {
+    this.instanceName = instanceName;
+    this.zooKeepers = zooKeepers;
+    this.zooKeepersSessionTimeOut = zooKeepersSessionTimeOut;
+    zooCache = new ZooCacheFactory().getZooCache(zooKeepers, zooKeepersSessionTimeOut);
+    String instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + instanceName;
+    byte[] iidb = zooCache.get(instanceNamePath);
+    if (iidb == null) {
+      throw new RuntimeException("Instance name " + instanceName + " does not exist in zookeeper. "
+          + "Run \"accumulo org.apache.accumulo.server.util.ListInstances\" to see a list.");
+    }
+    instanceID = new String(iidb, UTF_8);
+    if (zooCache.get(Constants.ZROOT + "/" + instanceID) == null) {
+      if (instanceName == null) {
+        throw new RuntimeException("Instance id " + instanceID + " does not exist in zookeeper");
+      }
+      throw new RuntimeException("Instance id " + instanceID + " pointed to by the name "
+          + instanceName + " does not exist in zookeeper");
+    }
+    zooKeeperRoot = ZooUtil.getRoot(instanceID);
+  }
+
+  public ServerInfo() {
+    this(SiteConfiguration.getInstance());
+  }
+
+  public ServerInfo(AccumuloConfiguration config) {
+    try {
+      volumeManager = VolumeManagerImpl.get();
+    } catch (IOException e) {
+      throw new IllegalStateException(e);
+    }
+    Path instanceIdPath = Accumulo.getAccumuloInstanceIdPath(volumeManager);
+    instanceID = ZooUtil.getInstanceIDFromHdfs(instanceIdPath, config);
+    zooKeeperRoot = ZooUtil.getRoot(instanceID);
+    zooKeepers = config.get(Property.INSTANCE_ZK_HOST);
+    zooKeepersSessionTimeOut = (int) config.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT);
+    zooCache = new ZooCacheFactory().getZooCache(zooKeepers, zooKeepersSessionTimeOut);
+    instanceName = InstanceOperationsImpl.lookupInstanceName(zooCache, UUID.fromString(instanceID));
+  }
+
+  public VolumeManager getVolumeManager() {
+    return volumeManager;
+  }
+
+  public String getInstanceID() {
+    return instanceID;
+  }
+
+  public String getZooKeeperRoot() {
+    return zooKeeperRoot;
+  }
+
+  public String getZooKeepers() {
+    return zooKeepers;
+  }
+
+  public int getZooKeepersSessionTimeOut() {
+    return zooKeepersSessionTimeOut;
+  }
+
+  @Override
+  public String getPrincipal() {
+    return getCredentials().getPrincipal();
+  }
+
+  @Override
+  public AuthenticationToken getAuthenticationToken() {
+    return getCredentials().getToken();
+  }
+
+  @Override
+  public boolean saslEnabled() {
+    return SiteConfiguration.getInstance().getBoolean(Property.INSTANCE_RPC_SASL_ENABLED);
+  }
+
+  @Override
+  public Properties getProperties() {
+    Properties properties = ClientConfConverter.toProperties(SiteConfiguration.getInstance());
+    properties.setProperty(ClientProperty.INSTANCE_ZOOKEEPERS.getKey(), getZooKeepers());
+    properties.setProperty(ClientProperty.INSTANCE_ZOOKEEPERS_TIMEOUT.getKey(),
+        Integer.toString(getZooKeepersSessionTimeOut()));
+    properties.setProperty(ClientProperty.INSTANCE_NAME.getKey(), getInstanceName());
+    ClientProperty.setAuthenticationToken(properties, getAuthenticationToken());
+    properties.setProperty(ClientProperty.AUTH_PRINCIPAL.getKey(), getPrincipal());
+    return properties;
+  }
+
+  public String getInstanceName() {
+    return instanceName;
+  }
+
+  public Credentials getCredentials() {
+    return SystemCredentials.get(getInstanceID());
+  }
+}
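
Because ServerInfo implements ClientInfo, one object can back both the server-side context and a plain client Connector. A short sketch under the assumptions that the placeholder instance name and ZooKeepers are reachable and that system credentials resolve on the local node:

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.server.ServerContext;
import org.apache.accumulo.server.ServerInfo;

class ServerInfoSketch {
  public static void main(String[] args) throws Exception {
    ServerInfo info = new ServerInfo("dev", "localhost:2181", 30_000); // placeholders
    ServerContext context = new ServerContext(info);                   // server-side view
    Connector conn = Connector.builder().usingClientInfo(info).build(); // client-side view
    System.out.println(context.getZooKeeperRoot() + " " + conn.whoami());
  }
}
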
diff --git a/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnDefaultTable.java b/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnDefaultTable.java
index ebf1ad5..74a5e03 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnDefaultTable.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnDefaultTable.java
@@ -16,29 +16,20 @@
  */
 package org.apache.accumulo.server.cli;
 
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.core.client.ClientInfo;
+import org.apache.accumulo.server.ServerContext;
 
 public class ClientOnDefaultTable extends org.apache.accumulo.core.cli.ClientOnDefaultTable {
   {
     setPrincipal("root");
   }
 
-  @Override
-  synchronized public Instance getInstance() {
-    if (cachedInstance != null)
-      return cachedInstance;
-
+  public ServerContext getServerContext() {
     if (instance == null) {
-      return cachedInstance = HdfsZooInstance.getInstance();
-    }
-    try {
-      return cachedInstance = getConnector().getInstance();
-    } catch (AccumuloSecurityException | AccumuloException e) {
-      throw new IllegalStateException(e);
+      return ServerContext.getInstance();
     }
+    ClientInfo info = getClientInfo();
+    return new ServerContext(instance, info.getZooKeepers(), info.getZooKeepersSessionTimeOut());
   }
 
   public ClientOnDefaultTable(String table) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnRequiredTable.java b/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnRequiredTable.java
index 9c147d5..c681f01 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnRequiredTable.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnRequiredTable.java
@@ -16,28 +16,19 @@
  */
 package org.apache.accumulo.server.cli;
 
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.core.client.ClientInfo;
+import org.apache.accumulo.server.ServerContext;
 
 public class ClientOnRequiredTable extends org.apache.accumulo.core.cli.ClientOnRequiredTable {
   {
     setPrincipal("root");
   }
 
-  @Override
-  synchronized public Instance getInstance() {
-    if (cachedInstance != null)
-      return cachedInstance;
-
+  public ServerContext getServerContext() {
     if (instance == null) {
-      return cachedInstance = HdfsZooInstance.getInstance();
-    }
-    try {
-      return cachedInstance = getConnector().getInstance();
-    } catch (AccumuloSecurityException | AccumuloException e) {
-      throw new IllegalStateException(e);
+      return ServerContext.getInstance();
     }
+    ClientInfo info = getClientInfo();
+    return new ServerContext(instance, info.getZooKeepers(), info.getZooKeepersSessionTimeOut());
   }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOpts.java b/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOpts.java
index d420e9d..a2c0346 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOpts.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOpts.java
@@ -16,11 +16,9 @@
  */
 package org.apache.accumulo.server.cli;
 
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 
 public class ClientOpts extends org.apache.accumulo.core.cli.ClientOpts {
   {
@@ -31,15 +29,11 @@ public class ClientOpts extends org.apache.accumulo.core.cli.ClientOpts {
     return new ClientContext(getClientInfo());
   }
 
-  @Override
-  public Instance getInstance() {
+  public ServerContext getServerContext() {
     if (instance == null) {
-      return HdfsZooInstance.getInstance();
-    }
-    try {
-      return getConnector().getInstance();
-    } catch (AccumuloSecurityException | AccumuloException e) {
-      throw new IllegalStateException(e);
+      return ServerContext.getInstance();
     }
+    ClientInfo info = getClientInfo();
+    return new ServerContext(instance, info.getZooKeepers(), info.getZooKeepersSessionTimeOut());
   }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java b/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
index d1c6be0..3923e90 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
@@ -59,7 +59,7 @@ import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.trace.thrift.TInfo;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
@@ -75,12 +75,12 @@ import org.slf4j.LoggerFactory;
 public class ClientServiceHandler implements ClientService.Iface {
   private static final Logger log = LoggerFactory.getLogger(ClientServiceHandler.class);
   protected final TransactionWatcher transactionWatcher;
-  private final AccumuloServerContext context;
+  private final ServerContext context;
   private final VolumeManager fs;
   private final SecurityOperation security;
   private final ServerBulkImportStatus bulkImportStatus = new ServerBulkImportStatus();
 
-  public ClientServiceHandler(AccumuloServerContext context, TransactionWatcher transactionWatcher,
+  public ClientServiceHandler(ServerContext context, TransactionWatcher transactionWatcher,
       VolumeManager fs) {
     this.context = context;
     this.transactionWatcher = transactionWatcher;
@@ -310,7 +310,7 @@ public class ClientServiceHandler implements ClientService.Iface {
   @Override
   public Map<String,String> getConfiguration(TInfo tinfo, TCredentials credentials,
       ConfigurationType type) throws TException {
-    ServerConfigurationFactory factory = context.getServerConfigurationFactory();
+    ServerConfigurationFactory factory = context.getServerConfFactory();
     switch (type) {
       case CURRENT:
         return conf(credentials, factory.getSystemConfiguration());
@@ -326,8 +326,7 @@ public class ClientServiceHandler implements ClientService.Iface {
   public Map<String,String> getTableConfiguration(TInfo tinfo, TCredentials credentials,
       String tableName) throws TException, ThriftTableOperationException {
     Table.ID tableId = checkTableId(context, tableName, null);
-    AccumuloConfiguration config = context.getServerConfigurationFactory()
-        .getTableConfiguration(tableId);
+    AccumuloConfiguration config = context.getServerConfFactory().getTableConfiguration(tableId);
     return conf(credentials, config);
   }
 
@@ -395,8 +394,7 @@ public class ClientServiceHandler implements ClientService.Iface {
     try {
       shouldMatch = loader.loadClass(interfaceMatch);
 
-      AccumuloConfiguration conf = context.getServerConfigurationFactory()
-          .getTableConfiguration(tableId);
+      AccumuloConfiguration conf = context.getServerConfFactory().getTableConfiguration(tableId);
 
       String context = conf.get(Property.TABLE_CLASSPATH);
 
@@ -431,7 +429,7 @@ public class ClientServiceHandler implements ClientService.Iface {
     try {
       shouldMatch = loader.loadClass(interfaceMatch);
 
-      AccumuloConfiguration conf = context.getServerConfigurationFactory()
+      AccumuloConfiguration conf = context.getServerConfFactory()
           .getNamespaceConfiguration(namespaceId);
 
       String context = conf.get(Property.TABLE_CLASSPATH);
@@ -496,7 +494,7 @@ public class ClientServiceHandler implements ClientService.Iface {
       throw new ThriftTableOperationException(null, ns, null,
           TableOperationExceptionType.NAMESPACE_NOTFOUND, why);
     }
-    AccumuloConfiguration config = context.getServerConfigurationFactory()
+    AccumuloConfiguration config = context.getServerConfFactory()
         .getNamespaceConfiguration(namespaceId);
     return conf(credentials, config);
   }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java b/server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
index 2c4d76c..49479ad 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
@@ -21,20 +21,22 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
+import java.util.Properties;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.impl.ClientConfConverter;
 import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.ConnectorImpl;
-import org.apache.accumulo.core.client.impl.Credentials;
 import org.apache.accumulo.core.client.impl.InstanceOperationsImpl;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.ClientProperty;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.conf.SiteConfiguration;
 import org.apache.accumulo.core.metadata.RootTable;
@@ -43,6 +45,7 @@ import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
 import org.apache.accumulo.server.Accumulo;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.zookeeper.ZooLock;
@@ -55,8 +58,11 @@ import com.google.common.base.Joiner;
 /**
  * An implementation of Instance that looks in HDFS and ZooKeeper to find the master and root tablet
  * location.
+ *
+ * @deprecated since 2.0.0, use {@link ServerContext#getInstance()} instead
  */
-public class HdfsZooInstance implements Instance {
+@Deprecated
+public class HdfsZooInstance implements org.apache.accumulo.core.client.Instance {
 
   private final AccumuloConfiguration site = SiteConfiguration.getInstance();
 
@@ -67,7 +73,7 @@ public class HdfsZooInstance implements Instance {
 
   private static final HdfsZooInstance cachedHdfsZooInstance = new HdfsZooInstance();
 
-  public static Instance getInstance() {
+  public static org.apache.accumulo.core.client.Instance getInstance() {
     return cachedHdfsZooInstance;
   }
 
@@ -77,7 +83,7 @@ public class HdfsZooInstance implements Instance {
 
   @Override
   public String getRootTabletLocation() {
-    String zRootLocPath = ZooUtil.getRoot(this) + RootTable.ZROOT_TABLET_LOCATION;
+    String zRootLocPath = ZooUtil.getRoot(getInstanceID()) + RootTable.ZROOT_TABLET_LOCATION;
 
     OpTimer timer = null;
 
@@ -106,7 +112,7 @@ public class HdfsZooInstance implements Instance {
   @Override
   public List<String> getMasterLocations() {
 
-    String masterLocPath = ZooUtil.getRoot(this) + Constants.ZMASTER_LOCK;
+    String masterLocPath = ZooUtil.getRoot(getInstanceID()) + Constants.ZMASTER_LOCK;
 
     OpTimer timer = null;
 
@@ -172,11 +178,18 @@ public class HdfsZooInstance implements Instance {
   @Override
   public Connector getConnector(String principal, AuthenticationToken token)
       throws AccumuloException, AccumuloSecurityException {
-    return new ConnectorImpl(new ClientContext(this, new Credentials(principal, token), site));
+    Properties properties = ClientConfConverter.toProperties(site);
+    properties.setProperty(ClientProperty.INSTANCE_NAME.getKey(), getInstanceName());
+    properties.setProperty(ClientProperty.INSTANCE_ZOOKEEPERS.getKey(), getZooKeepers());
+    properties.setProperty(ClientProperty.INSTANCE_ZOOKEEPERS_TIMEOUT.getKey(),
+        Integer.toString(getZooKeepersSessionTimeOut()));
+    properties.setProperty(ClientProperty.AUTH_PRINCIPAL.getKey(), principal);
+    ClientProperty.setAuthenticationToken(properties, token);
+    return new ConnectorImpl(new ClientContext(ClientInfo.from(properties, token)));
   }
 
   public static void main(String[] args) {
-    Instance instance = HdfsZooInstance.getInstance();
+    org.apache.accumulo.core.client.Instance instance = HdfsZooInstance.getInstance();
     System.out.println("Instance Name: " + instance.getInstanceName());
     System.out.println("Instance ID: " + instance.getInstanceID());
     System.out.println("ZooKeepers: " + instance.getZooKeepers());
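
For readers tracking the API change above: getConnector() now builds a ClientContext from client Properties rather than from a Credentials object. Below is a minimal sketch of that pattern, using only the classes and property keys visible in this hunk; the wrapper class and method names are illustrative only, not part of the commit.

    import java.util.Properties;

    import org.apache.accumulo.core.client.ClientInfo;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.impl.ClientContext;
    import org.apache.accumulo.core.client.impl.ConnectorImpl;
    import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
    import org.apache.accumulo.core.conf.ClientProperty;

    public class PropertiesConnectorSketch {
      // Builds a Connector from client properties, mirroring the new getConnector() body above.
      public static Connector connect(String instanceName, String zooKeepers, String principal,
          AuthenticationToken token) throws Exception {
        Properties props = new Properties();
        props.setProperty(ClientProperty.INSTANCE_NAME.getKey(), instanceName);
        props.setProperty(ClientProperty.INSTANCE_ZOOKEEPERS.getKey(), zooKeepers);
        props.setProperty(ClientProperty.AUTH_PRINCIPAL.getKey(), principal);
        ClientProperty.setAuthenticationToken(props, token);
        return new ConnectorImpl(new ClientContext(ClientInfo.from(props, token)));
      }
    }
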
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/ConfigSanityCheck.java b/server/base/src/main/java/org/apache/accumulo/server/conf/ConfigSanityCheck.java
index 2608a03..1137ba9 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/ConfigSanityCheck.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/ConfigSanityCheck.java
@@ -16,7 +16,7 @@
  */
 package org.apache.accumulo.server.conf;
 
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.start.spi.KeywordExecutable;
 
 import com.google.auto.service.AutoService;
@@ -25,7 +25,7 @@ import com.google.auto.service.AutoService;
 public class ConfigSanityCheck implements KeywordExecutable {
 
   public static void main(String[] args) {
-    new ServerConfigurationFactory(HdfsZooInstance.getInstance()).getSystemConfiguration();
+    ServerContext.getInstance().getServerConfFactory().getSystemConfiguration();
   }
 
   @Override
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/NamespaceConfWatcher.java b/server/base/src/main/java/org/apache/accumulo/server/conf/NamespaceConfWatcher.java
index 0aa3245..46481e5 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/NamespaceConfWatcher.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/NamespaceConfWatcher.java
@@ -17,9 +17,8 @@
 package org.apache.accumulo.server.conf;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Namespace;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.WatchedEvent;
@@ -32,16 +31,16 @@ class NamespaceConfWatcher implements Watcher {
   }
 
   private static final Logger log = Logger.getLogger(NamespaceConfWatcher.class);
-  private final Instance instance;
+  private final ServerContext context;
   private final String namespacesPrefix;
   private final int namespacesPrefixLength;
   private ServerConfigurationFactory scf;
 
-  NamespaceConfWatcher(Instance instance) {
-    this.instance = instance;
-    namespacesPrefix = ZooUtil.getRoot(instance) + Constants.ZNAMESPACES + "/";
+  NamespaceConfWatcher(ServerContext context) {
+    this.context = context;
+    namespacesPrefix = context.getZooKeeperRoot() + Constants.ZNAMESPACES + "/";
     namespacesPrefixLength = namespacesPrefix.length();
-    scf = new ServerConfigurationFactory(instance);
+    scf = new ServerConfigurationFactory(context);
   }
 
   static String toString(WatchedEvent event) {
@@ -89,7 +88,7 @@ class NamespaceConfWatcher implements Watcher {
         break;
       case NodeDeleted:
         if (key == null) {
-          ServerConfigurationFactory.removeCachedNamespaceConfiguration(instance.getInstanceID(),
+          ServerConfigurationFactory.removeCachedNamespaceConfiguration(context.getInstanceID(),
               namespaceId);
         }
         break;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/NamespaceConfiguration.java b/server/base/src/main/java/org/apache/accumulo/server/conf/NamespaceConfiguration.java
index 2f2c380..ae27fa2 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/NamespaceConfiguration.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/NamespaceConfiguration.java
@@ -20,15 +20,14 @@ import java.util.Map;
 import java.util.function.Predicate;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Namespace;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.ConfigurationObserver;
 import org.apache.accumulo.core.conf.ObservableConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.ZooCachePropertyAccessor.PropCacheKey;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -41,16 +40,16 @@ public class NamespaceConfiguration extends ObservableConfiguration {
   private final AccumuloConfiguration parent;
   private ZooCachePropertyAccessor propCacheAccessor = null;
   protected Namespace.ID namespaceId = null;
-  protected Instance inst = null;
+  protected ServerContext context;
   private ZooCacheFactory zcf = new ZooCacheFactory();
   private final String path;
 
-  public NamespaceConfiguration(Namespace.ID namespaceId, Instance inst,
+  public NamespaceConfiguration(Namespace.ID namespaceId, ServerContext context,
       AccumuloConfiguration parent) {
-    this.inst = inst;
+    this.context = context;
     this.parent = parent;
     this.namespaceId = namespaceId;
-    this.path = ZooUtil.getRoot(inst.getInstanceID()) + Constants.ZNAMESPACES + "/" + namespaceId
+    this.path = context.getZooKeeperRoot() + Constants.ZNAMESPACES + "/" + namespaceId
         + Constants.ZNAMESPACE_CONF;
   }
 
@@ -70,11 +69,11 @@ public class NamespaceConfiguration extends ObservableConfiguration {
   private synchronized ZooCachePropertyAccessor getPropCacheAccessor() {
     if (propCacheAccessor == null) {
       synchronized (propCaches) {
-        PropCacheKey key = new PropCacheKey(inst.getInstanceID(), namespaceId.canonicalID());
+        PropCacheKey key = new PropCacheKey(context.getInstanceID(), namespaceId.canonicalID());
         ZooCache propCache = propCaches.get(key);
         if (propCache == null) {
-          propCache = zcf.getZooCache(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut(),
-              new NamespaceConfWatcher(inst));
+          propCache = zcf.getZooCache(context.getZooKeepers(),
+              context.getZooKeepersSessionTimeOut(), new NamespaceConfWatcher(context));
           propCaches.put(key, propCache);
         }
         propCacheAccessor = new ZooCachePropertyAccessor(propCache);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java b/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java
index e50b522..86a3c7c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java
@@ -19,7 +19,6 @@ package org.apache.accumulo.server.conf;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.Namespace;
 import org.apache.accumulo.core.client.impl.Table;
@@ -29,6 +28,7 @@ import org.apache.accumulo.core.conf.ConfigSanityCheck;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.conf.SiteConfiguration;
 import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
+import org.apache.accumulo.server.ServerContext;
 
 /**
  * A factory for configurations used by a server process. Instances of this class are thread-safe.
@@ -87,13 +87,13 @@ public class ServerConfigurationFactory extends ServerConfiguration {
     }
   }
 
-  private final Instance instance;
+  private final ServerContext context;
   private final String instanceID;
   private ZooCacheFactory zcf = new ZooCacheFactory();
 
-  public ServerConfigurationFactory(Instance instance) {
-    this.instance = instance;
-    instanceID = instance.getInstanceID();
+  public ServerConfigurationFactory(ServerContext context) {
+    this.context = context;
+    instanceID = context.getInstanceID();
     addInstanceToCaches(instanceID);
   }
 
@@ -122,7 +122,7 @@ public class ServerConfigurationFactory extends ServerConfiguration {
   @Override
   public synchronized AccumuloConfiguration getSystemConfiguration() {
     if (systemConfig == null) {
-      systemConfig = new ZooConfigurationFactory().getInstance(instance, zcf,
+      systemConfig = new ZooConfigurationFactory().getInstance(context, zcf,
           getSiteConfiguration());
     }
     return systemConfig;
@@ -145,8 +145,8 @@ public class ServerConfigurationFactory extends ServerConfiguration {
     // Tablet sets will never see updates from ZooKeeper which means that things like constraints
     // and
     // default visibility labels will never be updated in a Tablet until it is reloaded.
-    if (conf == null && Tables.exists(instance, tableId)) {
-      conf = new TableConfiguration(instance, tableId, getNamespaceConfigurationForTable(tableId));
+    if (conf == null && Tables.exists(context, tableId)) {
+      conf = new TableConfiguration(context, tableId, getNamespaceConfigurationForTable(tableId));
       ConfigSanityCheck.validate(conf);
       synchronized (tableConfigs) {
         Map<Table.ID,TableConfiguration> configs = tableConfigs.get(instanceID);
@@ -173,11 +173,11 @@ public class ServerConfigurationFactory extends ServerConfiguration {
     if (conf == null) {
       Namespace.ID namespaceId;
       try {
-        namespaceId = Tables.getNamespaceId(instance, tableId);
+        namespaceId = Tables.getNamespaceId(context, tableId);
       } catch (TableNotFoundException e) {
         throw new RuntimeException(e);
       }
-      conf = new NamespaceConfiguration(namespaceId, instance, getSystemConfiguration());
+      conf = new NamespaceConfiguration(namespaceId, context, getSystemConfiguration());
       ConfigSanityCheck.validate(conf);
       synchronized (tableParentConfigs) {
         tableParentConfigs.get(instanceID).put(tableId, conf);
@@ -196,7 +196,7 @@ public class ServerConfigurationFactory extends ServerConfiguration {
     }
     if (conf == null) {
       // changed - include instance in constructor call
-      conf = new NamespaceConfiguration(namespaceId, instance, getSystemConfiguration());
+      conf = new NamespaceConfiguration(namespaceId, context, getSystemConfiguration());
       conf.setZooCacheFactory(zcf);
       ConfigSanityCheck.validate(conf);
       synchronized (namespaceConfigs) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfWatcher.java b/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfWatcher.java
index ee07726..f3df058 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfWatcher.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfWatcher.java
@@ -17,9 +17,8 @@
 package org.apache.accumulo.server.conf;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Table;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.WatchedEvent;
@@ -32,14 +31,14 @@ class TableConfWatcher implements Watcher {
   }
 
   private static final Logger log = Logger.getLogger(TableConfWatcher.class);
-  private final Instance instance;
+  private final ServerContext context;
   private final String tablesPrefix;
   private ServerConfigurationFactory scf;
 
-  TableConfWatcher(Instance instance) {
-    this.instance = instance;
-    tablesPrefix = ZooUtil.getRoot(instance) + Constants.ZTABLES + "/";
-    scf = new ServerConfigurationFactory(instance);
+  TableConfWatcher(ServerContext context) {
+    this.context = context;
+    tablesPrefix = context.getZooKeeperRoot() + Constants.ZTABLES + "/";
+    scf = context.getServerConfFactory();
   }
 
   static String toString(WatchedEvent event) {
@@ -90,7 +89,7 @@ class TableConfWatcher implements Watcher {
           // only remove the AccumuloConfiguration object when a
           // table node is deleted, not when a tables property is
           // deleted.
-          ServerConfigurationFactory.removeCachedTableConfiguration(instance.getInstanceID(),
+          ServerConfigurationFactory.removeCachedTableConfiguration(context.getInstanceID(),
               tableId);
         }
         break;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java b/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java
index cc67feb..706880c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java
@@ -29,7 +29,6 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Predicate;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.conf.ConfigurationObserver;
 import org.apache.accumulo.core.conf.ObservableConfiguration;
@@ -38,9 +37,9 @@ import org.apache.accumulo.core.data.thrift.IterInfo;
 import org.apache.accumulo.core.iterators.IteratorUtil;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 import org.apache.accumulo.core.spi.scan.ScanDispatcher;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.ZooCachePropertyAccessor.PropCacheKey;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -55,7 +54,7 @@ public class TableConfiguration extends ObservableConfiguration {
   private static final Map<PropCacheKey,ZooCache> propCaches = new java.util.HashMap<>();
 
   private ZooCachePropertyAccessor propCacheAccessor = null;
-  private final Instance instance;
+  private final ServerContext context;
   private final NamespaceConfiguration parent;
   private ZooCacheFactory zcf = new ZooCacheFactory();
 
@@ -63,8 +62,9 @@ public class TableConfiguration extends ObservableConfiguration {
 
   private EnumMap<IteratorScope,AtomicReference<ParsedIteratorConfig>> iteratorConfig;
 
-  public TableConfiguration(Instance instance, Table.ID tableId, NamespaceConfiguration parent) {
-    this.instance = requireNonNull(instance);
+  public TableConfiguration(ServerContext context, Table.ID tableId,
+      NamespaceConfiguration parent) {
+    this.context = requireNonNull(context);
     this.tableId = requireNonNull(tableId);
     this.parent = requireNonNull(parent);
 
@@ -81,11 +81,11 @@ public class TableConfiguration extends ObservableConfiguration {
   private synchronized ZooCachePropertyAccessor getPropCacheAccessor() {
     if (propCacheAccessor == null) {
       synchronized (propCaches) {
-        PropCacheKey key = new PropCacheKey(instance.getInstanceID(), tableId.canonicalID());
+        PropCacheKey key = new PropCacheKey(context.getInstanceID(), tableId.canonicalID());
         ZooCache propCache = propCaches.get(key);
         if (propCache == null) {
-          propCache = zcf.getZooCache(instance.getZooKeepers(),
-              instance.getZooKeepersSessionTimeOut(), new TableConfWatcher(instance));
+          propCache = zcf.getZooCache(context.getZooKeepers(),
+              context.getZooKeepersSessionTimeOut(), new TableConfWatcher(context));
           propCaches.put(key, propCache);
         }
         propCacheAccessor = new ZooCachePropertyAccessor(propCache);
@@ -116,8 +116,7 @@ public class TableConfiguration extends ObservableConfiguration {
   }
 
   private String getPath() {
-    return ZooUtil.getRoot(instance.getInstanceID()) + Constants.ZTABLES + "/" + tableId
-        + Constants.ZTABLE_CONF;
+    return context.getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_CONF;
   }
 
   @Override
@@ -138,7 +137,7 @@ public class TableConfiguration extends ObservableConfiguration {
    * returns the actual NamespaceConfiguration that corresponds to the current parent namespace.
    */
   public NamespaceConfiguration getNamespaceConfiguration() {
-    return new ServerConfigurationFactory(parent.inst)
+    return new ServerConfigurationFactory(parent.context)
         .getNamespaceConfiguration(parent.namespaceId);
   }
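
A recurring pattern in this commit, visible in the getPath() hunk above and in the earlier NamespaceConfiguration change, is that ZooKeeper paths are now rooted at ServerContext.getZooKeeperRoot() instead of ZooUtil.getRoot(instanceId). A minimal sketch of that pattern follows; the class and method names are illustrative only.

    import org.apache.accumulo.core.Constants;
    import org.apache.accumulo.core.client.impl.Table;
    import org.apache.accumulo.server.ServerContext;

    class ZooKeeperPathSketch {
      // Builds the per-table configuration path the same way the new getPath() above does.
      static String tableConfPath(ServerContext context, Table.ID tableId) {
        return context.getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_CONF;
      }
    }
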
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/TableParentConfiguration.java b/server/base/src/main/java/org/apache/accumulo/server/conf/TableParentConfiguration.java
index c55483f..a5c5602 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/TableParentConfiguration.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/TableParentConfiguration.java
@@ -16,12 +16,12 @@
  */
 package org.apache.accumulo.server.conf;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.Namespace;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.server.ServerContext;
 
 /**
  * Used by TableConfiguration to dynamically get the NamespaceConfiguration if the namespace changes
@@ -30,8 +30,9 @@ public class TableParentConfiguration extends NamespaceConfiguration {
 
   private Table.ID tableId;
 
-  public TableParentConfiguration(Table.ID tableId, Instance inst, AccumuloConfiguration parent) {
-    super(null, inst, parent);
+  public TableParentConfiguration(Table.ID tableId, ServerContext context,
+      AccumuloConfiguration parent) {
+    super(null, context, parent);
     this.tableId = tableId;
     this.namespaceId = getNamespaceId();
   }
@@ -39,7 +40,7 @@ public class TableParentConfiguration extends NamespaceConfiguration {
   @Override
   protected Namespace.ID getNamespaceId() {
     try {
-      return Tables.getNamespaceId(inst, tableId);
+      return Tables.getNamespaceId(context, tableId);
     } catch (TableNotFoundException e) {
       throw new RuntimeException(e);
     }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfigurationFactory.java b/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfigurationFactory.java
index bba9425..efb4902 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfigurationFactory.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfigurationFactory.java
@@ -20,13 +20,13 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
 import org.apache.accumulo.server.Accumulo;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.hadoop.fs.Path;
@@ -43,17 +43,18 @@ class ZooConfigurationFactory {
    * Gets a configuration object for the given instance with the given parent. Repeated calls will
    * return the same object.
    *
-   * @param inst
-   *          instance; if null, instance is determined from HDFS
+   * @param context
+   *          ServerContext; if null, instance ID is determined from HDFS
    * @param zcf
    *          {@link ZooCacheFactory} for building {@link ZooCache} to contact ZooKeeper (required)
    * @param parent
    *          parent configuration (required)
    * @return configuration
    */
-  ZooConfiguration getInstance(Instance inst, ZooCacheFactory zcf, AccumuloConfiguration parent) {
+  ZooConfiguration getInstance(ServerContext context, ZooCacheFactory zcf,
+      AccumuloConfiguration parent) {
     String instanceId;
-    if (inst == null) {
+    if (context == null) {
       // InstanceID should be the same across all volumes, so just choose one
       VolumeManager fs;
       try {
@@ -64,7 +65,7 @@ class ZooConfigurationFactory {
       Path instanceIdPath = Accumulo.getAccumuloInstanceIdPath(fs);
       instanceId = ZooUtil.getInstanceIDFromHdfs(instanceIdPath, parent);
     } else {
-      instanceId = inst.getInstanceID();
+      instanceId = context.getInstanceID();
     }
 
     ZooConfiguration config;
@@ -80,12 +81,12 @@ class ZooConfigurationFactory {
           @Override
           public void process(WatchedEvent arg0) {}
         };
-        if (inst == null) {
+        if (context == null) {
           propCache = zcf.getZooCache(parent.get(Property.INSTANCE_ZK_HOST),
               (int) parent.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT), watcher);
         } else {
-          propCache = zcf.getZooCache(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut(),
-              watcher);
+          propCache = zcf.getZooCache(context.getZooKeepers(),
+              context.getZooKeepersSessionTimeOut(), watcher);
         }
         config = new ZooConfiguration(instanceId, propCache, parent);
         instances.put(instanceId, config);
@@ -98,13 +99,13 @@ class ZooConfigurationFactory {
    * Gets a configuration object for the given instance with the given parent. Repeated calls will
    * return the same object.
    *
-   * @param inst
-   *          instance; if null, instance is determined from HDFS
+   * @param context
+   *          ServerContext; if null, instance ID is determined from HDFS
    * @param parent
    *          parent configuration (required)
    * @return configuration
    */
-  public ZooConfiguration getInstance(Instance inst, AccumuloConfiguration parent) {
-    return getInstance(inst, new ZooCacheFactory(), parent);
+  public ZooConfiguration getInstance(ServerContext context, AccumuloConfiguration parent) {
+    return getInstance(context, new ZooCacheFactory(), parent);
   }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java b/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
index 11131f2..95b6767 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
@@ -41,7 +41,7 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.Sc
 import org.apache.accumulo.core.util.ColumnFQ;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
 import org.apache.accumulo.server.zookeeper.ZooCache;
 import org.apache.accumulo.server.zookeeper.ZooLock;
@@ -264,7 +264,7 @@ public class MetadataConstraints implements Constraint {
           }
 
           if (zooRoot == null) {
-            zooRoot = ZooUtil.getRoot(HdfsZooInstance.getInstance());
+            zooRoot = ServerContext.getInstance().getZooKeeperRoot();
           }
 
           boolean lockHeld = false;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/PerTableVolumeChooser.java b/server/base/src/main/java/org/apache/accumulo/server/fs/PerTableVolumeChooser.java
index e16c074..f01a019 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/PerTableVolumeChooser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/PerTableVolumeChooser.java
@@ -23,7 +23,7 @@ import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.fs.VolumeChooserEnvironment.ChooserScope;
@@ -186,7 +186,7 @@ public class PerTableVolumeChooser implements VolumeChooser {
     ServerConfigurationFactory localConf = lazyConfFactory;
     if (localConf == null) {
       // If we're under contention when first getting here we'll throw away some initializations.
-      localConf = new ServerConfigurationFactory(HdfsZooInstance.getInstance());
+      localConf = ServerContext.getInstance().getServerConfFactory();
       lazyConfFactory = localConf;
     }
     return localConf;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java b/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
index f7cbe8c..15d20e5 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
@@ -23,7 +23,7 @@ import java.util.stream.Collectors;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.volume.Volume;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.fs.VolumeChooserEnvironment.ChooserScope;
@@ -164,7 +164,7 @@ public class PreferredVolumeChooser extends RandomVolumeChooser {
     ServerConfigurationFactory localConf = lazyConfFactory;
     if (localConf == null) {
       // If we're under contention when first getting here we'll throw away some initializations.
-      localConf = new ServerConfigurationFactory(HdfsZooInstance.getInstance());
+      localConf = ServerContext.getInstance().getServerConfFactory();
       lazyConfFactory = localConf;
     }
     return localConf;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
index 79c67a1..249c681 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
@@ -32,8 +32,8 @@ import org.apache.accumulo.core.protobuf.ProtobufUtil;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager.FileType;
 import org.apache.accumulo.server.replication.StatusUtil;
 import org.apache.accumulo.server.replication.proto.Replication.Status;
@@ -179,7 +179,7 @@ public class VolumeUtil {
    * configured in instance.volumes.replacements. Second, if a tablet dir is no longer configured
    * for use it chooses a new tablet directory.
    */
-  public static TabletFiles updateTabletVolumes(AccumuloServerContext context, ZooLock zooLock,
+  public static TabletFiles updateTabletVolumes(ServerContext context, ZooLock zooLock,
       VolumeManager vm, KeyExtent extent, TabletFiles tabletFiles, boolean replicate)
       throws IOException {
     List<Pair<Path,Path>> replacements = ServerConstants.getVolumeReplacements();
@@ -261,7 +261,7 @@ public class VolumeUtil {
     return ret;
   }
 
-  private static String decommisionedTabletDir(AccumuloServerContext context, ZooLock zooLock,
+  private static String decommisionedTabletDir(ServerContext context, ZooLock zooLock,
       VolumeManager vm, KeyExtent extent, String metaDir) throws IOException {
     Path dir = new Path(metaDir);
     if (isActiveVolume(dir))
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 9ca5ed1..82007c6 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@ -38,7 +38,6 @@ import java.util.UUID;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.IteratorSetting.Column;
 import org.apache.accumulo.core.client.impl.Namespace;
@@ -83,10 +82,8 @@ import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.accumulo.server.Accumulo;
-import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.constraints.MetadataConstraints;
 import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
 import org.apache.accumulo.server.fs.VolumeChooserEnvironment.ChooserScope;
@@ -394,15 +391,14 @@ public class Initialize implements KeywordExecutable {
       return false;
     }
 
-    final Instance instance = HdfsZooInstance.getInstance();
-    final ServerConfigurationFactory confFactory = new ServerConfigurationFactory(instance);
+    final ServerContext context = ServerContext.getInstance();
 
     // When we're using Kerberos authentication, we need valid credentials to perform
     // initialization. If the user provided some, use them.
     // If they did not, fall back to the credentials present in accumulo-site.xml that the servers
     // will use themselves.
     try {
-      final SiteConfiguration siteConf = confFactory.getSiteConfiguration();
+      final SiteConfiguration siteConf = context.getServerConfFactory().getSiteConfiguration();
       if (siteConf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
         final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
         // We don't have any valid creds to talk to HDFS
@@ -430,7 +426,6 @@ public class Initialize implements KeywordExecutable {
     }
 
     try {
-      AccumuloServerContext context = new AccumuloServerContext(instance, confFactory);
       initSecurity(context, opts, uuid.toString(), rootUser);
     } catch (Exception e) {
       log.error("FATAL: Failed to initialize security", e);
@@ -777,8 +772,8 @@ public class Initialize implements KeywordExecutable {
     return optionalWarning;
   }
 
-  private static void initSecurity(AccumuloServerContext context, Opts opts, String iid,
-      String rootUser) throws AccumuloSecurityException, ThriftSecurityException, IOException {
+  private static void initSecurity(ServerContext context, Opts opts, String iid, String rootUser)
+      throws AccumuloSecurityException, ThriftSecurityException, IOException {
     AuditedSecurityOperation.getInstance(context, true).initializeSecurity(context.rpcCreds(),
         rootUser, opts.rootpass);
   }
@@ -933,15 +928,13 @@ public class Initialize implements KeywordExecutable {
 
       if (opts.resetSecurity) {
         log.info("Resetting security on accumulo.");
-        Instance instance = HdfsZooInstance.getInstance();
-        AccumuloServerContext context = new AccumuloServerContext(instance,
-            new ServerConfigurationFactory(instance));
+        ServerContext context = ServerContext.getInstance();
         if (isInitialized(fs)) {
           if (!opts.forceResetSecurity) {
             ConsoleReader c = getConsoleReader();
             String userEnteredName = c.readLine("WARNING: This will remove all"
                 + " users from Accumulo! If you wish to proceed enter the instance" + " name: ");
-            if (userEnteredName != null && !instance.getInstanceName().equals(userEnteredName)) {
+            if (userEnteredName != null && !context.getInstanceName().equals(userEnteredName)) {
               log.error("Aborted reset security: Instance name did not match current instance.");
               return;
             }
@@ -949,7 +942,7 @@ public class Initialize implements KeywordExecutable {
 
           final String rootUser = getRootUserName(opts);
           opts.rootpass = getRootPassword(opts, rootUser);
-          initSecurity(context, opts, HdfsZooInstance.getInstance().getInstanceID(), rootUser);
+          initSecurity(context, opts, context.getInstanceID(), rootUser);
         } else {
           log.error("FATAL: Attempted to reset security on accumulo before it was initialized");
         }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
index 4c8be24..675e9b1 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
@@ -41,7 +41,7 @@ import org.apache.accumulo.core.data.impl.KeyExtent;
 import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.state.TabletMigration;
@@ -294,9 +294,9 @@ public class HostRegexTableLoadBalancer extends TableLoadBalancer implements Con
   }
 
   @Override
-  public void init(AccumuloServerContext context) {
+  public void init(ServerContext context) {
     super.init(context);
-    parseConfiguration(context.getServerConfigurationFactory());
+    parseConfiguration(context.getServerConfFactory());
   }
 
   @Override
@@ -524,12 +524,12 @@ public class HostRegexTableLoadBalancer extends TableLoadBalancer implements Con
 
   @Override
   public void propertyChanged(String key) {
-    parseConfiguration(context.getServerConfigurationFactory());
+    parseConfiguration(context.getServerConfFactory());
   }
 
   @Override
   public void propertiesChanged() {
-    parseConfiguration(context.getServerConfigurationFactory());
+    parseConfiguration(context.getServerConfFactory());
   }
 
   @Override
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/RegexGroupBalancer.java b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/RegexGroupBalancer.java
index 6e85a21..c7d7d67 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/RegexGroupBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/RegexGroupBalancer.java
@@ -62,8 +62,7 @@ public class RegexGroupBalancer extends GroupBalancer {
 
   @Override
   protected long getWaitTime() {
-    Map<String,String> customProps = context.getServerConfigurationFactory()
-        .getTableConfiguration(tableId)
+    Map<String,String> customProps = context.getServerConfFactory().getTableConfiguration(tableId)
         .getAllPropertiesWithPrefix(Property.TABLE_ARBITRARY_PROP_PREFIX);
     if (customProps.containsKey(WAIT_TIME_PROPERTY)) {
       return ConfigurationTypeHelper.getTimeInMillis(customProps.get(WAIT_TIME_PROPERTY));
@@ -75,8 +74,7 @@ public class RegexGroupBalancer extends GroupBalancer {
   @Override
   protected Function<KeyExtent,String> getPartitioner() {
 
-    Map<String,String> customProps = context.getServerConfigurationFactory()
-        .getTableConfiguration(tableId)
+    Map<String,String> customProps = context.getServerConfFactory().getTableConfiguration(tableId)
         .getAllPropertiesWithPrefix(Property.TABLE_ARBITRARY_PROP_PREFIX);
     String regex = customProps.get(REGEX_PROPERTY);
     final String defaultGroup = customProps.get(DEFAUT_GROUP_PROPERTY);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java
index e752a00..2306609 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java
@@ -49,7 +49,7 @@ public class TableLoadBalancer extends TabletBalancer {
   private TabletBalancer constructNewBalancerForTable(String clazzName, Table.ID tableId)
       throws Exception {
     String context = null;
-    context = this.context.getServerConfigurationFactory().getTableConfiguration(tableId)
+    context = this.context.getServerConfFactory().getTableConfiguration(tableId)
         .get(Property.TABLE_CLASSPATH);
     Class<? extends TabletBalancer> clazz;
     if (context != null && !context.equals(""))
@@ -66,7 +66,7 @@ public class TableLoadBalancer extends TabletBalancer {
     if (tableState == null)
       return null;
     if (tableState.equals(TableState.ONLINE))
-      return this.context.getServerConfigurationFactory().getTableConfiguration(table)
+      return this.context.getServerConfFactory().getTableConfiguration(table)
           .get(Property.TABLE_LOAD_BALANCER);
     return null;
   }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TabletBalancer.java b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TabletBalancer.java
index 8bd85db..e1bdc81 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TabletBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TabletBalancer.java
@@ -33,8 +33,7 @@ import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
 import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.server.AccumuloServerContext;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.state.TabletMigration;
@@ -60,17 +59,17 @@ public abstract class TabletBalancer {
 
   private static final Logger log = LoggerFactory.getLogger(TabletBalancer.class);
 
-  protected AccumuloServerContext context;
+  protected ServerContext context;
 
   /**
    * Initialize the TabletBalancer. This gives the balancer the opportunity to read the
    * configuration.
    *
-   * @deprecated since 2.0.0; use {@link #init(AccumuloServerContext)} instead.
+   * @deprecated since 2.0.0; use {@link #init(ServerContext)} instead.
    */
   @Deprecated
   public void init(ServerConfigurationFactory conf) {
-    init(new AccumuloServerContext(HdfsZooInstance.getInstance(), conf));
+    init(ServerContext.getInstance());
   }
 
   /**
@@ -79,7 +78,7 @@ public abstract class TabletBalancer {
    *
    * @since 2.0.0
    */
-  public void init(AccumuloServerContext context) {
+  public void init(ServerContext context) {
     this.context = context;
   }
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
index 1b83189..de9170d 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
@@ -29,7 +29,7 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.hadoop.fs.Path;
 
 public class MetaDataStateStore extends TabletStateStore {
@@ -52,11 +52,11 @@ public class MetaDataStateStore extends TabletStateStore {
     this(context, state, MetadataTable.NAME);
   }
 
-  protected MetaDataStateStore(AccumuloServerContext context, String tableName) {
+  protected MetaDataStateStore(ServerContext context, String tableName) {
     this(context, null, tableName);
   }
 
-  public MetaDataStateStore(AccumuloServerContext context) {
+  public MetaDataStateStore(ServerContext context) {
     this(context, MetadataTable.NAME);
   }
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/RootTabletStateStore.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/RootTabletStateStore.java
index c4a1bb4..e8e4863 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/RootTabletStateStore.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/RootTabletStateStore.java
@@ -19,7 +19,7 @@ package org.apache.accumulo.server.master.state;
 import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 
 public class RootTabletStateStore extends MetaDataStateStore {
 
@@ -27,7 +27,7 @@ public class RootTabletStateStore extends MetaDataStateStore {
     super(context, state, RootTable.NAME);
   }
 
-  public RootTabletStateStore(AccumuloServerContext context) {
+  public RootTabletStateStore(ServerContext context) {
     super(context, RootTable.NAME);
   }
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java
index b08191c..a32f03f 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java
@@ -22,7 +22,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.hadoop.fs.Path;
 
 /**
@@ -81,27 +81,27 @@ public abstract class TabletStateStore implements Iterable<TabletLocationState>
   abstract public void unsuspend(Collection<TabletLocationState> tablets)
       throws DistributedStoreException;
 
-  public static void unassign(AccumuloServerContext context, TabletLocationState tls,
+  public static void unassign(ServerContext context, TabletLocationState tls,
       Map<TServerInstance,List<Path>> logsForDeadServers) throws DistributedStoreException {
     getStoreForTablet(tls.extent, context).unassign(Collections.singletonList(tls),
         logsForDeadServers);
   }
 
-  public static void suspend(AccumuloServerContext context, TabletLocationState tls,
+  public static void suspend(ServerContext context, TabletLocationState tls,
       Map<TServerInstance,List<Path>> logsForDeadServers, long suspensionTimestamp)
       throws DistributedStoreException {
     getStoreForTablet(tls.extent, context).suspend(Collections.singletonList(tls),
         logsForDeadServers, suspensionTimestamp);
   }
 
-  public static void setLocation(AccumuloServerContext context, Assignment assignment)
+  public static void setLocation(ServerContext context, Assignment assignment)
       throws DistributedStoreException {
     getStoreForTablet(assignment.tablet, context)
         .setLocations(Collections.singletonList(assignment));
   }
 
-  protected static TabletStateStore getStoreForTablet(KeyExtent extent,
-      AccumuloServerContext context) throws DistributedStoreException {
+  protected static TabletStateStore getStoreForTablet(KeyExtent extent, ServerContext context)
+      throws DistributedStoreException {
     if (extent.isRootTablet()) {
       return new ZooTabletStateStore();
     } else if (extent.isMeta()) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooStore.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooStore.java
index ee1fd90..908b097 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooStore.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooStore.java
@@ -21,11 +21,10 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.ZooCache;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.slf4j.Logger;
@@ -46,7 +45,7 @@ public class ZooStore implements DistributedStore {
   }
 
   public ZooStore() throws IOException {
-    this(ZooUtil.getRoot(HdfsZooInstance.getInstance().getInstanceID()));
+    this(ServerContext.getInstance().getZooKeeperRoot());
   }
 
   @Override
diff --git a/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java b/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java
index 3524bc8..0955023 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java
@@ -28,17 +28,14 @@ import java.net.UnknownHostException;
 import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.Encoding;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.AccumuloServerContext;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.hadoop.io.Text;
@@ -140,38 +137,39 @@ public class ProblemReport {
     }
   }
 
-  void removeFromMetadataTable(AccumuloServerContext context) throws Exception {
+  void removeFromMetadataTable(ServerContext context) throws Exception {
     Mutation m = new Mutation(new Text("~err_" + tableId));
     m.putDelete(new Text(problemType.name()), new Text(resource));
     MetadataTableUtil.getMetadataTable(context).update(m);
   }
 
-  void saveToMetadataTable(AccumuloServerContext context) throws Exception {
+  void saveToMetadataTable(ServerContext context) throws Exception {
     Mutation m = new Mutation(new Text("~err_" + tableId));
     m.put(new Text(problemType.name()), new Text(resource), new Value(encode()));
     MetadataTableUtil.getMetadataTable(context).update(m);
   }
 
   void removeFromZooKeeper() throws Exception {
-    removeFromZooKeeper(ZooReaderWriter.getInstance(), HdfsZooInstance.getInstance());
+    removeFromZooKeeper(ZooReaderWriter.getInstance(), ServerContext.getInstance());
   }
 
-  void removeFromZooKeeper(ZooReaderWriter zoorw, Instance instance)
+  void removeFromZooKeeper(ZooReaderWriter zoorw, ServerContext context)
       throws IOException, KeeperException, InterruptedException {
-    String zpath = getZPath(instance);
+    String zpath = getZPath(context.getZooKeeperRoot());
     zoorw.recursiveDelete(zpath, NodeMissingPolicy.SKIP);
   }
 
   void saveToZooKeeper() throws Exception {
-    saveToZooKeeper(ZooReaderWriter.getInstance(), HdfsZooInstance.getInstance());
+    saveToZooKeeper(ZooReaderWriter.getInstance(), ServerContext.getInstance());
   }
 
-  void saveToZooKeeper(ZooReaderWriter zoorw, Instance instance)
+  void saveToZooKeeper(ZooReaderWriter zoorw, ServerContext context)
       throws IOException, KeeperException, InterruptedException {
-    zoorw.putPersistentData(getZPath(instance), encode(), NodeExistsPolicy.OVERWRITE);
+    zoorw.putPersistentData(getZPath(context.getZooKeeperRoot()), encode(),
+        NodeExistsPolicy.OVERWRITE);
   }
 
-  private String getZPath(Instance instance) throws IOException {
+  private String getZPath(String zkRoot) throws IOException {
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     DataOutputStream dos = new DataOutputStream(baos);
     dos.writeUTF(getTableId().canonicalID());
@@ -180,16 +178,16 @@ public class ProblemReport {
     dos.close();
     baos.close();
 
-    return ZooUtil.getRoot(instance) + Constants.ZPROBLEMS + "/"
+    return zkRoot + Constants.ZPROBLEMS + "/"
         + Encoding.encodeAsBase64FileName(new Text(baos.toByteArray()));
   }
 
   static ProblemReport decodeZooKeeperEntry(String node) throws Exception {
-    return decodeZooKeeperEntry(node, ZooReaderWriter.getInstance(), HdfsZooInstance.getInstance());
+    return decodeZooKeeperEntry(node, ZooReaderWriter.getInstance(), ServerContext.getInstance());
   }
 
-  static ProblemReport decodeZooKeeperEntry(String node, ZooReaderWriter zoorw, Instance instance)
-      throws IOException, KeeperException, InterruptedException {
+  static ProblemReport decodeZooKeeperEntry(String node, ZooReaderWriter zoorw,
+      ServerContext context) throws IOException, KeeperException, InterruptedException {
     byte bytes[] = Encoding.decodeBase64FileName(node);
 
     ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
@@ -199,7 +197,7 @@ public class ProblemReport {
     String problemType = dis.readUTF();
     String resource = dis.readUTF();
 
-    String zpath = ZooUtil.getRoot(instance) + Constants.ZPROBLEMS + "/" + node;
+    String zpath = context.getZooKeeperRoot() + Constants.ZPROBLEMS + "/" + node;
     byte[] enc = zoorw.getData(zpath, null);
 
     return new ProblemReport(tableId, ProblemType.valueOf(problemType), resource, enc);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReportingIterator.java b/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReportingIterator.java
index 36faf0e..c94de09 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReportingIterator.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReportingIterator.java
@@ -29,7 +29,7 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.system.InterruptibleIterator;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 
 public class ProblemReportingIterator implements InterruptibleIterator {
   private final SortedKeyValueIterator<Key,Value> source;
@@ -37,9 +37,9 @@ public class ProblemReportingIterator implements InterruptibleIterator {
   private final boolean continueOnError;
   private String resource;
   private Table.ID tableId;
-  private final AccumuloServerContext context;
+  private final ServerContext context;
 
-  public ProblemReportingIterator(AccumuloServerContext context, Table.ID tableId, String resource,
+  public ProblemReportingIterator(ServerContext context, Table.ID tableId, String resource,
       boolean continueOnError, SortedKeyValueIterator<Key,Value> source) {
     this.context = context;
     this.tableId = tableId;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java b/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
index c691188..c0af4d4 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
@@ -32,7 +32,6 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.impl.Table;
@@ -48,9 +47,7 @@ import org.apache.accumulo.core.util.NamingThreadFactory;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.util.LoggingRunnable;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.server.AccumuloServerContext;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.commons.collections.map.LRUMap;
@@ -74,9 +71,9 @@ public class ProblemReports implements Iterable<ProblemReport> {
   private ExecutorService reportExecutor = new ThreadPoolExecutor(0, 1, 60, TimeUnit.SECONDS,
       new LinkedBlockingQueue<>(500), new NamingThreadFactory("acu-problem-reporter"));
 
-  private final AccumuloServerContext context;
+  private final ServerContext context;
 
-  public ProblemReports(AccumuloServerContext context) {
+  public ProblemReports(ServerContext context) {
     this.context = context;
   }
 
@@ -296,7 +293,7 @@ public class ProblemReports implements Iterable<ProblemReport> {
     return iterator(null);
   }
 
-  public static synchronized ProblemReports getInstance(AccumuloServerContext context) {
+  public static synchronized ProblemReports getInstance(ServerContext context) {
     if (instance == null) {
       instance = new ProblemReports(context);
     }
@@ -305,9 +302,8 @@ public class ProblemReports implements Iterable<ProblemReport> {
   }
 
   public static void main(String args[]) throws Exception {
-    Instance instance = HdfsZooInstance.getInstance();
-    getInstance(new AccumuloServerContext(instance, new ServerConfigurationFactory(instance)))
-        .printProblems();
+    ServerContext context = ServerContext.getInstance();
+    getInstance(context).printProblems();
   }
 
   public Map<Table.ID,Map<ProblemType,Integer>> summarize() {
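
For illustration, the new entry-point pattern above written out as a complete class. This is only a sketch: the class name is invented, while ServerContext.getInstance(), ProblemReports.getInstance(ServerContext) and printProblems() are the calls shown in the hunks above.

    // Sketch of a server-side utility after this change: a single ServerContext
    // replaces the old HdfsZooInstance + ServerConfigurationFactory pair.
    import org.apache.accumulo.server.ServerContext;
    import org.apache.accumulo.server.problems.ProblemReports;

    public class PrintProblemsSketch {
      public static void main(String[] args) throws Exception {
        ServerContext context = ServerContext.getInstance(); // per the rewritten main() above
        ProblemReports.getInstance(context).printProblems();
      }
    }
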
diff --git a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java
index 82a6ab0..41bf351 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java
@@ -47,7 +47,7 @@ import org.apache.accumulo.core.replication.ReplicationTable;
 import org.apache.accumulo.core.replication.ReplicationTableOfflineException;
 import org.apache.accumulo.core.replication.ReplicationTarget;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.replication.proto.Replication.Status;
 import org.apache.accumulo.server.zookeeper.ZooCache;
@@ -63,16 +63,15 @@ public class ReplicationUtil {
   private static final Logger log = LoggerFactory.getLogger(ReplicationUtil.class);
   public static final String STATUS_FORMATTER_CLASS_NAME = StatusFormatter.class.getName();
 
-  private final AccumuloServerContext context;
+  private final ServerContext context;
   private final ZooCache zooCache;
   private final ReplicaSystemFactory factory;
 
-  public ReplicationUtil(AccumuloServerContext context) {
+  public ReplicationUtil(ServerContext context) {
     this(context, new ZooCache(), new ReplicaSystemFactory());
   }
 
-  public ReplicationUtil(AccumuloServerContext context, ZooCache cache,
-      ReplicaSystemFactory factory) {
+  public ReplicationUtil(ServerContext context, ZooCache cache, ReplicaSystemFactory factory) {
     this.zooCache = cache;
     this.context = context;
     this.factory = factory;
@@ -138,8 +137,7 @@ public class ReplicationUtil {
         continue;
       }
 
-      TableConfiguration tableConf = context.getServerConfigurationFactory()
-          .getTableConfiguration(localId);
+      TableConfiguration tableConf = context.getServerConfFactory().getTableConfiguration(localId);
       if (null == tableConf) {
         log.trace("Could not get configuration for table {} (it no longer exists)", table);
         continue;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java b/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
index c8cba68..7e3f8aa 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
@@ -41,7 +41,7 @@ import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.core.util.HostAndPort;
 import org.apache.accumulo.core.util.SimpleThreadPool;
 import org.apache.accumulo.fate.util.LoggingRunnable;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.util.Halt;
 import org.apache.accumulo.server.util.time.SimpleTimer;
 import org.apache.hadoop.security.SaslRpcServer;
@@ -114,7 +114,7 @@ public class TServerUtils {
    * @throws UnknownHostException
    *           when we don't know our own address
    */
-  public static ServerAddress startServer(AccumuloServerContext service, String hostname,
+  public static ServerAddress startServer(ServerContext service, String hostname,
       Property portHintProperty, TProcessor processor, String serverName, String threadName,
       Property portSearchProperty, Property minThreadProperty,
       Property timeBetweenThreadChecksProperty, Property maxMessageSizeProperty)
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java b/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
index a98212c..f4d0bfb 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
@@ -45,7 +45,7 @@ import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ByteBufferUtil;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.rpc.TServerUtils;
 import org.apache.accumulo.server.security.handler.Authenticator;
 import org.apache.accumulo.server.security.handler.Authorizor;
@@ -59,16 +59,16 @@ public class AuditedSecurityOperation extends SecurityOperation {
   public static final String AUDITLOG = "org.apache.accumulo.audit";
   public static final Logger audit = LoggerFactory.getLogger(AUDITLOG);
 
-  public AuditedSecurityOperation(AccumuloServerContext context, Authorizor author,
-      Authenticator authent, PermissionHandler pm) {
+  public AuditedSecurityOperation(ServerContext context, Authorizor author, Authenticator authent,
+      PermissionHandler pm) {
     super(context, author, authent, pm);
   }
 
-  public static synchronized SecurityOperation getInstance(AccumuloServerContext context) {
+  public static synchronized SecurityOperation getInstance(ServerContext context) {
     return getInstance(context, false);
   }
 
-  public static synchronized SecurityOperation getInstance(AccumuloServerContext context,
+  public static synchronized SecurityOperation getInstance(ServerContext context,
       boolean initialize) {
     if (instance == null) {
       String instanceId = context.getInstanceID();
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java b/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
index 92b55da..7679457 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
@@ -49,7 +49,7 @@ import org.apache.accumulo.core.security.NamespacePermission;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.security.handler.Authenticator;
 import org.apache.accumulo.server.security.handler.Authorizor;
 import org.apache.accumulo.server.security.handler.KerberosAuthenticator;
@@ -76,11 +76,11 @@ public class SecurityOperation {
   private final ZooCache zooCache;
   private final String ZKUserPath;
 
-  protected final AccumuloServerContext context;
+  protected final ServerContext context;
 
   static SecurityOperation instance;
 
-  public static synchronized SecurityOperation getInstance(AccumuloServerContext context,
+  public static synchronized SecurityOperation getInstance(ServerContext context,
       boolean initialize) {
     if (instance == null) {
       String instanceId = context.getInstanceID();
@@ -116,13 +116,13 @@ public class SecurityOperation {
     return toRet;
   }
 
-  protected SecurityOperation(AccumuloServerContext context) {
+  protected SecurityOperation(ServerContext context) {
     this.context = context;
     ZKUserPath = Constants.ZROOT + "/" + context.getInstanceID() + "/users";
     zooCache = new ZooCache();
   }
 
-  public SecurityOperation(AccumuloServerContext context, Authorizor author, Authenticator authent,
+  public SecurityOperation(ServerContext context, Authorizor author, Authenticator authent,
       PermissionHandler pm) {
     this(context);
     authorizor = author;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java b/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
index 29d66ed..31a4bad 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/SystemCredentials.java
@@ -27,7 +27,6 @@ import java.util.Base64;
 import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Credentials;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
@@ -49,12 +48,12 @@ public final class SystemCredentials extends Credentials {
 
   private final TCredentials AS_THRIFT;
 
-  public SystemCredentials(Instance instance, String principal, AuthenticationToken token) {
+  public SystemCredentials(String instanceID, String principal, AuthenticationToken token) {
     super(principal, token);
-    AS_THRIFT = super.toThrift(instance);
+    AS_THRIFT = super.toThrift(instanceID);
   }
 
-  public static SystemCredentials get(Instance instance) {
+  public static SystemCredentials get(String instanceID) {
     String principal = SYSTEM_PRINCIPAL;
     AccumuloConfiguration conf = SiteConfiguration.getInstance();
     if (conf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
@@ -66,14 +65,14 @@ public final class SystemCredentials extends Credentials {
       // principal
       principal = SecurityUtil.getServerPrincipal(conf.get(Property.GENERAL_KERBEROS_PRINCIPAL));
     }
-    return new SystemCredentials(instance, principal, SystemToken.get(instance));
+    return new SystemCredentials(instanceID, principal, SystemToken.get(instanceID));
   }
 
   @Override
-  public TCredentials toThrift(Instance instance) {
-    if (!AS_THRIFT.getInstanceId().equals(instance.getInstanceID()))
+  public TCredentials toThrift(String instanceID) {
+    if (!AS_THRIFT.getInstanceId().equals(instanceID))
       throw new IllegalArgumentException("Unexpected instance used for "
-          + SystemCredentials.class.getSimpleName() + ": " + instance.getInstanceID());
+          + SystemCredentials.class.getSimpleName() + ": " + instanceID);
     return AS_THRIFT;
   }
 
@@ -93,8 +92,8 @@ public final class SystemCredentials extends Credentials {
       super(systemPassword);
     }
 
-    private static SystemToken get(Instance instance) {
-      byte[] instanceIdBytes = instance.getInstanceID().getBytes(UTF_8);
+    private static SystemToken get(String instanceID) {
+      byte[] instanceIdBytes = instanceID.getBytes(UTF_8);
       byte[] confChecksum;
       MessageDigest md;
       try {
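
A hedged usage sketch of the reworked SystemCredentials API: callers now pass the instance ID string instead of an Instance. SystemCredentials.get(String) and toThrift(String) come from the hunks above; the class name and the use of ServerContext.getInstance() to obtain the ID are illustrative.

    import org.apache.accumulo.core.security.thrift.TCredentials;
    import org.apache.accumulo.server.ServerContext;
    import org.apache.accumulo.server.security.SystemCredentials;

    public class SystemCredentialsSketch {
      public static void main(String[] args) {
        // The instance ID string is now the key for system credentials.
        String instanceID = ServerContext.getInstance().getInstanceID();
        SystemCredentials creds = SystemCredentials.get(instanceID);
        // toThrift also takes the ID and checks it against the cached TCredentials.
        TCredentials tcreds = creds.toThrift(instanceID);
        System.out.println(tcreds.getInstanceId());
      }
    }
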
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenSecretManager.java b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenSecretManager.java
index f5048b0..f5e48ea 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenSecretManager.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenSecretManager.java
@@ -30,7 +30,6 @@ import java.util.concurrent.TimeUnit;
 import javax.crypto.SecretKey;
 
 import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
 import org.apache.accumulo.core.client.impl.AuthenticationTokenIdentifier;
 import org.apache.accumulo.core.client.impl.DelegationTokenImpl;
@@ -58,7 +57,7 @@ public class AuthenticationTokenSecretManager extends SecretManager<Authenticati
 
   private static final Logger log = LoggerFactory.getLogger(AuthenticationTokenSecretManager.class);
 
-  private final Instance instance;
+  private final String instanceID;
   private final long tokenMaxLifetime;
   private final ConcurrentHashMap<Integer,AuthenticationKey> allKeys = new ConcurrentHashMap<>();
   private AuthenticationKey currentKey;
@@ -66,15 +65,15 @@ public class AuthenticationTokenSecretManager extends SecretManager<Authenticati
   /**
    * Create a new secret manager instance for generating keys.
    *
-   * @param instance
-   *          Accumulo instance
+   * @param instanceID
+   *          Accumulo instance ID
    * @param tokenMaxLifetime
    *          Maximum age (in milliseconds) before a token expires and is no longer valid
    */
-  public AuthenticationTokenSecretManager(Instance instance, long tokenMaxLifetime) {
-    requireNonNull(instance);
+  public AuthenticationTokenSecretManager(String instanceID, long tokenMaxLifetime) {
+    requireNonNull(instanceID);
     checkArgument(tokenMaxLifetime > 0, "Max lifetime must be positive");
-    this.instance = instance;
+    this.instanceID = instanceID;
     this.tokenMaxLifetime = tokenMaxLifetime;
   }
 
@@ -115,7 +114,7 @@ public class AuthenticationTokenSecretManager extends SecretManager<Authenticati
       }
     }
 
-    identifier.setInstanceId(instance.getInstanceID());
+    identifier.setInstanceId(instanceID);
     return createPassword(identifier.getBytes(), secretKey.getKey());
   }
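
Likewise for the delegation-token secret manager, a small sketch assuming the (String, long) constructor shown above; the seven-day lifetime is an arbitrary illustrative value (the constructor only requires a positive number of milliseconds).

    import java.util.concurrent.TimeUnit;
    import org.apache.accumulo.server.ServerContext;
    import org.apache.accumulo.server.security.delegation.AuthenticationTokenSecretManager;

    public class SecretManagerSketch {
      public static void main(String[] args) {
        String instanceID = ServerContext.getInstance().getInstanceID();
        long tokenMaxLifetime = TimeUnit.DAYS.toMillis(7); // illustrative; must be > 0
        // The manager is now keyed by the instance ID string, not an Instance object.
        AuthenticationTokenSecretManager mgr =
            new AuthenticationTokenSecretManager(instanceID, tokenMaxLifetime);
      }
    }
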
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/tables/TableManager.java b/server/base/src/main/java/org/apache/accumulo/server/tables/TableManager.java
index 4a8c8d1..20d1358 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/tables/TableManager.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/tables/TableManager.java
@@ -26,7 +26,6 @@ import java.util.Map.Entry;
 import java.util.Set;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.NamespaceNotFoundException;
 import org.apache.accumulo.core.client.impl.Namespace;
 import org.apache.accumulo.core.client.impl.Table;
@@ -38,7 +37,7 @@ import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.util.TablePropUtil;
 import org.apache.accumulo.server.zookeeper.ZooCache;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
@@ -60,7 +59,8 @@ public class TableManager {
 
   private static TableManager tableManager = null;
 
-  private final Instance instance;
+  private final String zkRoot;
+  private final String instanceID;
   private ZooCache zooStateCache;
 
   public static void prepareNewNamespaceState(String instanceId, Namespace.ID namespaceId,
@@ -106,7 +106,9 @@ public class TableManager {
   }
 
   private TableManager() {
-    instance = HdfsZooInstance.getInstance();
+    ServerContext context = ServerContext.getInstance();
+    zkRoot = context.getZooKeeperRoot();
+    instanceID = context.getInstanceID();
     zooStateCache = new ZooCache(new TableStateWatcher());
     updateTableStateCache();
   }
@@ -154,8 +156,7 @@ public class TableManager {
   }
 
   public synchronized void transitionTableState(final Table.ID tableId, final TableState newState) {
-    String statePath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZTABLES + "/"
-        + tableId + Constants.ZTABLE_STATE;
+    String statePath = zkRoot + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_STATE;
 
     try {
       ZooReaderWriter.getInstance().mutate(statePath, newState.name().getBytes(UTF_8),
@@ -199,10 +200,9 @@ public class TableManager {
 
   private void updateTableStateCache() {
     synchronized (tableStateCache) {
-      for (String tableId : zooStateCache
-          .getChildren(ZooUtil.getRoot(instance) + Constants.ZTABLES))
-        if (zooStateCache.get(ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId
-            + Constants.ZTABLE_STATE) != null)
+      for (String tableId : zooStateCache.getChildren(zkRoot + Constants.ZTABLES))
+        if (zooStateCache
+            .get(zkRoot + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_STATE) != null)
           updateTableStateCache(Table.ID.of(tableId));
     }
   }
@@ -210,8 +210,8 @@ public class TableManager {
   public TableState updateTableStateCache(Table.ID tableId) {
     synchronized (tableStateCache) {
       TableState tState = TableState.UNKNOWN;
-      byte[] data = zooStateCache.get(
-          ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_STATE);
+      byte[] data = zooStateCache
+          .get(zkRoot + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_STATE);
       if (data != null) {
         String sState = new String(data, UTF_8);
         try {
@@ -228,21 +228,19 @@ public class TableManager {
   public void addTable(Table.ID tableId, Namespace.ID namespaceId, String tableName,
       NodeExistsPolicy existsPolicy)
       throws KeeperException, InterruptedException, NamespaceNotFoundException {
-    prepareNewTableState(instance.getInstanceID(), tableId, namespaceId, tableName, TableState.NEW,
-        existsPolicy);
+    prepareNewTableState(instanceID, tableId, namespaceId, tableName, TableState.NEW, existsPolicy);
     updateTableStateCache(tableId);
   }
 
   public void cloneTable(Table.ID srcTableId, Table.ID tableId, String tableName,
       Namespace.ID namespaceId, Map<String,String> propertiesToSet, Set<String> propertiesToExclude,
       NodeExistsPolicy existsPolicy) throws KeeperException, InterruptedException {
-    prepareNewTableState(instance.getInstanceID(), tableId, namespaceId, tableName, TableState.NEW,
-        existsPolicy);
+    prepareNewTableState(instanceID, tableId, namespaceId, tableName, TableState.NEW, existsPolicy);
 
-    String srcTablePath = Constants.ZROOT + "/" + instance.getInstanceID() + Constants.ZTABLES + "/"
-        + srcTableId + Constants.ZTABLE_CONF;
-    String newTablePath = Constants.ZROOT + "/" + instance.getInstanceID() + Constants.ZTABLES + "/"
-        + tableId + Constants.ZTABLE_CONF;
+    String srcTablePath = Constants.ZROOT + "/" + instanceID + Constants.ZTABLES + "/" + srcTableId
+        + Constants.ZTABLE_CONF;
+    String newTablePath = Constants.ZROOT + "/" + instanceID + Constants.ZTABLES + "/" + tableId
+        + Constants.ZTABLE_CONF;
     ZooReaderWriter.getInstance().recursiveCopyPersistent(srcTablePath, newTablePath,
         NodeExistsPolicy.OVERWRITE);
 
@@ -250,7 +248,7 @@ public class TableManager {
       TablePropUtil.setTableProperty(tableId, entry.getKey(), entry.getValue());
 
     for (String prop : propertiesToExclude)
-      ZooReaderWriter.getInstance().recursiveDelete(Constants.ZROOT + "/" + instance.getInstanceID()
+      ZooReaderWriter.getInstance().recursiveDelete(Constants.ZROOT + "/" + instanceID
           + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_CONF + "/" + prop,
           NodeMissingPolicy.SKIP);
 
@@ -261,10 +259,10 @@ public class TableManager {
     synchronized (tableStateCache) {
       tableStateCache.remove(tableId);
       ZooReaderWriter.getInstance().recursiveDelete(
-          ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_STATE,
+          zkRoot + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_STATE,
+          NodeMissingPolicy.SKIP);
+      ZooReaderWriter.getInstance().recursiveDelete(zkRoot + Constants.ZTABLES + "/" + tableId,
           NodeMissingPolicy.SKIP);
-      ZooReaderWriter.getInstance().recursiveDelete(
-          ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId, NodeMissingPolicy.SKIP);
     }
   }
 
@@ -286,7 +284,7 @@ public class TableManager {
       final String zPath = event.getPath();
       final EventType zType = event.getType();
 
-      String tablesPrefix = ZooUtil.getRoot(instance) + Constants.ZTABLES;
+      String tablesPrefix = zkRoot + Constants.ZTABLES;
       Table.ID tableId = null;
 
       if (zPath != null && zPath.startsWith(tablesPrefix + "/")) {
@@ -352,8 +350,7 @@ public class TableManager {
   public void removeNamespace(Namespace.ID namespaceId)
       throws KeeperException, InterruptedException {
     ZooReaderWriter.getInstance().recursiveDelete(
-        ZooUtil.getRoot(instance) + Constants.ZNAMESPACES + "/" + namespaceId,
-        NodeMissingPolicy.SKIP);
+        zkRoot + Constants.ZNAMESPACES + "/" + namespaceId, NodeMissingPolicy.SKIP);
   }
 
 }
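
The TableManager changes above show the general ZooKeeper-path pattern in this commit: paths are built from context.getZooKeeperRoot() rather than ZooUtil.getRoot(instance). A minimal sketch, with an invented table id; the constants and Table.ID.of(...) are as used in the hunks above.

    import org.apache.accumulo.core.Constants;
    import org.apache.accumulo.core.client.impl.Table;
    import org.apache.accumulo.server.ServerContext;

    public class TableStatePathSketch {
      public static void main(String[] args) {
        ServerContext context = ServerContext.getInstance();
        Table.ID tableId = Table.ID.of("42"); // illustrative id only
        // Mirrors the statePath built in transitionTableState() above.
        String statePath = context.getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId
            + Constants.ZTABLE_STATE;
        System.out.println(statePath);
      }
    }
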
diff --git a/server/base/src/main/java/org/apache/accumulo/server/tablets/UniqueNameAllocator.java b/server/base/src/main/java/org/apache/accumulo/server/tablets/UniqueNameAllocator.java
index 4559a0d..72b4292 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/tablets/UniqueNameAllocator.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/tablets/UniqueNameAllocator.java
@@ -23,7 +23,7 @@ import java.util.Random;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.util.FastFormat;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 
 /**
@@ -39,7 +39,7 @@ public class UniqueNameAllocator {
   private Random rand;
 
   private UniqueNameAllocator() {
-    nextNamePath = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID()
+    nextNamePath = Constants.ZROOT + "/" + ServerContext.getInstance().getInstanceID()
         + Constants.ZNEXT_FILE;
     rand = new Random();
   }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java b/server/base/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java
index 8fd16d1..ec3c023 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java
@@ -22,12 +22,10 @@ import java.util.List;
 import java.util.Map.Entry;
 import java.util.TreeMap;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -150,7 +148,7 @@ public class LargestFirstMemoryManager implements MemoryManager {
     return mincIdleThresholds.get(tableId);
   }
 
-  protected boolean tableExists(Instance instance, Table.ID tableId) {
+  protected boolean tableExists(Table.ID tableId) {
     // make sure that the table still exists by checking if it has a configuration
     return config.getTableConfiguration(tableId) != null;
   }
@@ -161,7 +159,6 @@ public class LargestFirstMemoryManager implements MemoryManager {
       throw new IllegalStateException(
           "need to initialize " + LargestFirstMemoryManager.class.getName());
 
-    final Instance instance = HdfsZooInstance.getInstance();
     final int maxMinCs = maxConcurrentMincs * numWaitingMultiplier;
 
     mincIdleThresholds.clear();
@@ -179,7 +176,7 @@ public class LargestFirstMemoryManager implements MemoryManager {
     // find the largest and most idle tablets
     for (TabletState ts : tablets) {
       // Make sure that the table still exists
-      if (!tableExists(instance, ts.getExtent().getTableId())) {
+      if (!tableExists(ts.getExtent().getTableId())) {
         log.trace("Ignoring extent for deleted table: {}", ts.getExtent());
         continue;
       }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java b/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
index 521d2df..61c05f1 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
@@ -37,7 +37,6 @@ import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.NamespaceNotFoundException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.admin.InstanceOperations;
@@ -55,13 +54,10 @@ import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.trace.Tracer;
 import org.apache.accumulo.core.util.AddressUtil;
 import org.apache.accumulo.core.util.HostAndPort;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
 import org.apache.accumulo.fate.zookeeper.ZooLock;
-import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.cli.ClientOpts;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.security.SecurityUtil;
 import org.apache.accumulo.start.spi.KeywordExecutable;
 import org.apache.hadoop.conf.Configuration;
@@ -217,11 +213,8 @@ public class Admin implements KeywordExecutable {
       SecurityUtil.serverLogin(siteConf);
     }
 
-    Instance instance = opts.getInstance();
-    ServerConfigurationFactory confFactory = new ServerConfigurationFactory(instance);
-
     try {
-      ClientContext context = new AccumuloServerContext(instance, confFactory);
+      ClientContext context = opts.getServerContext();
 
       int rc = 0;
 
@@ -373,8 +366,7 @@ public class Admin implements KeywordExecutable {
       log.info("No masters running. Not attempting safe unload of tserver.");
       return;
     }
-    final Instance instance = context.getInstance();
-    final String zTServerRoot = getTServersZkPath(instance);
+    final String zTServerRoot = getTServersZkPath(context);
     final ZooCache zc = new ZooCacheFactory().getZooCache(context.getZooKeepers(),
         context.getZooKeepersSessionTimeOut());
     for (String server : servers) {
@@ -392,14 +384,13 @@ public class Admin implements KeywordExecutable {
   /**
    * Get the parent ZNode for tservers for the given instance
    *
-   * @param instance
-   *          The Instance
+   * @param context
+   *          ClientContext
    * @return The tservers znode for the instance
    */
-  static String getTServersZkPath(Instance instance) {
-    requireNonNull(instance);
-    final String instanceRoot = ZooUtil.getRoot(instance);
-    return instanceRoot + Constants.ZTSERVERS;
+  static String getTServersZkPath(ClientContext context) {
+    requireNonNull(context);
+    return context.getZooKeeperRoot() + Constants.ZTSERVERS;
   }
 
   /**
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ChangeSecret.java b/server/base/src/main/java/org/apache/accumulo/server/util/ChangeSecret.java
index 8089a7d..cb8c5b4 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ChangeSecret.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ChangeSecret.java
@@ -24,14 +24,13 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.UUID;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.volume.Volume;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
@@ -69,14 +68,14 @@ public class ChangeSecret {
     argsList.addAll(Arrays.asList(args));
     opts.parseArgs(ChangeSecret.class.getName(), argsList.toArray(new String[0]));
 
-    Instance inst = opts.getInstance();
-    verifyAccumuloIsDown(inst, opts.oldPass);
+    ServerContext context = opts.getServerContext();
+    verifyAccumuloIsDown(context, opts.oldPass);
 
     final String newInstanceId = UUID.randomUUID().toString();
-    updateHdfs(fs, inst, newInstanceId);
-    rewriteZooKeeperInstance(inst, newInstanceId, opts.oldPass, opts.newPass);
+    updateHdfs(fs, newInstanceId);
+    rewriteZooKeeperInstance(context, newInstanceId, opts.oldPass, opts.newPass);
     if (opts.oldPass != null) {
-      deleteInstance(inst, opts.oldPass);
+      deleteInstance(context, opts.oldPass);
     }
     System.out.println("New instance id is " + newInstanceId);
     System.out.println("Be sure to put your new secret in accumulo-site.xml");
@@ -97,10 +96,11 @@ public class ChangeSecret {
     }
   }
 
-  private static void verifyAccumuloIsDown(Instance inst, String oldPassword) throws Exception {
-    ZooReader zooReader = new ZooReaderWriter(inst.getZooKeepers(),
-        inst.getZooKeepersSessionTimeOut(), oldPassword);
-    String root = ZooUtil.getRoot(inst);
+  private static void verifyAccumuloIsDown(ServerContext context, String oldPassword)
+      throws Exception {
+    ZooReader zooReader = new ZooReaderWriter(context.getZooKeepers(),
+        context.getZooKeepersSessionTimeOut(), oldPassword);
+    String root = context.getZooKeeperRoot();
     final List<String> ephemerals = new ArrayList<>();
     recurse(zooReader, root, new Visitor() {
       @Override
@@ -119,18 +119,18 @@ public class ChangeSecret {
     }
   }
 
-  private static void rewriteZooKeeperInstance(final Instance inst, final String newInstanceId,
-      String oldPass, String newPass) throws Exception {
-    final ZooReaderWriter orig = new ZooReaderWriter(inst.getZooKeepers(),
-        inst.getZooKeepersSessionTimeOut(), oldPass);
-    final IZooReaderWriter new_ = new ZooReaderWriter(inst.getZooKeepers(),
-        inst.getZooKeepersSessionTimeOut(), newPass);
+  private static void rewriteZooKeeperInstance(final ServerContext context,
+      final String newInstanceId, String oldPass, String newPass) throws Exception {
+    final ZooReaderWriter orig = new ZooReaderWriter(context.getZooKeepers(),
+        context.getZooKeepersSessionTimeOut(), oldPass);
+    final IZooReaderWriter new_ = new ZooReaderWriter(context.getZooKeepers(),
+        context.getZooKeepersSessionTimeOut(), newPass);
 
-    String root = ZooUtil.getRoot(inst);
+    String root = context.getZooKeeperRoot();
     recurse(orig, root, new Visitor() {
       @Override
       public void visit(ZooReader zoo, String path) throws Exception {
-        String newPath = path.replace(inst.getInstanceID(), newInstanceId);
+        String newPath = path.replace(context.getInstanceID(), newInstanceId);
         byte[] data = zoo.getData(path, null);
         List<ACL> acls = orig.getZooKeeper().getACL(path, new Stat());
         if (acls.containsAll(Ids.READ_ACL_UNSAFE)) {
@@ -152,13 +152,12 @@ public class ChangeSecret {
         }
       }
     });
-    String path = "/accumulo/instances/" + inst.getInstanceName();
+    String path = "/accumulo/instances/" + context.getInstanceName();
     orig.recursiveDelete(path, NodeMissingPolicy.SKIP);
     new_.putPersistentData(path, newInstanceId.getBytes(UTF_8), NodeExistsPolicy.OVERWRITE);
   }
 
-  private static void updateHdfs(VolumeManager fs, Instance inst, String newInstanceId)
-      throws IOException {
+  private static void updateHdfs(VolumeManager fs, String newInstanceId) throws IOException {
     // Need to recreate the instanceId on all of them to keep consistency
     for (Volume v : fs.getVolumes()) {
       final Path instanceId = ServerConstants.getInstanceIdLocation(v);
@@ -204,9 +203,9 @@ public class ChangeSecret {
         stat.getPath(), stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
   }
 
-  private static void deleteInstance(Instance origInstance, String oldPass) throws Exception {
-    IZooReaderWriter orig = new ZooReaderWriter(origInstance.getZooKeepers(),
-        origInstance.getZooKeepersSessionTimeOut(), oldPass);
-    orig.recursiveDelete("/accumulo/" + origInstance.getInstanceID(), NodeMissingPolicy.SKIP);
+  private static void deleteInstance(ServerContext context, String oldPass) throws Exception {
+    IZooReaderWriter orig = new ZooReaderWriter(context.getZooKeepers(),
+        context.getZooKeepersSessionTimeOut(), oldPass);
+    orig.recursiveDelete("/accumulo/" + context.getInstanceID(), NodeMissingPolicy.SKIP);
   }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/CleanZookeeper.java b/server/base/src/main/java/org/apache/accumulo/server/util/CleanZookeeper.java
index 17b06cf..d6a31fc 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/CleanZookeeper.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/CleanZookeeper.java
@@ -18,13 +18,11 @@ package org.apache.accumulo.server.util;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 
-import java.io.IOException;
-
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -47,10 +45,8 @@ public class CleanZookeeper {
    * @param args
    *          must contain one element: the address of a zookeeper node a second parameter provides
    *          an additional authentication value
-   * @throws IOException
-   *           error connecting to accumulo or zookeeper
    */
-  public static void main(String[] args) throws IOException {
+  public static void main(String[] args) {
     Opts opts = new Opts();
     opts.parseArgs(CleanZookeeper.class.getName(), args);
 
@@ -60,14 +56,15 @@ public class CleanZookeeper {
       zk.getZooKeeper().addAuthInfo("digest", ("accumulo:" + opts.auth).getBytes(UTF_8));
     }
 
+    ServerContext context = ServerContext.getInstance();
+
     try {
       for (String child : zk.getChildren(root)) {
         if (Constants.ZINSTANCES.equals("/" + child)) {
           for (String instanceName : zk.getChildren(root + Constants.ZINSTANCES)) {
             String instanceNamePath = root + Constants.ZINSTANCES + "/" + instanceName;
             byte[] id = zk.getData(instanceNamePath, null);
-            if (id != null
-                && !new String(id, UTF_8).equals(HdfsZooInstance.getInstance().getInstanceID())) {
+            if (id != null && !new String(id, UTF_8).equals(context.getInstanceID())) {
               try {
                 zk.recursiveDelete(instanceNamePath, NodeMissingPolicy.SKIP);
               } catch (KeeperException.NoAuthException ex) {
@@ -75,7 +72,7 @@ public class CleanZookeeper {
               }
             }
           }
-        } else if (!child.equals(HdfsZooInstance.getInstance().getInstanceID())) {
+        } else if (!child.equals(context.getInstanceID())) {
           String path = root + "/" + child;
           try {
             zk.recursiveDelete(path, NodeMissingPolicy.SKIP);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java b/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
index 675d2ba..3bdc2ce 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
@@ -21,7 +21,6 @@ import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.Table;
@@ -32,9 +31,8 @@ import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.cli.ClientOpts;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.master.LiveTServerSet;
 import org.apache.accumulo.server.master.LiveTServerSet.Listener;
 import org.apache.accumulo.server.master.state.DistributedStoreException;
@@ -53,9 +51,7 @@ public class FindOfflineTablets {
   public static void main(String[] args) throws Exception {
     ClientOpts opts = new ClientOpts();
     opts.parseArgs(FindOfflineTablets.class.getName(), args);
-    Instance instance = opts.getInstance();
-    AccumuloServerContext context = new AccumuloServerContext(instance,
-        new ServerConfigurationFactory(opts.getInstance()));
+    ServerContext context = opts.getServerContext();
     findOffline(context, null);
   }
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/Info.java b/server/base/src/main/java/org/apache/accumulo/server/util/Info.java
index 0c856ef..e5c15c2 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/Info.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/Info.java
@@ -16,9 +16,8 @@
  */
 package org.apache.accumulo.server.util;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.util.MonitorUtil;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.start.spi.KeywordExecutable;
 import org.apache.zookeeper.KeeperException;
 
@@ -44,10 +43,10 @@ public class Info implements KeywordExecutable {
 
   @Override
   public void execute(final String[] args) throws KeeperException, InterruptedException {
-    Instance instance = HdfsZooInstance.getInstance();
-    System.out.println("monitor: " + MonitorUtil.getLocation(instance));
-    System.out.println("masters: " + instance.getMasterLocations());
-    System.out.println("zookeepers: " + instance.getZooKeepers());
+    ServerContext context = ServerContext.getInstance();
+    System.out.println("monitor: " + MonitorUtil.getLocation(context));
+    System.out.println("masters: " + context.getMasterLocations());
+    System.out.println("zookeepers: " + context.getZooKeepers());
   }
 
   public static void main(String[] args) throws Exception {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java b/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
index 0f5bf5f..3dafd0c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
@@ -20,7 +20,6 @@ import java.util.ArrayList;
 import java.util.Map.Entry;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.data.Key;
@@ -30,9 +29,7 @@ import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
-import org.apache.accumulo.server.AccumuloServerContext;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager.FileType;
 import org.apache.accumulo.server.log.WalStateManager;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
@@ -41,8 +38,7 @@ import org.apache.hadoop.fs.Path;
 public class ListVolumesUsed {
 
   public static void main(String[] args) throws Exception {
-    Instance instance = HdfsZooInstance.getInstance();
-    listVolumes(new AccumuloServerContext(instance, new ServerConfigurationFactory(instance)));
+    listVolumes(ServerContext.getInstance());
   }
 
   private static String getTableURI(String rootTabletDir) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/LoginProperties.java b/server/base/src/main/java/org/apache/accumulo/server/util/LoginProperties.java
index 34d7113..01be214 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/LoginProperties.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/LoginProperties.java
@@ -24,8 +24,7 @@ import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.TokenProperty;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.security.handler.Authenticator;
 import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
 import org.apache.accumulo.start.spi.KeywordExecutable;
@@ -47,7 +46,7 @@ public class LoginProperties implements KeywordExecutable {
 
   @Override
   public void execute(String[] args) throws Exception {
-    AccumuloConfiguration config = new ServerConfigurationFactory(HdfsZooInstance.getInstance())
+    AccumuloConfiguration config = ServerContext.getInstance().getServerConfFactory()
         .getSystemConfiguration();
     Authenticator authenticator = AccumuloVFSClassLoader.getClassLoader()
         .loadClass(config.get(Property.INSTANCE_SECURITY_AUTHENTICATOR))
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
index efaf6f0..e47a9e1 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -80,13 +80,11 @@ import org.apache.accumulo.core.tabletserver.thrift.ConstraintViolationException
 import org.apache.accumulo.core.util.ColumnFQ;
 import org.apache.accumulo.core.util.FastFormat;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
 import org.apache.accumulo.server.fs.VolumeManager;
@@ -138,7 +136,7 @@ public class MetadataTableUtil {
 
   public static void putLockID(ZooLock zooLock, Mutation m) {
     TabletsSection.ServerColumnFamily.LOCK_COLUMN.put(m, new Value(zooLock.getLockID()
-        .serialize(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + "/").getBytes(UTF_8)));
+        .serialize(ServerContext.getInstance().getZooKeeperRoot() + "/").getBytes(UTF_8)));
   }
 
   private static void update(ClientContext context, Mutation m, KeyExtent extent) {
@@ -223,7 +221,7 @@ public class MetadataTableUtil {
   public static void updateTabletVolumes(KeyExtent extent, List<LogEntry> logsToRemove,
       List<LogEntry> logsToAdd, List<FileRef> filesToRemove,
       SortedMap<FileRef,DataFileValue> filesToAdd, String newDir, ZooLock zooLock,
-      AccumuloServerContext context) {
+      ServerContext context) {
 
     if (extent.isRootTablet()) {
       if (newDir != null)
@@ -280,7 +278,7 @@ public class MetadataTableUtil {
     }
   }
 
-  private static void addRootLogEntry(AccumuloServerContext context, ZooLock zooLock,
+  private static void addRootLogEntry(ServerContext context, ZooLock zooLock,
       final LogEntry entry) {
     retryZooKeeperUpdate(context, zooLock, new ZooOperation() {
       @Override
@@ -375,7 +373,7 @@ public class MetadataTableUtil {
     }
   }
 
-  public static void addDeleteEntry(AccumuloServerContext context, Table.ID tableId, String path)
+  public static void addDeleteEntry(ServerContext context, Table.ID tableId, String path)
       throws IOException {
     update(context, createDeleteMutation(tableId, path), new KeyExtent(tableId, null, null));
   }
@@ -506,12 +504,12 @@ public class MetadataTableUtil {
   }
 
   static String getZookeeperLogLocation() {
-    return ZooUtil.getRoot(HdfsZooInstance.getInstance()) + RootTable.ZROOT_TABLET_WALOGS;
+    return ServerContext.getInstance().getZooKeeperRoot() + RootTable.ZROOT_TABLET_WALOGS;
   }
 
   public static void setRootTabletDir(String dir) throws IOException {
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-    String zpath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + RootTable.ZROOT_TABLET_PATH;
+    String zpath = ServerContext.getInstance().getZooKeeperRoot() + RootTable.ZROOT_TABLET_PATH;
     try {
       zoo.putPersistentData(zpath, dir.getBytes(UTF_8), -1, NodeExistsPolicy.OVERWRITE);
     } catch (KeeperException e) {
@@ -524,7 +522,7 @@ public class MetadataTableUtil {
 
   public static String getRootTabletDir() throws IOException {
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-    String zpath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + RootTable.ZROOT_TABLET_PATH;
+    String zpath = ServerContext.getInstance().getZooKeeperRoot() + RootTable.ZROOT_TABLET_PATH;
     try {
       return new String(zoo.getData(zpath, null), UTF_8);
     } catch (KeeperException e) {
@@ -693,7 +691,7 @@ public class MetadataTableUtil {
     return new LogEntryIterator(context);
   }
 
-  public static void removeUnusedWALEntries(AccumuloServerContext context, KeyExtent extent,
+  public static void removeUnusedWALEntries(ServerContext context, KeyExtent extent,
       final List<LogEntry> entries, ZooLock zooLock) {
     if (extent.isRootTablet()) {
       retryZooKeeperUpdate(context, zooLock, new ZooOperation() {
@@ -941,7 +939,7 @@ public class MetadataTableUtil {
     }
   }
 
-  public static void chopped(AccumuloServerContext context, KeyExtent extent, ZooLock zooLock) {
+  public static void chopped(ServerContext context, KeyExtent extent, ZooLock zooLock) {
     Mutation m = new Mutation(extent.getMetadataEntry());
     ChoppedColumnFamily.CHOPPED_COLUMN.put(m, new Value("chopped".getBytes(UTF_8)));
     update(context, zooLock, m, extent);
@@ -1012,7 +1010,7 @@ public class MetadataTableUtil {
     return result;
   }
 
-  public static void addBulkLoadInProgressFlag(AccumuloServerContext context, String path) {
+  public static void addBulkLoadInProgressFlag(ServerContext context, String path) {
 
     Mutation m = new Mutation(MetadataSchema.BlipSection.getRowPrefix() + path);
     m.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
@@ -1023,7 +1021,7 @@ public class MetadataTableUtil {
     update(context, m, new KeyExtent(Table.ID.of("anythingNotMetadata"), null, null));
   }
 
-  public static void removeBulkLoadInProgressFlag(AccumuloServerContext context, String path) {
+  public static void removeBulkLoadInProgressFlag(ServerContext context, String path) {
 
     Mutation m = new Mutation(MetadataSchema.BlipSection.getRowPrefix() + path);
     m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/NamespacePropUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/NamespacePropUtil.java
index 318dd91..6ef9d9b 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/NamespacePropUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/NamespacePropUtil.java
@@ -21,10 +21,9 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.impl.Namespace;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.zookeeper.KeeperException;
 
@@ -61,7 +60,7 @@ public class NamespacePropUtil {
   }
 
   private static String getPath(Namespace.ID namespaceId) {
-    return ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZNAMESPACES + "/"
+    return ServerContext.getInstance().getZooKeeperRoot() + Constants.ZNAMESPACES + "/"
         + namespaceId + Constants.ZNAMESPACE_CONF;
   }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
index 2df146f..af290b2 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
@@ -23,12 +23,10 @@ import java.io.IOException;
 import java.util.Map.Entry;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.Table;
@@ -40,9 +38,9 @@ import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.SimpleThreadPool;
-import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.server.ServerContext;
+import org.apache.accumulo.server.cli.ClientOnRequiredTable;
 import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
@@ -59,9 +57,7 @@ public class RandomizeVolumes {
     opts.parseArgs(RandomizeVolumes.class.getName(), args);
     Connector c;
     if (opts.getToken() == null) {
-      Instance instance = opts.getInstance();
-      AccumuloServerContext context = new AccumuloServerContext(instance,
-          new ServerConfigurationFactory(instance));
+      ServerContext context = opts.getServerContext();
       c = context.getConnector();
     } else {
       c = opts.getConnector();
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java
index 252708d..1fdd4be 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java
@@ -21,10 +21,9 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.conf.PropertyType;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -61,7 +60,7 @@ public class SystemPropUtil {
     }
 
     // create the zk node for this property and set it's data to the specified value
-    String zPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZCONFIG + "/"
+    String zPath = ServerContext.getInstance().getZooKeeperRoot() + Constants.ZCONFIG + "/"
         + property;
 
     return ZooReaderWriter.getInstance().putPersistentData(zPath, value.getBytes(UTF_8),
@@ -70,7 +69,7 @@ public class SystemPropUtil {
 
   public static void removeSystemProperty(String property)
       throws InterruptedException, KeeperException {
-    String zPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZCONFIG + "/"
+    String zPath = ServerContext.getInstance().getZooKeeperRoot() + Constants.ZCONFIG + "/"
         + property;
     ZooReaderWriter.getInstance().recursiveDelete(zPath, NodeMissingPolicy.FAIL);
   }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TablePropUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/TablePropUtil.java
index ae77327..37ec6b3 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/TablePropUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/TablePropUtil.java
@@ -21,10 +21,9 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.zookeeper.KeeperException;
 
@@ -60,7 +59,7 @@ public class TablePropUtil {
   }
 
   private static String getTablePath(Table.ID tableId) {
-    return ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZTABLES + "/"
+    return ServerContext.getInstance().getZooKeeperRoot() + Constants.ZTABLES + "/"
         + tableId.canonicalID() + Constants.ZTABLE_CONF;
   }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TabletServerLocks.java b/server/base/src/main/java/org/apache/accumulo/server/util/TabletServerLocks.java
index f371bfb..772ab78 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/TabletServerLocks.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/TabletServerLocks.java
@@ -22,11 +22,9 @@ import java.util.List;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.ZooLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 
@@ -43,12 +41,12 @@ public class TabletServerLocks {
 
   public static void main(String[] args) throws Exception {
 
-    Instance instance = HdfsZooInstance.getInstance();
-    String tserverPath = ZooUtil.getRoot(instance) + Constants.ZTSERVERS;
+    ServerContext context = ServerContext.getInstance();
+    String tserverPath = context.getZooKeeperRoot() + Constants.ZTSERVERS;
     Opts opts = new Opts();
     opts.parseArgs(TabletServerLocks.class.getName(), args);
 
-    ZooCache cache = new ZooCache(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut());
+    ZooCache cache = new ZooCache(context.getZooKeepers(), context.getZooKeepersSessionTimeOut());
 
     if (opts.list) {
       IZooReaderWriter zoo = ZooReaderWriter.getInstance();
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java b/server/base/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
index 4cedd15..1b1a107 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
@@ -17,9 +17,8 @@
 package org.apache.accumulo.server.util;
 
 import org.apache.accumulo.core.cli.Help;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.start.spi.KeywordExecutable;
 import org.apache.hadoop.fs.FileSystem;
@@ -69,13 +68,13 @@ public class ZooKeeperMain implements KeywordExecutable {
     String baseDir = ServerConstants.getBaseUris()[0];
     System.out.println("Using " + fs.makeQualified(new Path(baseDir + "/instance_id"))
         + " to lookup accumulo instance");
-    Instance instance = HdfsZooInstance.getInstance();
+    ServerContext context = ServerContext.getInstance();
     if (opts.servers == null) {
-      opts.servers = instance.getZooKeepers();
+      opts.servers = context.getZooKeepers();
     }
-    System.out.println("The accumulo instance id is " + instance.getInstanceID());
+    System.out.println("The accumulo instance id is " + context.getInstanceID());
     if (!opts.servers.contains("/"))
-      opts.servers += "/accumulo/" + instance.getInstanceID();
+      opts.servers += "/accumulo/" + context.getInstanceID();
     org.apache.zookeeper.ZooKeeperMain
         .main(new String[] {"-server", opts.servers, "-timeout", "" + (opts.timeout * 1000)});
   }
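
The ZooKeeperMain change keeps the existing chroot convention: when the caller supplies no explicit path, the zkCli session is rooted under /accumulo/<instance-id>. A short sketch of that connect-string construction, assuming context is the ServerContext obtained above (the host names in the comment are hypothetical):

    // Illustrative: chroot the zkCli session to this instance's subtree when no path was given.
    String servers = context.getZooKeepers();            // e.g. "zk1:2181,zk2:2181"
    if (!servers.contains("/")) {
      servers += "/accumulo/" + context.getInstanceID(); // connect under this instance's root node
    }
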
diff --git a/server/base/src/main/java/org/apache/accumulo/server/zookeeper/TransactionWatcher.java b/server/base/src/main/java/org/apache/accumulo/server/zookeeper/TransactionWatcher.java
index 5f96ddc..b65a400 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/zookeeper/TransactionWatcher.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/zookeeper/TransactionWatcher.java
@@ -20,62 +20,56 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReader;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.zookeeper.KeeperException;
 
 public class TransactionWatcher extends org.apache.accumulo.fate.zookeeper.TransactionWatcher {
   public static class ZooArbitrator implements Arbitrator {
 
-    Instance instance = HdfsZooInstance.getInstance();
-    ZooReader rdr = new ZooReader(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut());
+    private static ServerContext context = ServerContext.getInstance();
+    ZooReader rdr = new ZooReader(context.getZooKeepers(), context.getZooKeepersSessionTimeOut());
 
     @Override
     public boolean transactionAlive(String type, long tid) throws Exception {
-      String path = ZooUtil.getRoot(instance) + "/" + type + "/" + tid;
+      String path = context.getZooKeeperRoot() + "/" + type + "/" + tid;
       rdr.sync(path);
       return rdr.exists(path);
     }
 
     public static void start(String type, long tid) throws KeeperException, InterruptedException {
-      Instance instance = HdfsZooInstance.getInstance();
       IZooReaderWriter writer = ZooReaderWriter.getInstance();
-      writer.putPersistentData(ZooUtil.getRoot(instance) + "/" + type, new byte[] {},
+      writer.putPersistentData(context.getZooKeeperRoot() + "/" + type, new byte[] {},
           NodeExistsPolicy.OVERWRITE);
-      writer.putPersistentData(ZooUtil.getRoot(instance) + "/" + type + "/" + tid, new byte[] {},
+      writer.putPersistentData(context.getZooKeeperRoot() + "/" + type + "/" + tid, new byte[] {},
           NodeExistsPolicy.OVERWRITE);
-      writer.putPersistentData(ZooUtil.getRoot(instance) + "/" + type + "/" + tid + "-running",
+      writer.putPersistentData(context.getZooKeeperRoot() + "/" + type + "/" + tid + "-running",
           new byte[] {}, NodeExistsPolicy.OVERWRITE);
     }
 
     public static void stop(String type, long tid) throws KeeperException, InterruptedException {
-      Instance instance = HdfsZooInstance.getInstance();
       IZooReaderWriter writer = ZooReaderWriter.getInstance();
-      writer.recursiveDelete(ZooUtil.getRoot(instance) + "/" + type + "/" + tid,
+      writer.recursiveDelete(context.getZooKeeperRoot() + "/" + type + "/" + tid,
           NodeMissingPolicy.SKIP);
     }
 
     public static void cleanup(String type, long tid) throws KeeperException, InterruptedException {
-      Instance instance = HdfsZooInstance.getInstance();
       IZooReaderWriter writer = ZooReaderWriter.getInstance();
-      writer.recursiveDelete(ZooUtil.getRoot(instance) + "/" + type + "/" + tid,
+      writer.recursiveDelete(context.getZooKeeperRoot() + "/" + type + "/" + tid,
           NodeMissingPolicy.SKIP);
-      writer.recursiveDelete(ZooUtil.getRoot(instance) + "/" + type + "/" + tid + "-running",
+      writer.recursiveDelete(context.getZooKeeperRoot() + "/" + type + "/" + tid + "-running",
           NodeMissingPolicy.SKIP);
     }
 
     public static Set<Long> allTransactionsAlive(String type)
         throws KeeperException, InterruptedException {
-      final Instance instance = HdfsZooInstance.getInstance();
       final IZooReader reader = ZooReaderWriter.getInstance();
       final Set<Long> result = new HashSet<>();
-      final String parent = ZooUtil.getRoot(instance) + "/" + type;
+      final String parent = context.getZooKeeperRoot() + "/" + type;
       reader.sync(parent);
       List<String> children = reader.getChildren(parent);
       for (String child : children) {
@@ -89,7 +83,7 @@ public class TransactionWatcher extends org.apache.accumulo.fate.zookeeper.Trans
 
     @Override
     public boolean transactionComplete(String type, long tid) throws Exception {
-      String path = ZooUtil.getRoot(instance) + "/" + type + "/" + tid + "-running";
+      String path = context.getZooKeeperRoot() + "/" + type + "/" + tid + "-running";
       rdr.sync(path);
       return !rdr.exists(path);
     }
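
After this change ZooArbitrator resolves every path through the shared ServerContext instead of re-reading HdfsZooInstance on each call. A sketch of the arbitrator life cycle, assuming a live server process with ZooKeeper reachable; the "bulkTx" type string and the transaction id are illustrative only:

    import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;

    public class ArbitratorSketch {
      public static void main(String[] args) throws Exception {
        long tid = 42L;                               // hypothetical transaction id
        ZooArbitrator.start("bulkTx", tid);           // creates <zkRoot>/bulkTx/42 and .../42-running
        ZooArbitrator arbitrator = new ZooArbitrator();
        System.out.println(arbitrator.transactionAlive("bulkTx", tid));    // true while .../42 exists
        ZooArbitrator.stop("bulkTx", tid);            // deletes <zkRoot>/bulkTx/42
        ZooArbitrator.cleanup("bulkTx", tid);         // also removes the .../42-running marker
        System.out.println(arbitrator.transactionComplete("bulkTx", tid)); // true once the marker is gone
      }
    }
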
diff --git a/server/base/src/test/java/org/apache/accumulo/server/AccumuloServerContextTest.java b/server/base/src/test/java/org/apache/accumulo/server/ServerContextTest.java
similarity index 93%
rename from server/base/src/test/java/org/apache/accumulo/server/AccumuloServerContextTest.java
rename to server/base/src/test/java/org/apache/accumulo/server/ServerContextTest.java
index b2855c6..ffa7591 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/AccumuloServerContextTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/ServerContextTest.java
@@ -42,7 +42,7 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
-public class AccumuloServerContextTest {
+public class ServerContextTest {
 
   private UserGroupInformation testUser;
   private String username;
@@ -82,14 +82,13 @@ public class AccumuloServerContextTest {
       EasyMock.expect(factory.getSystemConfiguration()).andReturn(conf).anyTimes();
       EasyMock.expect(factory.getSiteConfiguration()).andReturn(siteConfig).anyTimes();
 
-      AccumuloServerContext context = EasyMock.createMockBuilder(AccumuloServerContext.class)
+      ServerContext context = EasyMock.createMockBuilder(ServerContext.class)
           .addMockedMethod("enforceKerberosLogin").addMockedMethod("getConfiguration")
-          .addMockedMethod("getServerConfigurationFactory").addMockedMethod("getCredentials")
-          .createMock();
+          .addMockedMethod("getServerConfFactory").addMockedMethod("getCredentials").createMock();
       context.enforceKerberosLogin();
       EasyMock.expectLastCall().anyTimes();
       EasyMock.expect(context.getConfiguration()).andReturn(conf).anyTimes();
-      EasyMock.expect(context.getServerConfigurationFactory()).andReturn(factory).anyTimes();
+      EasyMock.expect(context.getServerConfFactory()).andReturn(factory).anyTimes();
       EasyMock.expect(context.getCredentials())
           .andReturn(new Credentials("accumulo/hostname@FAKE.COM", token)).once();
 
diff --git a/server/base/src/test/java/org/apache/accumulo/server/client/BulkImporterTest.java b/server/base/src/test/java/org/apache/accumulo/server/client/BulkImporterTest.java
index 9d754b7..37cf168 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/client/BulkImporterTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/client/BulkImporterTest.java
@@ -25,7 +25,6 @@ import java.util.TreeSet;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.Table;
@@ -104,7 +103,7 @@ public class BulkImporterTest {
     }
 
     @Override
-    public void invalidateCache(Instance instance, String server) {
+    public void invalidateCache(ClientContext context, String server) {
       throw new NotImplementedException();
     }
   }
diff --git a/server/base/src/test/java/org/apache/accumulo/server/conf/NamespaceConfigurationTest.java b/server/base/src/test/java/org/apache/accumulo/server/conf/NamespaceConfigurationTest.java
index 3a627a1..1ee9e2e 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/conf/NamespaceConfigurationTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/conf/NamespaceConfigurationTest.java
@@ -21,7 +21,6 @@ import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.createMock;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
 import static org.junit.Assert.assertEquals;
@@ -31,11 +30,11 @@ import static org.junit.Assert.assertTrue;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.Properties;
 import java.util.UUID;
 import java.util.function.Predicate;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Namespace;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.ConfigurationObserver;
@@ -43,6 +42,7 @@ import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
+import org.apache.accumulo.server.ServerContext;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -52,7 +52,7 @@ public class NamespaceConfigurationTest {
   private static final int ZK_SESSION_TIMEOUT = 120000;
 
   private String iid;
-  private Instance instance;
+  private ServerContext context;
   private AccumuloConfiguration parent;
   private ZooCacheFactory zcf;
   private ZooCache zc;
@@ -61,16 +61,18 @@ public class NamespaceConfigurationTest {
   @Before
   public void setUp() {
     iid = UUID.randomUUID().toString();
-    instance = createMock(Instance.class);
+
+    context = createMock(ServerContext.class);
     parent = createMock(AccumuloConfiguration.class);
 
-    expect(instance.getInstanceID()).andReturn(iid);
-    expectLastCall().anyTimes();
-    expect(instance.getZooKeepers()).andReturn(ZOOKEEPERS);
-    expect(instance.getZooKeepersSessionTimeOut()).andReturn(ZK_SESSION_TIMEOUT);
-    replay(instance);
+    expect(context.getProperties()).andReturn(new Properties());
+    expect(context.getZooKeeperRoot()).andReturn("/accumulo/" + iid).anyTimes();
+    expect(context.getInstanceID()).andReturn(iid).anyTimes();
+    expect(context.getZooKeepers()).andReturn(ZOOKEEPERS).anyTimes();
+    expect(context.getZooKeepersSessionTimeOut()).andReturn(ZK_SESSION_TIMEOUT).anyTimes();
+    replay(context);
 
-    c = new NamespaceConfiguration(NSID, instance, parent);
+    c = new NamespaceConfiguration(NSID, context, parent);
     zcf = createMock(ZooCacheFactory.class);
     c.setZooCacheFactory(zcf);
 
@@ -108,7 +110,7 @@ public class NamespaceConfigurationTest {
 
   @Test
   public void testGet_SkipParentIfAccumuloNS() {
-    c = new NamespaceConfiguration(Namespace.ID.ACCUMULO, instance, parent);
+    c = new NamespaceConfiguration(Namespace.ID.ACCUMULO, context, parent);
     c.setZooCacheFactory(zcf);
     Property p = Property.INSTANCE_SECRET;
     expect(zc.get(ZooUtil.getRoot(iid) + Constants.ZNAMESPACES + "/" + Namespace.ID.ACCUMULO
diff --git a/server/base/src/test/java/org/apache/accumulo/server/conf/ServerConfigurationFactoryTest.java b/server/base/src/test/java/org/apache/accumulo/server/conf/ServerConfigurationFactoryTest.java
index 7d4f17d..0d198c4 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/conf/ServerConfigurationFactoryTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/conf/ServerConfigurationFactoryTest.java
@@ -28,13 +28,15 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertSame;
 
-import org.apache.accumulo.core.client.Instance;
+import java.util.Properties;
+
 import org.apache.accumulo.core.client.impl.Namespace;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.conf.SiteConfiguration;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
+import org.apache.accumulo.server.ServerContext;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -67,14 +69,16 @@ public class ServerConfigurationFactoryTest {
     replay(zc);
   }
 
-  private Instance instance;
+  private ServerContext context;
   private ServerConfigurationFactory scf;
 
   @Before
   public void setUp() throws Exception {
-    instance = createMock(Instance.class);
-    expect(instance.getInstanceID()).andReturn(IID);
-    expectLastCall().anyTimes();
+    context = createMock(ServerContext.class);
+    expect(context.getInstanceID()).andReturn(IID).anyTimes();
+    expect(context.getProperties()).andReturn(new Properties()).anyTimes();
+    expect(context.getZooKeepers()).andReturn(ZK_HOST).anyTimes();
+    expect(context.getZooKeepersSessionTimeOut()).andReturn(ZK_TIMEOUT).anyTimes();
   }
 
   @After
@@ -83,13 +87,12 @@ public class ServerConfigurationFactoryTest {
   }
 
   private void mockInstanceForConfig() {
-    expect(instance.getZooKeepers()).andReturn(ZK_HOST);
-    expect(instance.getZooKeepersSessionTimeOut()).andReturn(ZK_TIMEOUT);
+    expect(context.getZooKeeperRoot()).andReturn("/accumulo/" + IID).anyTimes();
   }
 
   private void ready() {
-    replay(instance);
-    scf = new ServerConfigurationFactory(instance);
+    replay(context);
+    scf = new ServerConfigurationFactory(context);
     scf.setZooCacheFactory(zcf);
   }
 
diff --git a/server/base/src/test/java/org/apache/accumulo/server/conf/TableConfigurationTest.java b/server/base/src/test/java/org/apache/accumulo/server/conf/TableConfigurationTest.java
index 0948616..0428c0b 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/conf/TableConfigurationTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/conf/TableConfigurationTest.java
@@ -29,17 +29,18 @@ import static org.junit.Assert.assertTrue;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.Properties;
 import java.util.UUID;
 import java.util.function.Predicate;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.conf.ConfigurationObserver;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
+import org.apache.accumulo.server.ServerContext;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -49,7 +50,7 @@ public class TableConfigurationTest {
   private static final int ZK_SESSION_TIMEOUT = 120000;
 
   private String iid;
-  private Instance instance;
+  private ServerContext context;
   private NamespaceConfiguration parent;
   private ZooCacheFactory zcf;
   private ZooCache zc;
@@ -58,14 +59,18 @@ public class TableConfigurationTest {
   @Before
   public void setUp() {
     iid = UUID.randomUUID().toString();
-    instance = createMock(Instance.class);
-    expect(instance.getInstanceID()).andReturn(iid).anyTimes();
-    expect(instance.getZooKeepers()).andReturn(ZOOKEEPERS);
-    expect(instance.getZooKeepersSessionTimeOut()).andReturn(ZK_SESSION_TIMEOUT);
-    replay(instance);
+    context = createMock(ServerContext.class);
+    ServerConfigurationFactory scf = createMock(ServerConfigurationFactory.class);
+    expect(context.getServerConfFactory()).andReturn(scf);
+    expect(context.getProperties()).andReturn(new Properties()).anyTimes();
+    expect(context.getInstanceID()).andReturn(iid).anyTimes();
+    expect(context.getZooKeeperRoot()).andReturn("/accumulo/" + iid).anyTimes();
+    expect(context.getZooKeepers()).andReturn(ZOOKEEPERS).anyTimes();
+    expect(context.getZooKeepersSessionTimeOut()).andReturn(ZK_SESSION_TIMEOUT).anyTimes();
+    replay(context);
 
     parent = createMock(NamespaceConfiguration.class);
-    c = new TableConfiguration(instance, TID, parent);
+    c = new TableConfiguration(context, TID, parent);
     zcf = createMock(ZooCacheFactory.class);
     c.setZooCacheFactory(zcf);
 
diff --git a/server/base/src/test/java/org/apache/accumulo/server/conf/ZooConfigurationFactoryTest.java b/server/base/src/test/java/org/apache/accumulo/server/conf/ZooConfigurationFactoryTest.java
index 4c0e842..9b23e3d 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/conf/ZooConfigurationFactoryTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/conf/ZooConfigurationFactoryTest.java
@@ -19,23 +19,23 @@ package org.apache.accumulo.server.conf;
 import static org.easymock.EasyMock.createMock;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.isA;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertSame;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.zookeeper.Watcher;
 import org.junit.Before;
 import org.junit.Test;
 
 public class ZooConfigurationFactoryTest {
-  private Instance instance;
+
+  private ServerContext context;
   private ZooCacheFactory zcf;
   private ZooCache zc;
   private ZooConfigurationFactory zconff;
@@ -43,7 +43,7 @@ public class ZooConfigurationFactoryTest {
 
   @Before
   public void setUp() {
-    instance = createMock(Instance.class);
+    context = createMock(ServerContext.class);
     zcf = createMock(ZooCacheFactory.class);
     zc = createMock(ZooCache.class);
     zconff = new ZooConfigurationFactory();
@@ -52,19 +52,19 @@ public class ZooConfigurationFactoryTest {
 
   @Test
   public void testGetInstance() {
-    expect(instance.getInstanceID()).andReturn("iid");
-    expectLastCall().anyTimes();
-    expect(instance.getZooKeepers()).andReturn("localhost");
-    expect(instance.getZooKeepersSessionTimeOut()).andReturn(120000);
-    replay(instance);
-    expect(zcf.getZooCache(eq("localhost"), eq(120000), isA(Watcher.class))).andReturn(zc);
+    expect(context.getInstanceID()).andReturn("iid").anyTimes();
+    expect(context.getZooKeepers()).andReturn("localhost").anyTimes();
+    expect(context.getZooKeepersSessionTimeOut()).andReturn(120000).anyTimes();
+    replay(context);
+    expect(zcf.getZooCache(eq("localhost"), eq(120000), isA(Watcher.class))).andReturn(zc)
+        .anyTimes();
     replay(zcf);
 
-    ZooConfiguration c = zconff.getInstance(instance, zcf, parent);
+    ZooConfiguration c = zconff.getInstance(context, zcf, parent);
     assertNotNull(c);
-    assertSame(c, zconff.getInstance(instance, zcf, parent));
+    assertSame(c, zconff.getInstance(context, zcf, parent));
 
-    verify(instance);
+    verify(context);
     verify(zcf);
   }
 }
diff --git a/server/base/src/test/java/org/apache/accumulo/server/master/balancer/BaseHostRegexTableLoadBalancerTest.java b/server/base/src/test/java/org/apache/accumulo/server/master/balancer/BaseHostRegexTableLoadBalancerTest.java
index 9e91f04..aef9964 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/master/balancer/BaseHostRegexTableLoadBalancerTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/master/balancer/BaseHostRegexTableLoadBalancerTest.java
@@ -16,29 +16,28 @@
  */
 package org.apache.accumulo.server.master.balancer;
 
+import static org.easymock.EasyMock.expect;
+
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Properties;
 import java.util.SortedMap;
 import java.util.SortedSet;
 import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.function.Predicate;
 
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.admin.TableOperations;
 import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.Namespace;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.client.impl.TableOperationsImpl;
 import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.ConfigurationCopy;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
@@ -47,6 +46,7 @@ import org.apache.accumulo.core.data.impl.KeyExtent;
 import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.NamespaceConfiguration;
 import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.conf.TableConfiguration;
@@ -57,46 +57,6 @@ import org.easymock.EasyMock;
 
 public abstract class BaseHostRegexTableLoadBalancerTest extends HostRegexTableLoadBalancer {
 
-  protected static class TestInstance implements Instance {
-
-    @Override
-    public String getRootTabletLocation() {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public List<String> getMasterLocations() {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public String getInstanceID() {
-      return "1111";
-    }
-
-    @Override
-    public String getInstanceName() {
-      return "test";
-    }
-
-    @Override
-    public String getZooKeepers() {
-      return "";
-    }
-
-    @Override
-    public int getZooKeepersSessionTimeOut() {
-      return 30;
-    }
-
-    @Override
-    public Connector getConnector(String principal, AuthenticationToken token)
-        throws AccumuloException, AccumuloSecurityException {
-      throw new UnsupportedOperationException();
-    }
-
-  }
-
   protected static class TestTable {
     private String tableName;
     private Table.ID id;
@@ -132,11 +92,11 @@ public abstract class BaseHostRegexTableLoadBalancerTest extends HostRegexTableL
 
   protected static class TestServerConfigurationFactory extends ServerConfigurationFactory {
 
-    final Instance instance;
+    final ServerContext context;
 
-    public TestServerConfigurationFactory(Instance instance) {
-      super(instance);
-      this.instance = instance;
+    public TestServerConfigurationFactory(ServerContext context) {
+      super(context);
+      this.context = context;
     }
 
     @Override
@@ -148,9 +108,9 @@ public abstract class BaseHostRegexTableLoadBalancerTest extends HostRegexTableL
     public TableConfiguration getTableConfiguration(final Table.ID tableId) {
       // create a dummy namespaceConfiguration to satisfy requireNonNull in TableConfiguration
       // constructor
-      NamespaceConfiguration dummyConf = new NamespaceConfiguration(Namespace.ID.DEFAULT,
-          this.instance, DefaultConfiguration.getInstance());
-      return new TableConfiguration(this.instance, tableId, dummyConf) {
+      NamespaceConfiguration dummyConf = new NamespaceConfiguration(Namespace.ID.DEFAULT, context,
+          DefaultConfiguration.getInstance());
+      return new TableConfiguration(context, tableId, dummyConf) {
         @Override
         public String get(Property property) {
           return DEFAULT_TABLE_PROPERTIES.get(property.name());
@@ -196,9 +156,17 @@ public abstract class BaseHostRegexTableLoadBalancerTest extends HostRegexTableL
     }
   }
 
-  protected final TestInstance instance = new TestInstance();
-  protected final TestServerConfigurationFactory factory = new TestServerConfigurationFactory(
-      instance);
+  protected ServerContext createMockContext() {
+    ServerContext mockContext = EasyMock.createMock(ServerContext.class);
+    expect(mockContext.getProperties()).andReturn(new Properties()).anyTimes();
+    expect(mockContext.getZooKeepers()).andReturn("").anyTimes();
+    expect(mockContext.getInstanceName()).andReturn("test").anyTimes();
+    expect(mockContext.getZooKeepersSessionTimeOut()).andReturn(30).anyTimes();
+    expect(mockContext.getInstanceID()).andReturn("1111").anyTimes();
+    expect(mockContext.getZooKeeperRoot()).andReturn(Constants.ZROOT + "/1111").anyTimes();
+    return mockContext;
+  }
+
   protected final Map<String,String> servers = new HashMap<>(15);
   protected final SortedMap<TServerInstance,TabletServerStatus> allTabletServers = new TreeMap<>();
   protected final Map<String,List<KeyExtent>> tableExtents = new HashMap<>(3);
@@ -365,5 +333,4 @@ public abstract class BaseHostRegexTableLoadBalancerTest extends HostRegexTableL
     }
     return current;
   }
-
 }
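
With the TestInstance stub removed, the balancer tests build their ServerContext with EasyMock and hand configuration to the balancer through getServerConfFactory(). A minimal sketch of the wiring the subclasses below use, assuming it lives inside a subclass of BaseHostRegexTableLoadBalancerTest (so createMockContext(), TestServerConfigurationFactory and init() are in scope) and that EasyMock.expect/replay are statically imported:

    // Sketch of the mock wiring used by the reworked tests; assumes a subclass of
    // BaseHostRegexTableLoadBalancerTest.
    private void initWithMockedContext() {
      ServerContext confContext = createMockContext();     // mock backing the configuration factory
      replay(confContext);
      TestServerConfigurationFactory factory = new TestServerConfigurationFactory(confContext);

      ServerContext balancerContext = createMockContext(); // second mock handed to the balancer
      expect(balancerContext.getServerConfFactory()).andReturn(factory).anyTimes();
      replay(balancerContext);
      init(balancerContext);                               // balancer reads its config through the mock
    }
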
diff --git a/server/base/src/test/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancerReconfigurationTest.java b/server/base/src/test/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancerReconfigurationTest.java
index 5485ef9..20d5c34 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancerReconfigurationTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancerReconfigurationTest.java
@@ -16,6 +16,9 @@
  */
 package org.apache.accumulo.server.master.balancer;
 
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -30,7 +33,7 @@ import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 import org.apache.accumulo.core.data.impl.KeyExtent;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
 import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.state.TabletMigration;
 import org.apache.thrift.TException;
@@ -44,8 +47,13 @@ public class HostRegexTableLoadBalancerReconfigurationTest
 
   @Test
   public void testConfigurationChanges() {
-
-    init(new AccumuloServerContext(instance, factory));
+    ServerContext context1 = createMockContext();
+    replay(context1);
+    final TestServerConfigurationFactory factory = new TestServerConfigurationFactory(context1);
+    ServerContext context2 = createMockContext();
+    expect(context2.getServerConfFactory()).andReturn(factory).anyTimes();
+    replay(context2);
+    init(context2);
     Map<KeyExtent,TServerInstance> unassigned = new HashMap<>();
     for (List<KeyExtent> extents : tableExtents.values()) {
       for (KeyExtent ke : extents) {
diff --git a/server/base/src/test/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancerTest.java b/server/base/src/test/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancerTest.java
index d729620..641db22 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancerTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancerTest.java
@@ -16,6 +16,9 @@
  */
 package org.apache.accumulo.server.master.balancer;
 
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -39,8 +42,9 @@ import org.apache.accumulo.core.data.thrift.TKeyExtent;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
 import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.NamespaceConfiguration;
+import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.state.TabletMigration;
@@ -50,9 +54,23 @@ import org.junit.Test;
 
 public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalancerTest {
 
+  public void init() {
+    ServerContext context1 = createMockContext();
+    replay(context1);
+    final TestServerConfigurationFactory factory = new TestServerConfigurationFactory(context1);
+    initFactory(factory);
+  }
+
+  private void initFactory(ServerConfigurationFactory factory) {
+    ServerContext context = createMockContext();
+    expect(context.getServerConfFactory()).andReturn(factory).anyTimes();
+    replay(context);
+    init(context);
+  }
+
   @Test
   public void testInit() {
-    init(new AccumuloServerContext(instance, factory));
+    init();
     Assert.assertEquals("OOB check interval value is incorrect", 7000, this.getOobCheckMillis());
     Assert.assertEquals("Max migrations is incorrect", 4, this.getMaxMigrations());
     Assert.assertEquals("Max outstanding migrations is incorrect", 10,
@@ -79,7 +97,7 @@ public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalanc
 
   @Test
   public void testBalance() {
-    init(new AccumuloServerContext(instance, factory));
+    init();
     Set<KeyExtent> migrations = new HashSet<>();
     List<TabletMigration> migrationsOut = new ArrayList<>();
     long wait = this.balance(Collections.unmodifiableSortedMap(createCurrent(15)), migrations,
@@ -125,7 +143,7 @@ public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalanc
   @Test
   public void testBalanceWithTooManyOutstandingMigrations() {
     List<TabletMigration> migrationsOut = new ArrayList<>();
-    init(new AccumuloServerContext(instance, factory));
+    init();
     // let's say we already have migrations ongoing for the FOO and BAR table extents (should be 5 of
     // each of them) for a total of 10
     Set<KeyExtent> migrations = new HashSet<>();
@@ -140,7 +158,7 @@ public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalanc
 
   @Test
   public void testSplitCurrentByRegexUsingHostname() {
-    init(new AccumuloServerContext(instance, factory));
+    init();
     Map<String,SortedMap<TServerInstance,TabletServerStatus>> groups = this
         .splitCurrentByRegex(createCurrent(15));
     Assert.assertEquals(3, groups.size());
@@ -172,13 +190,15 @@ public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalanc
 
   @Test
   public void testSplitCurrentByRegexUsingOverlappingPools() {
-    init(new AccumuloServerContext(instance, new TestServerConfigurationFactory(instance) {
+    ServerContext context = createMockContext();
+    replay(context);
+    initFactory(new TestServerConfigurationFactory(context) {
 
       @Override
       public TableConfiguration getTableConfiguration(Table.ID tableId) {
         NamespaceConfiguration defaultConf = new NamespaceConfiguration(Namespace.ID.DEFAULT,
-            this.instance, DefaultConfiguration.getInstance());
-        return new TableConfiguration(instance, tableId, defaultConf) {
+            this.context, DefaultConfiguration.getInstance());
+        return new TableConfiguration(this.context, tableId, defaultConf) {
           HashMap<String,String> tableProperties = new HashMap<>();
           {
             tableProperties
@@ -208,7 +228,7 @@ public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalanc
           }
         };
       }
-    }));
+    });
     Map<String,SortedMap<TServerInstance,TabletServerStatus>> groups = this
         .splitCurrentByRegex(createCurrent(15));
     Assert.assertEquals(2, groups.size());
@@ -247,7 +267,9 @@ public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalanc
 
   @Test
   public void testSplitCurrentByRegexUsingIP() {
-    init(new AccumuloServerContext(instance, new TestServerConfigurationFactory(instance) {
+    ServerContext context = createMockContext();
+    replay(context);
+    initFactory(new TestServerConfigurationFactory(context) {
       @Override
       public synchronized AccumuloConfiguration getSystemConfiguration() {
         HashMap<String,String> props = new HashMap<>();
@@ -259,8 +281,8 @@ public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalanc
       @Override
       public TableConfiguration getTableConfiguration(Table.ID tableId) {
         NamespaceConfiguration defaultConf = new NamespaceConfiguration(Namespace.ID.DEFAULT,
-            this.instance, DefaultConfiguration.getInstance());
-        return new TableConfiguration(instance, tableId, defaultConf) {
+            this.context, DefaultConfiguration.getInstance());
+        return new TableConfiguration(context, tableId, defaultConf) {
           HashMap<String,String> tableProperties = new HashMap<>();
           {
             tableProperties.put(
@@ -291,7 +313,7 @@ public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalanc
           }
         };
       }
-    }));
+    });
     Assert.assertTrue(isIpBasedRegex());
     Map<String,SortedMap<TServerInstance,TabletServerStatus>> groups = this
         .splitCurrentByRegex(createCurrent(15));
@@ -324,7 +346,7 @@ public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalanc
 
   @Test
   public void testAllUnassigned() {
-    init(new AccumuloServerContext(instance, factory));
+    init();
     Map<KeyExtent,TServerInstance> assignments = new HashMap<>();
     Map<KeyExtent,TServerInstance> unassigned = new HashMap<>();
     for (List<KeyExtent> extents : tableExtents.values()) {
@@ -356,7 +378,7 @@ public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalanc
 
   @Test
   public void testAllAssigned() {
-    init(new AccumuloServerContext(instance, factory));
+    init();
     Map<KeyExtent,TServerInstance> assignments = new HashMap<>();
     Map<KeyExtent,TServerInstance> unassigned = new HashMap<>();
     this.getAssignments(Collections.unmodifiableSortedMap(allTabletServers),
@@ -366,7 +388,7 @@ public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalanc
 
   @Test
   public void testPartiallyAssigned() {
-    init(new AccumuloServerContext(instance, factory));
+    init();
     Map<KeyExtent,TServerInstance> assignments = new HashMap<>();
     Map<KeyExtent,TServerInstance> unassigned = new HashMap<>();
     int i = 0;
@@ -402,7 +424,7 @@ public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalanc
 
   @Test
   public void testUnassignedWithNoTServers() {
-    init(new AccumuloServerContext(instance, factory));
+    init();
     Map<KeyExtent,TServerInstance> assignments = new HashMap<>();
     Map<KeyExtent,TServerInstance> unassigned = new HashMap<>();
     for (KeyExtent ke : tableExtents.get(BAR.getTableName())) {
@@ -434,7 +456,7 @@ public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalanc
 
   @Test
   public void testOutOfBoundsTablets() {
-    init(new AccumuloServerContext(instance, factory));
+    init();
     // Wait to trigger the out of bounds check which will call our version of
     // getOnlineTabletsForTable
     UtilWaitThread.sleep(11000);
diff --git a/server/base/src/test/java/org/apache/accumulo/server/master/balancer/TableLoadBalancerTest.java b/server/base/src/test/java/org/apache/accumulo/server/master/balancer/TableLoadBalancerTest.java
index 533bd1b..0ffb552 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/master/balancer/TableLoadBalancerTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/master/balancer/TableLoadBalancerTest.java
@@ -16,17 +16,19 @@
  */
 package org.apache.accumulo.server.master.balancer;
 
+import static org.easymock.EasyMock.replay;
+
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Properties;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.UUID;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.admin.TableOperations;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
@@ -37,7 +39,7 @@ import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
 import org.apache.accumulo.core.util.HostAndPort;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.NamespaceConfiguration;
 import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.conf.TableConfiguration;
@@ -101,7 +103,7 @@ public class TableLoadBalancerTest {
     }
 
     @Override
-    public void init(AccumuloServerContext context) {}
+    public void init(ServerContext context) {}
 
     @Override
     public List<TabletStats> getOnlineTabletsForTable(TServerInstance tserver, Table.ID tableId)
@@ -130,28 +132,34 @@ public class TableLoadBalancerTest {
     protected TableOperations getTableOperations() {
       TableOperations tops = EasyMock.createMock(TableOperations.class);
       EasyMock.expect(tops.tableIdMap()).andReturn(TABLE_ID_MAP).anyTimes();
-      EasyMock.replay(tops);
+      replay(tops);
       return tops;
     }
   }
 
+  private ServerContext createMockContext() {
+    ServerContext context = EasyMock.createMock(ServerContext.class);
+    final String instanceId = UUID.nameUUIDFromBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 0})
+        .toString();
+    EasyMock.expect(context.getProperties()).andReturn(new Properties()).anyTimes();
+    EasyMock.expect(context.getInstanceID()).andReturn(instanceId).anyTimes();
+    EasyMock.expect(context.getZooKeepers()).andReturn("10.0.0.1:1234").anyTimes();
+    EasyMock.expect(context.getZooKeepersSessionTimeOut()).andReturn(30_000).anyTimes();
+    EasyMock.expect(context.getZooKeeperRoot()).andReturn("/root/").anyTimes();
+    return context;
+  }
+
   @Test
   public void test() throws Exception {
-    final Instance inst = EasyMock.createMock(Instance.class);
-    EasyMock.expect(inst.getInstanceID())
-        .andReturn(UUID.nameUUIDFromBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}).toString())
-        .anyTimes();
-    EasyMock.expect(inst.getZooKeepers()).andReturn("10.0.0.1:1234").anyTimes();
-    EasyMock.expect(inst.getZooKeepersSessionTimeOut()).andReturn(30_000).anyTimes();
-    EasyMock.replay(inst);
-
-    ServerConfigurationFactory confFactory = new ServerConfigurationFactory(inst) {
+    final ServerContext context = createMockContext();
+    replay(context);
+    ServerConfigurationFactory confFactory = new ServerConfigurationFactory(context) {
       @Override
       public TableConfiguration getTableConfiguration(Table.ID tableId) {
         // create a dummy namespaceConfiguration to satisfy requireNonNull in TableConfiguration
         // constructor
-        NamespaceConfiguration dummyConf = new NamespaceConfiguration(null, inst, null);
-        return new TableConfiguration(inst, tableId, dummyConf) {
+        NamespaceConfiguration dummyConf = new NamespaceConfiguration(null, context, null);
+        return new TableConfiguration(context, tableId, dummyConf) {
           @Override
           public String get(Property property) {
             // fake the get table configuration so the test doesn't try to look in zookeeper for
@@ -161,6 +169,9 @@ public class TableLoadBalancerTest {
         };
       }
     };
+    final ServerContext context2 = createMockContext();
+    EasyMock.expect(context2.getServerConfFactory()).andReturn(confFactory).anyTimes();
+    replay(context2);
 
     String t1Id = TABLE_ID_MAP.get("t1"), t2Id = TABLE_ID_MAP.get("t2"),
         t3Id = TABLE_ID_MAP.get("t3");
@@ -171,13 +182,13 @@ public class TableLoadBalancerTest {
     Set<KeyExtent> migrations = Collections.emptySet();
     List<TabletMigration> migrationsOut = new ArrayList<>();
     TableLoadBalancer tls = new TableLoadBalancer();
-    tls.init(new AccumuloServerContext(inst, confFactory));
+    tls.init(context2);
     tls.balance(state, migrations, migrationsOut);
     Assert.assertEquals(0, migrationsOut.size());
 
     state.put(mkts("10.0.0.2", "0x02030405"), status());
     tls = new TableLoadBalancer();
-    tls.init(new AccumuloServerContext(inst, confFactory));
+    tls.init(context2);
     tls.balance(state, migrations, migrationsOut);
     int count = 0;
     Map<Table.ID,Integer> movedByTable = new HashMap<>();
diff --git a/server/base/src/test/java/org/apache/accumulo/server/problems/ProblemReportTest.java b/server/base/src/test/java/org/apache/accumulo/server/problems/ProblemReportTest.java
index f75ae44..af63ccf 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/problems/ProblemReportTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/problems/ProblemReportTest.java
@@ -32,12 +32,12 @@ import java.io.ByteArrayOutputStream;
 import java.io.DataOutputStream;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.util.Encoding;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.hadoop.io.Text;
 import org.junit.Before;
@@ -48,16 +48,15 @@ public class ProblemReportTest {
   private static final String RESOURCE = "resource";
   private static final String SERVER = "server";
 
-  private Instance instance;
+  private ServerContext context;
   private ZooReaderWriter zoorw;
   private ProblemReport r;
 
   @Before
   public void setUp() throws Exception {
-    instance = createMock(Instance.class);
-    expect(instance.getInstanceID()).andReturn("instance");
-    replay(instance);
-
+    context = createMock(ServerContext.class);
+    expect(context.getZooKeeperRoot()).andReturn("/accumulo/instance");
+    replay(context);
     zoorw = createMock(ZooReaderWriter.class);
   }
 
@@ -159,7 +158,7 @@ public class ProblemReportTest {
     zoorw.recursiveDelete(path, NodeMissingPolicy.SKIP);
     replay(zoorw);
 
-    r.removeFromZooKeeper(zoorw, instance);
+    r.removeFromZooKeeper(zoorw, context);
     verify(zoorw);
   }
 
@@ -175,7 +174,7 @@ public class ProblemReportTest {
         .andReturn(true);
     replay(zoorw);
 
-    r.saveToZooKeeper(zoorw, instance);
+    r.saveToZooKeeper(zoorw, context);
     verify(zoorw);
   }
 
@@ -190,7 +189,7 @@ public class ProblemReportTest {
         .andReturn(encoded);
     replay(zoorw);
 
-    r = ProblemReport.decodeZooKeeperEntry(node, zoorw, instance);
+    r = ProblemReport.decodeZooKeeperEntry(node, zoorw, context);
     assertEquals(TABLE_ID, r.getTableId());
     assertSame(ProblemType.FILE_READ, r.getProblemType());
     assertEquals(RESOURCE, r.getResource());
diff --git a/server/base/src/test/java/org/apache/accumulo/server/replication/ReplicationUtilTest.java b/server/base/src/test/java/org/apache/accumulo/server/replication/ReplicationUtilTest.java
index ad53d09..b77764c 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/replication/ReplicationUtilTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/replication/ReplicationUtilTest.java
@@ -24,7 +24,7 @@ import java.util.Map.Entry;
 
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.ZooCache;
 import org.easymock.EasyMock;
 import org.junit.Before;
@@ -32,7 +32,7 @@ import org.junit.Test;
 
 public class ReplicationUtilTest {
 
-  AccumuloServerContext context;
+  ServerContext context;
   ZooCache zc;
   AccumuloConfiguration conf;
   Map<String,String> confEntries;
@@ -41,7 +41,7 @@ public class ReplicationUtilTest {
 
   @Before
   public void setup() {
-    context = EasyMock.createMock(AccumuloServerContext.class);
+    context = EasyMock.createMock(ServerContext.class);
     zc = EasyMock.createMock(ZooCache.class);
     conf = EasyMock.createMock(AccumuloConfiguration.class);
     EasyMock.expect(context.getConfiguration()).andReturn(conf).anyTimes();
diff --git a/server/base/src/test/java/org/apache/accumulo/server/rpc/SaslDigestCallbackHandlerTest.java b/server/base/src/test/java/org/apache/accumulo/server/rpc/SaslDigestCallbackHandlerTest.java
index 0aa2ae1..118d5ed 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/rpc/SaslDigestCallbackHandlerTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/rpc/SaslDigestCallbackHandlerTest.java
@@ -16,10 +16,6 @@
  */
 package org.apache.accumulo.server.rpc;
 
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 
@@ -32,7 +28,6 @@ import javax.crypto.KeyGenerator;
 import javax.security.auth.callback.Callback;
 import javax.security.auth.callback.UnsupportedCallbackException;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
 import org.apache.accumulo.core.client.impl.AuthenticationTokenIdentifier;
 import org.apache.accumulo.core.rpc.SaslDigestCallbackHandler;
@@ -94,12 +89,8 @@ public class SaslDigestCallbackHandlerTest {
 
   @Test
   public void testTokenSerialization() throws Exception {
-    Instance instance = createMock(Instance.class);
-    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(instance,
-        1000L);
-    expect(instance.getInstanceID()).andReturn("instanceid");
-
-    replay(instance);
+    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(
+        "instanceid", 1000L);
 
     secretManager.addKey(new AuthenticationKey(1, 0L, 100L, keyGen.generateKey()));
     Entry<Token<AuthenticationTokenIdentifier>,AuthenticationTokenIdentifier> entry = secretManager
@@ -109,19 +100,13 @@ public class SaslDigestCallbackHandlerTest {
 
     char[] computedPassword = handler.getPassword(secretManager, entry.getValue());
 
-    verify(instance);
-
     assertArrayEquals(computedPassword, encodedPassword);
   }
 
   @Test
   public void testTokenAndIdentifierSerialization() throws Exception {
-    Instance instance = createMock(Instance.class);
-    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(instance,
-        1000L);
-    expect(instance.getInstanceID()).andReturn("instanceid");
-
-    replay(instance);
+    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(
+        "instanceid", 1000L);
 
     secretManager.addKey(new AuthenticationKey(1, 0L, 1000 * 100L, keyGen.generateKey()));
     Entry<Token<AuthenticationTokenIdentifier>,AuthenticationTokenIdentifier> entry = secretManager
@@ -135,8 +120,6 @@ public class SaslDigestCallbackHandlerTest {
     identifier.readFields(new DataInputStream(new ByteArrayInputStream(decodedIdentifier)));
     char[] computedPassword = handler.getPassword(secretManager, identifier);
 
-    verify(instance);
-
     assertArrayEquals(computedPassword, encodedPassword);
   }
 }
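
The SASL and secret-manager tests no longer mock an Instance because AuthenticationTokenSecretManager now takes the instance id string directly. A minimal sketch of constructing one outside a test; the HmacSHA1 algorithm and 64-bit key length here are assumptions, not taken from this patch:

    import java.util.UUID;
    import javax.crypto.KeyGenerator;
    import org.apache.accumulo.server.security.delegation.AuthenticationKey;
    import org.apache.accumulo.server.security.delegation.AuthenticationTokenSecretManager;

    public class SecretManagerSketch {
      public static void main(String[] args) throws Exception {
        KeyGenerator keyGen = KeyGenerator.getInstance("HmacSHA1"); // assumed algorithm
        keyGen.init(64);                                            // assumed key length
        String instanceId = UUID.randomUUID().toString();
        // Constructor now takes the instance id string rather than an Instance mock.
        AuthenticationTokenSecretManager secretManager =
            new AuthenticationTokenSecretManager(instanceId, 60 * 1000L);
        secretManager.addKey(new AuthenticationKey(1, 0L, 60 * 1000L, keyGen.generateKey()));
      }
    }
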
diff --git a/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java b/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
index 45dff66..c1e7434 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
@@ -23,13 +23,10 @@ import java.io.File;
 import java.io.IOException;
 import java.util.UUID;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.ConnectorImpl;
 import org.apache.accumulo.core.client.impl.Credentials;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.security.SystemCredentials.SystemToken;
-import org.easymock.EasyMock;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -40,7 +37,8 @@ public class SystemCredentialsTest {
   @Rule
   public TestName test = new TestName();
 
-  private Instance inst;
+  private String instanceId = UUID.nameUUIDFromBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 0})
+      .toString();
 
   @BeforeClass
   public static void setUp() throws IOException {
@@ -63,15 +61,6 @@ public class SystemCredentialsTest {
     }
   }
 
-  @Before
-  public void setupInstance() {
-    inst = EasyMock.createMock(Instance.class);
-    EasyMock.expect(inst.getInstanceID())
-        .andReturn(UUID.nameUUIDFromBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 0}).toString())
-        .anyTimes();
-    EasyMock.replay(inst);
-  }
-
   /**
    * This is a test to ensure the string literal in
    * {@link ConnectorImpl#ConnectorImpl(org.apache.accumulo.core.client.impl.ClientContext)} is kept
@@ -82,13 +71,13 @@ public class SystemCredentialsTest {
   public void testSystemToken() {
     assertEquals("org.apache.accumulo.server.security.SystemCredentials$SystemToken",
         SystemToken.class.getName());
-    assertEquals(SystemCredentials.get(inst).getToken().getClass(), SystemToken.class);
+    assertEquals(SystemCredentials.get(instanceId).getToken().getClass(), SystemToken.class);
   }
 
   @Test
   public void testSystemCredentials() {
-    Credentials a = SystemCredentials.get(inst);
-    Credentials b = SystemCredentials.get(inst);
+    Credentials a = SystemCredentials.get(instanceId);
+    Credentials b = SystemCredentials.get(instanceId);
     assertEquals(a, b);
   }
 }
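
SystemCredentialsTest likewise drops its Instance mock and passes the instance id straight to SystemCredentials.get(...). A one-method sketch, assuming a server-side classpath with a readable site configuration (the test above fakes one) and a placeholder instance id:

    import java.util.UUID;
    import org.apache.accumulo.server.security.SystemCredentials;

    public class SystemCredsSketch {
      public static void main(String[] args) {
        String instanceId = UUID.randomUUID().toString();  // placeholder; a real server uses its own id
        // The same instance id yields equal Credentials, as the test above asserts.
        System.out.println(SystemCredentials.get(instanceId).getToken().getClass().getSimpleName());
      }
    }
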
diff --git a/server/base/src/test/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenSecretManagerTest.java b/server/base/src/test/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenSecretManagerTest.java
index 1addbe2..dc7d2dd 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenSecretManagerTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenSecretManagerTest.java
@@ -17,7 +17,6 @@
 package org.apache.accumulo.server.security.delegation;
 
 import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
@@ -38,12 +37,10 @@ import java.util.concurrent.TimeUnit;
 import javax.crypto.KeyGenerator;
 
 import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
 import org.apache.accumulo.core.client.impl.AuthenticationTokenIdentifier;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
-import org.junit.After;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -68,30 +65,21 @@ public class AuthenticationTokenSecretManagerTest {
     keyGen.init(KEY_LENGTH);
   }
 
-  private Instance instance;
   private String instanceId;
   private DelegationTokenConfig cfg;
 
   @Before
-  public void setupMocks() {
-    instance = createMock(Instance.class);
+  public void setup() {
     instanceId = UUID.randomUUID().toString();
     cfg = new DelegationTokenConfig();
-    expect(instance.getInstanceID()).andReturn(instanceId).anyTimes();
-    replay(instance);
-  }
-
-  @After
-  public void verifyMocks() {
-    verify(instance);
   }
 
   @Test
   public void testAddKey() {
     // 1 minute
     long tokenLifetime = 60 * 1000;
-    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(instance,
-        tokenLifetime);
+    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(
+        instanceId, tokenLifetime);
 
     // Add a single key
     AuthenticationKey authKey = new AuthenticationKey(1, 0, tokenLifetime, keyGen.generateKey());
@@ -117,8 +105,8 @@ public class AuthenticationTokenSecretManagerTest {
   public void testRemoveKey() {
     // 1 minute
     long tokenLifetime = 60 * 1000;
-    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(instance,
-        tokenLifetime);
+    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(
+        instanceId, tokenLifetime);
 
     // Add a single key
     AuthenticationKey authKey = new AuthenticationKey(1, 0, tokenLifetime, keyGen.generateKey());
@@ -141,8 +129,8 @@ public class AuthenticationTokenSecretManagerTest {
 
     // 1 minute
     long tokenLifetime = 60 * 1000;
-    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(instance,
-        tokenLifetime);
+    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(
+        instanceId, tokenLifetime);
 
     // Add a current key
     secretManager
@@ -185,8 +173,8 @@ public class AuthenticationTokenSecretManagerTest {
 
     // 1 minute
     long tokenLifetime = 60 * 1000;
-    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(instance,
-        tokenLifetime);
+    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(
+        instanceId, tokenLifetime);
 
     // Add a current key
     secretManager
@@ -228,8 +216,8 @@ public class AuthenticationTokenSecretManagerTest {
 
     // 500ms lifetime
     long tokenLifetime = 500;
-    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(instance,
-        tokenLifetime);
+    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(
+        instanceId, tokenLifetime);
 
     // Add a current key
     secretManager
@@ -256,8 +244,8 @@ public class AuthenticationTokenSecretManagerTest {
     long then = System.currentTimeMillis();
 
     long tokenLifetime = 60 * 1000;
-    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(instance,
-        tokenLifetime);
+    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(
+        instanceId, tokenLifetime);
 
     // Add a current key
     secretManager
@@ -284,8 +272,8 @@ public class AuthenticationTokenSecretManagerTest {
     long then = System.currentTimeMillis();
 
     long tokenLifetime = 60 * 1000;
-    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(instance,
-        tokenLifetime);
+    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(
+        instanceId, tokenLifetime);
 
     // Add a current key
     AuthenticationKey authKey1 = new AuthenticationKey(1, then, then + tokenLifetime,
@@ -322,8 +310,8 @@ public class AuthenticationTokenSecretManagerTest {
 
     // 10s lifetime
     long tokenLifetime = 10 * 1000L;
-    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(instance,
-        tokenLifetime);
+    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(
+        instanceId, tokenLifetime);
 
     // Make 2 keys, and add only one. The second has double the expiration of the first
     AuthenticationKey authKey1 = new AuthenticationKey(1, then, then + tokenLifetime,
@@ -372,8 +360,8 @@ public class AuthenticationTokenSecretManagerTest {
 
     // 1 hr
     long tokenLifetime = 60 * 60 * 1000;
-    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(instance,
-        tokenLifetime);
+    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(
+        instanceId, tokenLifetime);
 
     // Add a current key
     secretManager
@@ -408,8 +396,8 @@ public class AuthenticationTokenSecretManagerTest {
 
     // 1 hr
     long tokenLifetime = 60 * 60 * 1000;
-    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(instance,
-        tokenLifetime);
+    AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(
+        instanceId, tokenLifetime);
 
     // Add a current key
     secretManager
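
The secret manager now takes the instance ID string directly, so the Instance mock and its @After verification drop out of the setup. A minimal construction sketch (the key algorithm and one-minute lifetime are arbitrary choices; only methods exercised by the tests above are used):

    import java.util.UUID;

    import javax.crypto.KeyGenerator;

    import org.apache.accumulo.server.security.delegation.AuthenticationKey;
    import org.apache.accumulo.server.security.delegation.AuthenticationTokenSecretManager;

    public class SecretManagerConstructionSketch {

      public static void main(String[] args) throws Exception {
        String instanceId = UUID.randomUUID().toString();
        long tokenLifetime = 60 * 1000; // 1 minute, matching the tests above

        // Constructor now takes the instance ID instead of an Instance
        AuthenticationTokenSecretManager secretManager = new AuthenticationTokenSecretManager(
            instanceId, tokenLifetime);

        // Keys are created and added exactly as before (HmacSHA256 is an assumed algorithm)
        KeyGenerator keyGen = KeyGenerator.getInstance("HmacSHA256");
        AuthenticationKey key = new AuthenticationKey(1, 0, tokenLifetime, keyGen.generateKey());
        secretManager.addKey(key);

        System.out.println("current key set: " + secretManager.isCurrentKeySet());
      }
    }
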
diff --git a/server/base/src/test/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcherTest.java b/server/base/src/test/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcherTest.java
index 8df552f..b0e379c 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcherTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcherTest.java
@@ -37,7 +37,6 @@ import java.util.UUID;
 import javax.crypto.KeyGenerator;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
 import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.apache.zookeeper.WatchedEvent;
@@ -62,7 +61,6 @@ public class ZooAuthenticationKeyWatcherTest {
   }
 
   private ZooReader zk;
-  private Instance instance;
   private String instanceId;
   private String baseNode;
   private long tokenLifetime = 7 * 24 * 60 * 60 * 1000; // 7days
@@ -72,11 +70,9 @@ public class ZooAuthenticationKeyWatcherTest {
   @Before
   public void setupMocks() {
     zk = createMock(ZooReader.class);
-    instance = createMock(Instance.class);
     instanceId = UUID.randomUUID().toString();
     baseNode = "/accumulo/" + instanceId + Constants.ZDELEGATION_TOKEN_KEYS;
-    expect(instance.getInstanceID()).andReturn(instanceId).anyTimes();
-    secretManager = new AuthenticationTokenSecretManager(instance, tokenLifetime);
+    secretManager = new AuthenticationTokenSecretManager(instanceId, tokenLifetime);
     keyWatcher = new ZooAuthenticationKeyWatcher(secretManager, zk, baseNode);
   }
 
@@ -85,11 +81,11 @@ public class ZooAuthenticationKeyWatcherTest {
     WatchedEvent event = new WatchedEvent(EventType.NodeCreated, null, baseNode);
 
     expect(zk.getChildren(baseNode, keyWatcher)).andReturn(Collections.emptyList());
-    replay(instance, zk);
+    replay(zk);
 
     keyWatcher.process(event);
 
-    verify(instance, zk);
+    verify(zk);
     assertTrue(secretManager.getKeys().isEmpty());
   }
 
@@ -104,11 +100,11 @@ public class ZooAuthenticationKeyWatcherTest {
     expect(zk.getChildren(baseNode, keyWatcher)).andReturn(children);
     expect(zk.getData(baseNode + "/1", keyWatcher, null)).andReturn(serializedKey1);
     expect(zk.getData(baseNode + "/2", keyWatcher, null)).andReturn(serializedKey2);
-    replay(instance, zk);
+    replay(zk);
 
     keyWatcher.process(event);
 
-    verify(instance, zk);
+    verify(zk);
     assertEquals(2, secretManager.getKeys().size());
     assertEquals(key1, secretManager.getKeys().get(key1.getKeyId()));
     assertEquals(key2, secretManager.getKeys().get(key2.getKeyId()));
@@ -125,11 +121,11 @@ public class ZooAuthenticationKeyWatcherTest {
     expect(zk.getChildren(baseNode, keyWatcher)).andReturn(children);
     expect(zk.getData(baseNode + "/1", keyWatcher, null)).andReturn(serializedKey1);
     expect(zk.getData(baseNode + "/2", keyWatcher, null)).andReturn(serializedKey2);
-    replay(instance, zk);
+    replay(zk);
 
     keyWatcher.process(event);
 
-    verify(instance, zk);
+    verify(zk);
     assertEquals(2, secretManager.getKeys().size());
     assertEquals(key1, secretManager.getKeys().get(key1.getKeyId()));
     assertEquals(key2, secretManager.getKeys().get(key2.getKeyId()));
@@ -145,11 +141,11 @@ public class ZooAuthenticationKeyWatcherTest {
     secretManager.addKey(key2);
     assertEquals(2, secretManager.getKeys().size());
 
-    replay(instance, zk);
+    replay(zk);
 
     keyWatcher.process(event);
 
-    verify(instance, zk);
+    verify(zk);
     assertEquals(0, secretManager.getKeys().size());
     assertFalse(secretManager.isCurrentKeySet());
   }
@@ -158,11 +154,11 @@ public class ZooAuthenticationKeyWatcherTest {
   public void testBaseNodeDataChanged() throws Exception {
     WatchedEvent event = new WatchedEvent(EventType.NodeDataChanged, null, baseNode);
 
-    replay(instance, zk);
+    replay(zk);
 
     keyWatcher.process(event);
 
-    verify(instance, zk);
+    verify(zk);
     assertEquals(0, secretManager.getKeys().size());
     assertFalse(secretManager.isCurrentKeySet());
   }
@@ -177,11 +173,11 @@ public class ZooAuthenticationKeyWatcherTest {
     byte[] serializedKey2 = serialize(key2);
 
     expect(zk.getData(event.getPath(), keyWatcher, null)).andReturn(serializedKey2);
-    replay(instance, zk);
+    replay(zk);
 
     keyWatcher.process(event);
 
-    verify(instance, zk);
+    verify(zk);
     assertEquals(2, secretManager.getKeys().size());
     assertEquals(key1, secretManager.getKeys().get(key1.getKeyId()));
     assertEquals(key2, secretManager.getKeys().get(key2.getKeyId()));
@@ -197,11 +193,11 @@ public class ZooAuthenticationKeyWatcherTest {
     secretManager.addKey(key2);
     assertEquals(2, secretManager.getKeys().size());
 
-    replay(instance, zk);
+    replay(zk);
 
     keyWatcher.process(event);
 
-    verify(instance, zk);
+    verify(zk);
     assertEquals(1, secretManager.getKeys().size());
     assertEquals(key2, secretManager.getKeys().get(key2.getKeyId()));
     assertEquals(key2, secretManager.getCurrentKey());
@@ -216,12 +212,12 @@ public class ZooAuthenticationKeyWatcherTest {
     secretManager.addKey(key2);
     assertEquals(2, secretManager.getKeys().size());
 
-    replay(instance, zk);
+    replay(zk);
 
     // Does nothing
     keyWatcher.process(event);
 
-    verify(instance, zk);
+    verify(zk);
     assertEquals(2, secretManager.getKeys().size());
     assertEquals(key1, secretManager.getKeys().get(key1.getKeyId()));
     assertEquals(key2, secretManager.getKeys().get(key2.getKeyId()));
@@ -232,11 +228,11 @@ public class ZooAuthenticationKeyWatcherTest {
   public void testInitialUpdateNoNode() throws Exception {
     expect(zk.exists(baseNode, keyWatcher)).andReturn(false);
 
-    replay(zk, instance);
+    replay(zk);
 
     keyWatcher.updateAuthKeys();
 
-    verify(zk, instance);
+    verify(zk);
     assertEquals(0, secretManager.getKeys().size());
     assertNull(secretManager.getCurrentKey());
   }
@@ -254,11 +250,11 @@ public class ZooAuthenticationKeyWatcherTest {
     expect(zk.getData(baseNode + "/" + key2.getKeyId(), keyWatcher, null))
         .andReturn(serialize(key2));
 
-    replay(zk, instance);
+    replay(zk);
 
     keyWatcher.updateAuthKeys();
 
-    verify(zk, instance);
+    verify(zk);
 
     assertEquals(2, secretManager.getKeys().size());
     assertEquals(key1, secretManager.getKeys().get(key1.getKeyId()));
@@ -291,20 +287,20 @@ public class ZooAuthenticationKeyWatcherTest {
     expect(zk.getData(baseNode + "/" + key2.getKeyId(), keyWatcher, null))
         .andReturn(serialize(key2));
 
-    replay(zk, instance);
+    replay(zk);
 
     // Initialize and then get disconnected
     keyWatcher.updateAuthKeys();
     keyWatcher.process(disconnectEvent);
 
-    verify(zk, instance);
+    verify(zk);
 
     // We should have no auth keys when we're disconnected
     assertEquals("Secret manager should be empty after a disconnect", 0,
         secretManager.getKeys().size());
     assertNull("Current key should be null", secretManager.getCurrentKey());
 
-    reset(zk, instance);
+    reset(zk);
 
     expect(zk.exists(baseNode, keyWatcher)).andReturn(true);
     expect(zk.getChildren(baseNode, keyWatcher)).andReturn(children);
@@ -313,12 +309,12 @@ public class ZooAuthenticationKeyWatcherTest {
     expect(zk.getData(baseNode + "/" + key2.getKeyId(), keyWatcher, null))
         .andReturn(serialize(key2));
 
-    replay(zk, instance);
+    replay(zk);
 
     // Reconnect again, get all the keys
     keyWatcher.process(reconnectEvent);
 
-    verify(zk, instance);
+    verify(zk);
 
     // Verify we have both keys
     assertEquals(2, secretManager.getKeys().size());
@@ -338,12 +334,12 @@ public class ZooAuthenticationKeyWatcherTest {
     expect(zk.getData(baseNode + "/" + key1.getKeyId(), keyWatcher, null))
         .andThrow(new NoNodeException());
 
-    replay(zk, instance);
+    replay(zk);
 
     // Initialize
     keyWatcher.updateAuthKeys();
 
-    verify(zk, instance);
+    verify(zk);
 
     // We should have no auth keys after initializing things
     assertEquals("Secret manager should be empty after a disconnect", 0,
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/AdminTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/AdminTest.java
index b533a79..ac98736 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/AdminTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/AdminTest.java
@@ -22,7 +22,7 @@ import java.util.Collections;
 import java.util.UUID;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.fate.zookeeper.ZooCache.ZcStat;
 import org.easymock.EasyMock;
@@ -32,17 +32,17 @@ public class AdminTest {
 
   @Test
   public void testZooKeeperTserverPath() {
-    Instance instance = EasyMock.createMock(Instance.class);
+    ClientContext context = EasyMock.createMock(ClientContext.class);
     String instanceId = UUID.randomUUID().toString();
 
-    EasyMock.expect(instance.getInstanceID()).andReturn(instanceId);
+    EasyMock.expect(context.getZooKeeperRoot()).andReturn(Constants.ZROOT + "/" + instanceId);
 
-    EasyMock.replay(instance);
+    EasyMock.replay(context);
 
     assertEquals(Constants.ZROOT + "/" + instanceId + Constants.ZTSERVERS,
-        Admin.getTServersZkPath(instance));
+        Admin.getTServersZkPath(context));
 
-    EasyMock.verify(instance);
+    EasyMock.verify(context);
   }
 
   @Test
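
The same approach works for any utility that only needs a ZooKeeper path: mock ClientContext.getZooKeeperRoot() and compose the child node, as the assertion above does for the tserver path. A minimal standalone sketch of that pattern (the class name is illustrative):

    import static org.junit.Assert.assertEquals;

    import java.util.UUID;

    import org.apache.accumulo.core.Constants;
    import org.apache.accumulo.core.client.impl.ClientContext;
    import org.easymock.EasyMock;

    public class ZooKeeperPathSketch {

      public static void main(String[] args) {
        String instanceId = UUID.randomUUID().toString();

        ClientContext context = EasyMock.createMock(ClientContext.class);
        EasyMock.expect(context.getZooKeeperRoot()).andReturn(Constants.ZROOT + "/" + instanceId)
            .anyTimes();
        EasyMock.replay(context);

        // Path-building code composes children off the context's root
        String tserversPath = context.getZooKeeperRoot() + Constants.ZTSERVERS;
        assertEquals(Constants.ZROOT + "/" + instanceId + Constants.ZTSERVERS, tserversPath);

        EasyMock.verify(context);
      }
    }
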
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/TServerUtilsTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/TServerUtilsTest.java
index 2003b38..4806602 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/TServerUtilsTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/TServerUtilsTest.java
@@ -29,79 +29,36 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.net.ServerSocket;
 import java.net.UnknownHostException;
-import java.util.List;
+import java.util.Properties;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.thrift.ClientService.Iface;
 import org.apache.accumulo.core.client.impl.thrift.ClientService.Processor;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.ConfigurationCopy;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.trace.wrappers.TraceWrap;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.client.ClientServiceHandler;
 import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.rpc.ServerAddress;
 import org.apache.accumulo.server.rpc.TServerUtils;
+import org.apache.accumulo.server.rpc.ThriftServerType;
 import org.apache.thrift.server.TServer;
 import org.apache.thrift.transport.TServerSocket;
+import org.easymock.EasyMock;
 import org.junit.After;
 import org.junit.Test;
 
 public class TServerUtilsTest {
 
-  protected static class TestInstance implements Instance {
-
-    @Override
-    public String getRootTabletLocation() {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public List<String> getMasterLocations() {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public String getInstanceID() {
-      return "1111";
-    }
-
-    @Override
-    public String getInstanceName() {
-      return "test";
-    }
-
-    @Override
-    public String getZooKeepers() {
-      return "";
-    }
-
-    @Override
-    public int getZooKeepersSessionTimeOut() {
-      return 30;
-    }
-
-    @Override
-    public Connector getConnector(String principal, AuthenticationToken token)
-        throws AccumuloException, AccumuloSecurityException {
-      throw new UnsupportedOperationException();
-    }
-
-  }
-
   protected static class TestServerConfigurationFactory extends ServerConfigurationFactory {
 
     private ConfigurationCopy conf = null;
 
-    public TestServerConfigurationFactory(Instance instance) {
-      super(instance);
+    public TestServerConfigurationFactory(ServerContext context) {
+      super(context);
       conf = new ConfigurationCopy(DefaultConfiguration.getInstance());
     }
 
@@ -163,9 +120,24 @@ public class TServerUtilsTest {
     // not dying is enough
   }
 
-  private static final TestInstance instance = new TestInstance();
+  private static ServerContext createMockContext() {
+    ServerContext context = EasyMock.createMock(ServerContext.class);
+    expect(context.getProperties()).andReturn(new Properties()).anyTimes();
+    expect(context.getZooKeepers()).andReturn("").anyTimes();
+    expect(context.getInstanceName()).andReturn("instance").anyTimes();
+    expect(context.getZooKeepersSessionTimeOut()).andReturn(1).anyTimes();
+    expect(context.getInstanceID()).andReturn("11111").anyTimes();
+    return context;
+  }
+
+  private static ServerContext createReplayMockInfo() {
+    ServerContext context = createMockContext();
+    replay(context);
+    return context;
+  }
+
   private static final TestServerConfigurationFactory factory = new TestServerConfigurationFactory(
-      instance);
+      createReplayMockInfo());
 
   @After
   public void resetProperty() {
@@ -286,7 +258,6 @@ public class TServerUtilsTest {
       if (null != server) {
         TServerUtils.stopTServer(server);
       }
-
     }
   }
 
@@ -318,7 +289,16 @@ public class TServerUtilsTest {
   }
 
   private ServerAddress startServer() throws Exception {
-    AccumuloServerContext ctx = new AccumuloServerContext(instance, factory);
+    ServerContext context = createMockContext();
+    expect(context.getServerConfFactory()).andReturn(factory).anyTimes();
+    ServerContext ctx = createMock(ServerContext.class);
+    expect(ctx.getInstanceID()).andReturn("instance").anyTimes();
+    expect(ctx.getConfiguration()).andReturn(factory.getSystemConfiguration());
+    expect(ctx.getThriftServerType()).andReturn(ThriftServerType.THREADPOOL);
+    expect(ctx.getServerSslParams()).andReturn(null).anyTimes();
+    expect(ctx.getSaslParams()).andReturn(null).anyTimes();
+    expect(ctx.getClientTimeoutInMillis()).andReturn((long) 1000).anyTimes();
+    replay(ctx, context);
     ClientServiceHandler clientHandler = new ClientServiceHandler(ctx, null, null);
     Iface rpcProxy = TraceWrap.service(clientHandler);
     Processor<Iface> processor = new Processor<>(rpcProxy);
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
index 4bde774..c81d267 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
@@ -46,8 +46,8 @@ import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.trace.Span;
 import org.apache.accumulo.core.trace.Trace;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.log.WalStateManager;
 import org.apache.accumulo.server.log.WalStateManager.WalMarkerException;
@@ -73,7 +73,7 @@ import com.google.common.collect.Iterators;
 public class GarbageCollectWriteAheadLogs {
   private static final Logger log = LoggerFactory.getLogger(GarbageCollectWriteAheadLogs.class);
 
-  private final AccumuloServerContext context;
+  private final ServerContext context;
   private final VolumeManager fs;
   private final boolean useTrash;
   private final LiveTServerSet liveServers;
@@ -90,8 +90,8 @@ public class GarbageCollectWriteAheadLogs {
    * @param useTrash
    *          true to move files to trash rather than delete them
    */
-  GarbageCollectWriteAheadLogs(final AccumuloServerContext context, VolumeManager fs,
-      boolean useTrash) throws IOException {
+  GarbageCollectWriteAheadLogs(final ServerContext context, VolumeManager fs, boolean useTrash)
+      throws IOException {
     this.context = context;
     this.fs = fs;
     this.useTrash = useTrash;
@@ -127,7 +127,7 @@ public class GarbageCollectWriteAheadLogs {
    *          a started LiveTServerSet instance
    */
   @VisibleForTesting
-  GarbageCollectWriteAheadLogs(AccumuloServerContext context, VolumeManager fs, boolean useTrash,
+  GarbageCollectWriteAheadLogs(ServerContext context, VolumeManager fs, boolean useTrash,
       LiveTServerSet liveTServerSet, WalStateManager walMarker, Iterable<TabletLocationState> store)
       throws IOException {
     this.context = context;
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
index 35b0893..18da383 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
@@ -38,15 +38,14 @@ import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.IsolatedScanner;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.conf.SiteConfiguration;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.PartialKey;
@@ -67,7 +66,6 @@ import org.apache.accumulo.core.replication.ReplicationTable;
 import org.apache.accumulo.core.replication.ReplicationTableOfflineException;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.trace.DistributedTrace;
 import org.apache.accumulo.core.trace.ProbabilitySampler;
 import org.apache.accumulo.core.trace.Span;
 import org.apache.accumulo.core.trace.Trace;
@@ -83,23 +81,17 @@ import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
 import org.apache.accumulo.fate.zookeeper.ZooLock.LockWatcher;
 import org.apache.accumulo.gc.replication.CloseWriteAheadLogReferences;
-import org.apache.accumulo.server.Accumulo;
-import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.ServerOpts;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManager.FileType;
-import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.fs.VolumeUtil;
-import org.apache.accumulo.server.metrics.MetricsSystemHelper;
 import org.apache.accumulo.server.replication.proto.Replication.Status;
 import org.apache.accumulo.server.rpc.ServerAddress;
 import org.apache.accumulo.server.rpc.TCredentialsUpdatingWrapper;
 import org.apache.accumulo.server.rpc.TServerUtils;
 import org.apache.accumulo.server.rpc.ThriftServerType;
-import org.apache.accumulo.server.security.SecurityUtil;
 import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.server.util.Halt;
 import org.apache.accumulo.server.zookeeper.ZooLock;
@@ -118,7 +110,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
 // Could/Should implement HighlyAvaialbleService but the Thrift server is already started before
 // the ZK lock is acquired. The server is only for metrics, there are no concerns about clients
 // using the service before the lock is acquired.
-public class SimpleGarbageCollector extends AccumuloServerContext implements Iface {
+public class SimpleGarbageCollector implements Iface {
   private static final Text EMPTY_TEXT = new Text();
 
   /**
@@ -140,46 +132,32 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
 
   private static final Logger log = LoggerFactory.getLogger(SimpleGarbageCollector.class);
 
+  private ServerContext context;
   private VolumeManager fs;
-  private Opts opts = new Opts();
+  private Opts opts;
   private ZooLock lock;
 
   private GCStatus status = new GCStatus(new GcCycleStats(), new GcCycleStats(), new GcCycleStats(),
       new GcCycleStats());
 
-  public static void main(String[] args) throws IOException {
+  public static void main(String[] args) {
     final String app = "gc";
     Opts opts = new Opts();
     opts.parseArgs(app, args);
-    SecurityUtil.serverLogin(SiteConfiguration.getInstance());
-    Instance instance = HdfsZooInstance.getInstance();
-    ServerConfigurationFactory conf = new ServerConfigurationFactory(instance);
-    log.info("Version " + Constants.VERSION);
-    log.info("Instance " + instance.getInstanceID());
-    final VolumeManager fs = VolumeManagerImpl.get();
-    MetricsSystemHelper.configure(SimpleGarbageCollector.class.getSimpleName());
-    Accumulo.init(fs, instance, conf, app);
-    SimpleGarbageCollector gc = new SimpleGarbageCollector(opts, instance, fs, conf);
-
-    DistributedTrace.enable(opts.getAddress(), app, conf.getSystemConfiguration());
+    ServerContext context = ServerContext.getInstance();
+    context.setupServer(app, SimpleGarbageCollector.class.getName(), opts.getAddress());
     try {
+      SimpleGarbageCollector gc = new SimpleGarbageCollector(opts, context);
       gc.run();
     } finally {
-      DistributedTrace.disable();
+      context.teardownServer();
     }
   }
 
-  /**
-   * Creates a new garbage collector.
-   *
-   * @param opts
-   *          options
-   */
-  public SimpleGarbageCollector(Opts opts, Instance instance, VolumeManager fs,
-      ServerConfigurationFactory confFactory) {
-    super(instance, confFactory);
+  public SimpleGarbageCollector(Opts opts, ServerContext context) {
+    this.context = context;
     this.opts = opts;
-    this.fs = fs;
+    this.fs = context.getVolumeManager();
 
     long gcDelay = getConfiguration().getTimeInMillis(Property.GC_CYCLE_DELAY);
     log.info("start delay: {} milliseconds", getStartDelay());
@@ -191,6 +169,18 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
     log.info("delete threads: {}", getNumDeleteThreads());
   }
 
+  ServerContext getContext() {
+    return context;
+  }
+
+  AccumuloConfiguration getConfiguration() {
+    return context.getConfiguration();
+  }
+
+  Connector getConnector() throws AccumuloSecurityException, AccumuloException {
+    return context.getConnector();
+  }
+
   /**
    * Gets the delay before the first collection.
    *
@@ -317,7 +307,7 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
 
     @Override
     public Set<Table.ID> getTableIDs() {
-      return Tables.getIdToNameMap(SimpleGarbageCollector.this).keySet();
+      return Tables.getIdToNameMap(context).keySet();
     }
 
     @Override
@@ -585,7 +575,7 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
       // before running GarbageCollectWriteAheadLogs to ensure we delete as many files as possible.
       Span replSpan = Trace.start("replicationClose");
       try {
-        CloseWriteAheadLogReferences closeWals = new CloseWriteAheadLogReferences(this);
+        CloseWriteAheadLogReferences closeWals = new CloseWriteAheadLogReferences(context);
         closeWals.run();
       } catch (Exception e) {
         log.error("Error trying to close write-ahead logs for replication table", e);
@@ -596,7 +586,7 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
       // Clean up any unused write-ahead logs
       Span waLogs = Trace.start("walogs");
       try {
-        GarbageCollectWriteAheadLogs walogCollector = new GarbageCollectWriteAheadLogs(this, fs,
+        GarbageCollectWriteAheadLogs walogCollector = new GarbageCollectWriteAheadLogs(context, fs,
             isUsingTrash());
         log.info("Beginning garbage collection of write-ahead logs");
         walogCollector.collect(status);
@@ -704,7 +694,7 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
   }
 
   private void getZooLock(HostAndPort addr) throws KeeperException, InterruptedException {
-    String path = ZooUtil.getRoot(getInstance()) + Constants.ZGC_LOCK;
+    String path = ZooUtil.getRoot(context.getInstanceID()) + Constants.ZGC_LOCK;
 
     LockWatcher lockWatcher = new LockWatcher() {
       @Override
@@ -741,7 +731,7 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
   private HostAndPort startStatsService() throws UnknownHostException {
     Iface rpcProxy = TraceWrap.service(this);
     final Processor<Iface> processor;
-    if (ThriftServerType.SASL == getThriftServerType()) {
+    if (ThriftServerType.SASL == context.getThriftServerType()) {
       Iface tcProxy = TCredentialsUpdatingWrapper.service(rpcProxy, getClass(), getConfiguration());
       processor = new Processor<>(tcProxy);
     } else {
@@ -751,10 +741,11 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
     HostAndPort[] addresses = TServerUtils.getHostAndPorts(this.opts.getAddress(), port);
     long maxMessageSize = getConfiguration().getAsBytes(Property.GENERAL_MAX_MESSAGE_SIZE);
     try {
-      ServerAddress server = TServerUtils.startTServer(getConfiguration(), getThriftServerType(),
-          processor, this.getClass().getSimpleName(), "GC Monitor Service", 2,
+      ServerAddress server = TServerUtils.startTServer(getConfiguration(),
+          context.getThriftServerType(), processor, this.getClass().getSimpleName(),
+          "GC Monitor Service", 2,
           getConfiguration().getCount(Property.GENERAL_SIMPLETIMER_THREADPOOL_SIZE), 1000,
-          maxMessageSize, getServerSslParams(), getSaslParams(), 0, addresses);
+          maxMessageSize, context.getServerSslParams(), context.getSaslParams(), 0, addresses);
       log.debug("Starting garbage collector listening on " + server.address);
       return server.address;
     } catch (Exception ex) {
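
The main() above also shows the bootstrap shape that replaces the old HdfsZooInstance/Accumulo.init/DistributedTrace sequence: obtain the singleton ServerContext, call setupServer, and pair it with teardownServer in a finally block. A minimal sketch of that shape for a hypothetical service (the "myservice" name, MyService class, and literal address are placeholders; setupServer/teardownServer are used exactly as in the GC main above):

    import org.apache.accumulo.server.ServerContext;

    public class MyService {

      private final ServerContext context;

      MyService(ServerContext context) {
        this.context = context;
      }

      void run() {
        // a real service would use context.getConnector(), context.getConfiguration(), etc.
      }

      public static void main(String[] args) {
        ServerContext context = ServerContext.getInstance();
        // app name, fully qualified class name, and advertised address
        context.setupServer("myservice", MyService.class.getName(), "localhost");
        try {
          new MyService(context).run();
        } finally {
          context.teardownServer();
        }
      }
    }
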
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java b/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
index 3f0eecf..22ca046 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
@@ -45,7 +45,7 @@ import org.apache.accumulo.core.trace.Span;
 import org.apache.accumulo.core.trace.Trace;
 import org.apache.accumulo.core.trace.thrift.TInfo;
 import org.apache.accumulo.core.util.HostAndPort;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.log.WalStateManager;
 import org.apache.accumulo.server.log.WalStateManager.WalMarkerException;
 import org.apache.accumulo.server.log.WalStateManager.WalState;
@@ -74,9 +74,9 @@ public class CloseWriteAheadLogReferences implements Runnable {
 
   private static final String RFILE_SUFFIX = "." + RFile.EXTENSION;
 
-  private final AccumuloServerContext context;
+  private final ServerContext context;
 
-  public CloseWriteAheadLogReferences(AccumuloServerContext context) {
+  public CloseWriteAheadLogReferences(ServerContext context) {
     this.context = context;
   }
 
diff --git a/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java b/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java
index 19259b2..a5765e8 100644
--- a/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java
+++ b/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java
@@ -38,7 +38,7 @@ import org.apache.accumulo.core.replication.ReplicationSchema;
 import org.apache.accumulo.core.replication.ReplicationTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.log.WalStateManager;
 import org.apache.accumulo.server.log.WalStateManager.WalState;
@@ -84,7 +84,7 @@ public class GarbageCollectWriteAheadLogsTest {
 
   @Test
   public void testRemoveUnusedLog() throws Exception {
-    AccumuloServerContext context = EasyMock.createMock(AccumuloServerContext.class);
+    ServerContext context = EasyMock.createMock(ServerContext.class);
     VolumeManager fs = EasyMock.createMock(VolumeManager.class);
     WalStateManager marker = EasyMock.createMock(WalStateManager.class);
     LiveTServerSet tserverSet = EasyMock.createMock(LiveTServerSet.class);
@@ -118,7 +118,7 @@ public class GarbageCollectWriteAheadLogsTest {
 
   @Test
   public void testKeepClosedLog() throws Exception {
-    AccumuloServerContext context = EasyMock.createMock(AccumuloServerContext.class);
+    ServerContext context = EasyMock.createMock(ServerContext.class);
     VolumeManager fs = EasyMock.createMock(VolumeManager.class);
     WalStateManager marker = EasyMock.createMock(WalStateManager.class);
     LiveTServerSet tserverSet = EasyMock.createMock(LiveTServerSet.class);
@@ -148,7 +148,7 @@ public class GarbageCollectWriteAheadLogsTest {
 
   @Test
   public void deleteUnreferenceLogOnDeadServer() throws Exception {
-    AccumuloServerContext context = EasyMock.createMock(AccumuloServerContext.class);
+    ServerContext context = EasyMock.createMock(ServerContext.class);
     VolumeManager fs = EasyMock.createMock(VolumeManager.class);
     WalStateManager marker = EasyMock.createMock(WalStateManager.class);
     LiveTServerSet tserverSet = EasyMock.createMock(LiveTServerSet.class);
@@ -194,7 +194,7 @@ public class GarbageCollectWriteAheadLogsTest {
 
   @Test
   public void ignoreReferenceLogOnDeadServer() throws Exception {
-    AccumuloServerContext context = EasyMock.createMock(AccumuloServerContext.class);
+    ServerContext context = EasyMock.createMock(ServerContext.class);
     VolumeManager fs = EasyMock.createMock(VolumeManager.class);
     WalStateManager marker = EasyMock.createMock(WalStateManager.class);
     LiveTServerSet tserverSet = EasyMock.createMock(LiveTServerSet.class);
@@ -235,7 +235,7 @@ public class GarbageCollectWriteAheadLogsTest {
 
   @Test
   public void replicationDelaysFileCollection() throws Exception {
-    AccumuloServerContext context = EasyMock.createMock(AccumuloServerContext.class);
+    ServerContext context = EasyMock.createMock(ServerContext.class);
     VolumeManager fs = EasyMock.createMock(VolumeManager.class);
     WalStateManager marker = EasyMock.createMock(WalStateManager.class);
     LiveTServerSet tserverSet = EasyMock.createMock(LiveTServerSet.class);
diff --git a/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java b/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java
index 848ed3b..5d82004 100644
--- a/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java
+++ b/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java
@@ -32,25 +32,22 @@ import java.io.FileNotFoundException;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Credentials;
 import org.apache.accumulo.core.conf.ConfigurationCopy;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.conf.SiteConfiguration;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.trace.thrift.TInfo;
 import org.apache.accumulo.gc.SimpleGarbageCollector.Opts;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.hadoop.fs.Path;
-import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
 
 public class SimpleGarbageCollectorTest {
   private VolumeManager volMgr;
-  private Instance instance;
+  private ServerContext context;
   private Credentials credentials;
   private Opts opts;
   private SimpleGarbageCollector gc;
@@ -59,35 +56,24 @@ public class SimpleGarbageCollectorTest {
   @Before
   public void setUp() {
     volMgr = createMock(VolumeManager.class);
-    instance = createMock(Instance.class);
-    SiteConfiguration siteConfig = EasyMock.createMock(SiteConfiguration.class);
-    expect(instance.getInstanceID()).andReturn("mock").anyTimes();
-    expect(instance.getZooKeepers()).andReturn("localhost").anyTimes();
-    expect(instance.getZooKeepersSessionTimeOut()).andReturn(30000).anyTimes();
+    context = createMock(ServerContext.class);
+    expect(context.getInstanceID()).andReturn("mock").anyTimes();
+    expect(context.getZooKeepers()).andReturn("localhost").anyTimes();
+    expect(context.getZooKeepersSessionTimeOut()).andReturn(30000).anyTimes();
 
     opts = new Opts();
     systemConfig = createSystemConfig();
-    ServerConfigurationFactory factory = createMock(ServerConfigurationFactory.class);
-    expect(factory.getSystemConfiguration()).andReturn(systemConfig).anyTimes();
-    expect(factory.getSiteConfiguration()).andReturn(siteConfig).anyTimes();
-
-    // Just make the SiteConfiguration delegate to our AccumuloConfiguration
-    // Presently, we only need get(Property) and iterator().
-    EasyMock.expect(siteConfig.get(EasyMock.anyObject(Property.class))).andAnswer(() -> {
-      Object[] args = EasyMock.getCurrentArguments();
-      return systemConfig.get((Property) args[0]);
-    }).anyTimes();
-    EasyMock.expect(siteConfig.getBoolean(EasyMock.anyObject(Property.class))).andAnswer(() -> {
-      Object[] args = EasyMock.getCurrentArguments();
-      return systemConfig.getBoolean((Property) args[0]);
-    }).anyTimes();
-
-    EasyMock.expect(siteConfig.iterator()).andAnswer(() -> systemConfig.iterator()).anyTimes();
-
-    replay(instance, factory, siteConfig);
-
-    credentials = SystemCredentials.get(instance);
-    gc = new SimpleGarbageCollector(opts, instance, volMgr, factory);
+    expect(context.getConfiguration()).andReturn(systemConfig).anyTimes();
+    expect(context.getVolumeManager()).andReturn(volMgr).anyTimes();
+
+    credentials = SystemCredentials.get("mock");
+    expect(context.getPrincipal()).andReturn(credentials.getPrincipal()).anyTimes();
+    expect(context.getAuthenticationToken()).andReturn(credentials.getToken()).anyTimes();
+    expect(context.getCredentials()).andReturn(credentials).anyTimes();
+
+    replay(context);
+
+    gc = new SimpleGarbageCollector(opts, context);
   }
 
   @Test
@@ -111,8 +97,7 @@ public class SimpleGarbageCollectorTest {
   @Test
   public void testInit() throws Exception {
     assertSame(volMgr, gc.getVolumeManager());
-    assertSame(instance, gc.getInstance());
-    assertEquals(credentials, gc.getCredentials());
+    assertEquals(credentials, gc.getContext().getCredentials());
     assertTrue(gc.isUsingTrash());
     assertEquals(1000L, gc.getStartDelay());
     assertEquals(2, gc.getNumDeleteThreads());
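
The setUp above illustrates the general recipe for unit tests that used to assemble an Instance plus ServerConfigurationFactory: mock ServerContext once and stub only the accessors the code under test touches. A minimal sketch of such a mock with an overridable configuration (the GC_CYCLE_DELAY override and ConfigurationCopy.set call are assumptions used for illustration):

    import static org.easymock.EasyMock.createMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;

    import org.apache.accumulo.core.conf.ConfigurationCopy;
    import org.apache.accumulo.core.conf.DefaultConfiguration;
    import org.apache.accumulo.core.conf.Property;
    import org.apache.accumulo.server.ServerContext;
    import org.apache.accumulo.server.fs.VolumeManager;

    public class MockServerContextSketch {

      static ServerContext mockContext() {
        // Start from the defaults and override what the test needs
        ConfigurationCopy conf = new ConfigurationCopy(DefaultConfiguration.getInstance());
        conf.set(Property.GC_CYCLE_DELAY, "1s");

        ServerContext context = createMock(ServerContext.class);
        expect(context.getInstanceID()).andReturn("mock").anyTimes();
        expect(context.getConfiguration()).andReturn(conf).anyTimes();
        expect(context.getVolumeManager()).andReturn(createMock(VolumeManager.class)).anyTimes();
        replay(context);
        return context;
      }
    }
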
diff --git a/server/master/src/main/findbugs/exclude-filter.xml b/server/master/src/main/findbugs/exclude-filter.xml
index 32f3c4b..1bce781 100644
--- a/server/master/src/main/findbugs/exclude-filter.xml
+++ b/server/master/src/main/findbugs/exclude-filter.xml
@@ -29,7 +29,7 @@
   <Match>
     <!-- shouldn't close the given I/O streams, because they are just wrapped -->
     <Class name="org.apache.accumulo.master.tableOps.WriteExportFiles" />
-    <Method name="exportTable" params="org.apache.accumulo.server.fs.VolumeManager,org.apache.accumulo.server.AccumuloServerContext,java.lang.String,java.lang.String,java.lang.String" returns="void" />
+    <Method name="exportTable" params="org.apache.accumulo.server.fs.VolumeManager,org.apache.accumulo.server.ServerContext,java.lang.String,java.lang.String,java.lang.String" returns="void" />
     <Bug code="OS" pattern="OS_OPEN_STREAM" />
   </Match>
   <Match>
diff --git a/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java b/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
index 864beef..cda934c 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
@@ -118,7 +118,8 @@ class FateServiceHandler implements FateService.Iface {
             Namespaces.NOT_DEFAULT.and(Namespaces.NOT_ACCUMULO));
         String newName = validateNamespaceArgument(arguments.get(1), tableOp, null);
 
-        Namespace.ID namespaceId = ClientServiceHandler.checkNamespaceId(master, oldName, tableOp);
+        Namespace.ID namespaceId = ClientServiceHandler.checkNamespaceId(master.getContext(),
+            oldName, tableOp);
         if (!master.security.canRenameNamespace(c, namespaceId, oldName, newName))
           throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
 
@@ -131,8 +132,8 @@ class FateServiceHandler implements FateService.Iface {
         String namespace = validateNamespaceArgument(arguments.get(0), tableOp,
             Namespaces.NOT_DEFAULT.and(Namespaces.NOT_ACCUMULO));
 
-        Namespace.ID namespaceId = ClientServiceHandler.checkNamespaceId(master, namespace,
-            tableOp);
+        Namespace.ID namespaceId = ClientServiceHandler.checkNamespaceId(master.getContext(),
+            namespace, tableOp);
         if (!master.security.canDeleteNamespace(c, namespaceId))
           throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
 
@@ -148,7 +149,8 @@ class FateServiceHandler implements FateService.Iface {
         Namespace.ID namespaceId;
 
         try {
-          namespaceId = Namespaces.getNamespaceId(master, Tables.qualify(tableName).getFirst());
+          namespaceId = Namespaces.getNamespaceId(master.getContext(),
+              Tables.qualify(tableName).getFirst());
         } catch (NamespaceNotFoundException e) {
           throw new ThriftTableOperationException(null, tableName, tableOp,
               TableOperationExceptionType.NAMESPACE_NOTFOUND, "");
@@ -186,7 +188,8 @@ class FateServiceHandler implements FateService.Iface {
 
             });
 
-        Table.ID tableId = ClientServiceHandler.checkTableId(master, oldTableName, tableOp);
+        Table.ID tableId = ClientServiceHandler.checkTableId(master.getContext(), oldTableName,
+            tableOp);
         Namespace.ID namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
 
         final boolean canRename;
@@ -218,7 +221,8 @@ class FateServiceHandler implements FateService.Iface {
         String tableName = validateTableNameArgument(arguments.get(1), tableOp, NOT_SYSTEM);
         Namespace.ID namespaceId;
         try {
-          namespaceId = Namespaces.getNamespaceId(master, Tables.qualify(tableName).getFirst());
+          namespaceId = Namespaces.getNamespaceId(master.getContext(),
+              Tables.qualify(tableName).getFirst());
         } catch (NamespaceNotFoundException e) {
           // shouldn't happen, but possible once cloning between namespaces is supported
           throw new ThriftTableOperationException(null, tableName, tableOp,
@@ -266,7 +270,8 @@ class FateServiceHandler implements FateService.Iface {
         TableOperation tableOp = TableOperation.DELETE;
         String tableName = validateTableNameArgument(arguments.get(0), tableOp, NOT_SYSTEM);
 
-        final Table.ID tableId = ClientServiceHandler.checkTableId(master, tableName, tableOp);
+        final Table.ID tableId = ClientServiceHandler.checkTableId(master.getContext(), tableName,
+            tableOp);
         Namespace.ID namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
 
         final boolean canDeleteTable;
@@ -331,7 +336,8 @@ class FateServiceHandler implements FateService.Iface {
         Text startRow = ByteBufferUtil.toText(arguments.get(1));
         Text endRow = ByteBufferUtil.toText(arguments.get(2));
 
-        final Table.ID tableId = ClientServiceHandler.checkTableId(master, tableName, tableOp);
+        final Table.ID tableId = ClientServiceHandler.checkTableId(master.getContext(), tableName,
+            tableOp);
         Namespace.ID namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
 
         final boolean canMerge;
@@ -357,7 +363,8 @@ class FateServiceHandler implements FateService.Iface {
         Text startRow = ByteBufferUtil.toText(arguments.get(1));
         Text endRow = ByteBufferUtil.toText(arguments.get(2));
 
-        final Table.ID tableId = ClientServiceHandler.checkTableId(master, tableName, tableOp);
+        final Table.ID tableId = ClientServiceHandler.checkTableId(master.getContext(), tableName,
+            tableOp);
         Namespace.ID namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
 
         final boolean canDeleteRange;
@@ -384,7 +391,8 @@ class FateServiceHandler implements FateService.Iface {
         String failDir = ByteBufferUtil.toString(arguments.get(2));
         boolean setTime = Boolean.parseBoolean(ByteBufferUtil.toString(arguments.get(3)));
 
-        final Table.ID tableId = ClientServiceHandler.checkTableId(master, tableName, tableOp);
+        final Table.ID tableId = ClientServiceHandler.checkTableId(master.getContext(), tableName,
+            tableOp);
         Namespace.ID namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
 
         final boolean canBulkImport;
@@ -458,7 +466,8 @@ class FateServiceHandler implements FateService.Iface {
         String exportDir = ByteBufferUtil.toString(arguments.get(1));
         Namespace.ID namespaceId;
         try {
-          namespaceId = Namespaces.getNamespaceId(master, Tables.qualify(tableName).getFirst());
+          namespaceId = Namespaces.getNamespaceId(master.getContext(),
+              Tables.qualify(tableName).getFirst());
         } catch (NamespaceNotFoundException e) {
           throw new ThriftTableOperationException(null, tableName, tableOp,
               TableOperationExceptionType.NAMESPACE_NOTFOUND, "");
@@ -485,7 +494,8 @@ class FateServiceHandler implements FateService.Iface {
         String tableName = validateTableNameArgument(arguments.get(0), tableOp, NOT_SYSTEM);
         String exportDir = ByteBufferUtil.toString(arguments.get(1));
 
-        Table.ID tableId = ClientServiceHandler.checkTableId(master, tableName, tableOp);
+        Table.ID tableId = ClientServiceHandler.checkTableId(master.getContext(), tableName,
+            tableOp);
         Namespace.ID namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
 
         final boolean canExport;
@@ -515,7 +525,7 @@ class FateServiceHandler implements FateService.Iface {
 
         final boolean canBulkImport;
         try {
-          String tableName = Tables.getTableName(master, tableId);
+          String tableName = Tables.getTableName(master.getContext(), tableId);
           canBulkImport = master.security.canBulkImport(c, tableId, tableName, dir, null,
               namespaceId);
         } catch (ThriftSecurityException e) {
@@ -543,7 +553,7 @@ class FateServiceHandler implements FateService.Iface {
       throws ThriftTableOperationException {
     Namespace.ID namespaceId;
     try {
-      namespaceId = Tables.getNamespaceId(master, tableId);
+      namespaceId = Tables.getNamespaceId(master.getContext(), tableId);
     } catch (TableNotFoundException e) {
       throw new ThriftTableOperationException(tableId.canonicalID(), null, tableOp,
           TableOperationExceptionType.NOTFOUND, e.getMessage());
diff --git a/server/master/src/main/java/org/apache/accumulo/master/Master.java b/server/master/src/main/java/org/apache/accumulo/master/Master.java
index 1a4563e..308212b 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/Master.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/Master.java
@@ -44,7 +44,6 @@ import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.Namespace;
@@ -57,7 +56,6 @@ import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
 import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.conf.SiteConfiguration;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.data.impl.KeyExtent;
@@ -79,7 +77,6 @@ import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.NamespacePermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.tabletserver.thrift.TUnloadTabletGoal;
-import org.apache.accumulo.core.trace.DistributedTrace;
 import org.apache.accumulo.core.trace.thrift.TInfo;
 import org.apache.accumulo.core.trace.wrappers.TraceWrap;
 import org.apache.accumulo.core.util.Daemon;
@@ -98,16 +95,14 @@ import org.apache.accumulo.master.replication.ReplicationDriver;
 import org.apache.accumulo.master.replication.WorkDriver;
 import org.apache.accumulo.master.state.TableCounts;
 import org.apache.accumulo.server.Accumulo;
-import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.HighlyAvailableService;
 import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.ServerOpts;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManager.FileType;
-import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.init.Initialize;
 import org.apache.accumulo.server.log.WalStateManager;
 import org.apache.accumulo.server.log.WalStateManager.WalMarkerException;
@@ -129,7 +124,6 @@ import org.apache.accumulo.server.master.state.TabletState;
 import org.apache.accumulo.server.master.state.ZooStore;
 import org.apache.accumulo.server.master.state.ZooTabletStateStore;
 import org.apache.accumulo.server.metrics.Metrics;
-import org.apache.accumulo.server.metrics.MetricsSystemHelper;
 import org.apache.accumulo.server.replication.ZooKeeperInitialization;
 import org.apache.accumulo.server.rpc.HighlyAvailableServiceWrapper;
 import org.apache.accumulo.server.rpc.ServerAddress;
@@ -138,7 +132,6 @@ import org.apache.accumulo.server.rpc.TServerUtils;
 import org.apache.accumulo.server.rpc.ThriftServerType;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityOperation;
-import org.apache.accumulo.server.security.SecurityUtil;
 import org.apache.accumulo.server.security.delegation.AuthenticationTokenKeyManager;
 import org.apache.accumulo.server.security.delegation.AuthenticationTokenSecretManager;
 import org.apache.accumulo.server.security.delegation.ZooAuthenticationKeyDistributor;
@@ -177,7 +170,7 @@ import com.google.common.collect.Iterables;
  *
  * The master will also coordinate log recoveries and reports general status.
  */
-public class Master extends AccumuloServerContext
+public class Master
     implements LiveTServerSet.Listener, TableObserver, CurrentState, HighlyAvailableService {
 
   final static Logger log = LoggerFactory.getLogger(Master.class);
@@ -410,7 +403,7 @@ public class Master extends AccumuloServerContext
           String ns = namespace.getFirst();
           Namespace.ID id = namespace.getSecond();
           log.debug("Upgrade creating namespace \"{}\" (ID: {})", ns, id);
-          if (!Namespaces.exists(this, id))
+          if (!Namespaces.exists(context, id))
             TableManager.prepareNewNamespaceState(getInstanceID(), id, ns, NodeExistsPolicy.SKIP);
         }
 
@@ -426,7 +419,7 @@ public class Master extends AccumuloServerContext
             RootTable.NAME, TableState.ONLINE, NodeExistsPolicy.SKIP);
         Initialize.initSystemTablesConfig();
         // ensure root user can flush root table
-        security.grantTablePermission(rpcCreds(), security.getRootUsername(), RootTable.ID,
+        security.grantTablePermission(context.rpcCreds(), security.getRootUsername(), RootTable.ID,
             TablePermission.ALTER_TABLE, Namespace.ID.ACCUMULO);
 
         // put existing tables in the correct namespaces
@@ -484,6 +477,7 @@ public class Master extends AccumuloServerContext
   private final AtomicBoolean upgradeMetadataRunning = new AtomicBoolean(false);
   private final CountDownLatch waitForMetadataUpgrade = new CountDownLatch(1);
 
+  private final ServerContext context;
   private final ServerConfigurationFactory serverConfig;
 
   private MasterClientServiceHandler clientHandler;
@@ -516,17 +510,17 @@ public class Master extends AccumuloServerContext
               log.info("Starting to upgrade metadata table.");
               if (version == ServerConstants.MOVE_DELETE_MARKERS - 1) {
                 log.info("Updating Delete Markers in metadata table for version 1.4");
-                MetadataTableUtil.moveMetaDeleteMarkersFrom14(Master.this);
+                MetadataTableUtil.moveMetaDeleteMarkersFrom14(context);
                 version++;
               }
               if (version == ServerConstants.MOVE_TO_ROOT_TABLE - 1) {
                 log.info("Updating Delete Markers in metadata table.");
-                MetadataTableUtil.moveMetaDeleteMarkers(Master.this);
+                MetadataTableUtil.moveMetaDeleteMarkers(context);
                 version++;
               }
               if (version == ServerConstants.MOVE_TO_REPLICATION_TABLE - 1) {
                 log.info("Updating metadata table with entries for the replication table");
-                MetadataTableUtil.createReplicationTable(Master.this);
+                MetadataTableUtil.createReplicationTable(context);
                 version++;
               }
               log.info("Updating persistent data version.");
@@ -627,18 +621,25 @@ public class Master extends AccumuloServerContext
   }
 
   public void mustBeOnline(final Table.ID tableId) throws ThriftTableOperationException {
-    Tables.clearCache(getInstance());
-    if (!Tables.getTableState(this, tableId).equals(TableState.ONLINE))
+    Tables.clearCache(context);
+    if (!Tables.getTableState(context, tableId).equals(TableState.ONLINE))
       throw new ThriftTableOperationException(tableId.canonicalID(), null, TableOperation.MERGE,
           TableOperationExceptionType.OFFLINE, "table is not online");
   }
 
-  public Master(Instance instance, ServerConfigurationFactory config, VolumeManager fs,
-      String hostname) throws IOException {
-    super(instance, config);
-    this.serverConfig = config;
-    this.fs = fs;
-    this.hostname = hostname;
+  public ServerContext getContext() {
+    return context;
+  }
+
+  public Connector getConnector() throws AccumuloSecurityException, AccumuloException {
+    return context.getConnector();
+  }
+
+  public Master(ServerContext context) throws IOException {
+    this.context = context;
+    this.serverConfig = context.getServerConfFactory();
+    this.fs = context.getVolumeManager();
+    this.hostname = context.getHostname();
 
     AccumuloConfiguration aconf = serverConfig.getSystemConfiguration();
 
@@ -647,10 +648,10 @@ public class Master extends AccumuloServerContext
     timeKeeper = new MasterTime(this);
     ThriftTransportPool.getInstance()
         .setIdleTime(aconf.getTimeInMillis(Property.GENERAL_RPC_TIMEOUT));
-    tserverSet = new LiveTServerSet(this, this);
+    tserverSet = new LiveTServerSet(context, this);
     this.tabletBalancer = Property.createInstanceFromPropertyName(aconf,
         Property.MASTER_TABLET_BALANCER, TabletBalancer.class, new DefaultLoadBalancer());
-    this.tabletBalancer.init(this);
+    this.tabletBalancer.init(context);
 
     try {
       AccumuloVFSClassLoader.getContextManager()
@@ -665,11 +666,11 @@ public class Master extends AccumuloServerContext
       throw new RuntimeException(e);
     }
 
-    this.security = AuditedSecurityOperation.getInstance(this);
+    this.security = AuditedSecurityOperation.getInstance(context);
 
     // Create the secret manager (can generate and verify delegation tokens)
     final long tokenLifetime = aconf.getTimeInMillis(Property.GENERAL_DELEGATION_TOKEN_LIFETIME);
-    setSecretManager(new AuthenticationTokenSecretManager(getInstance(), tokenLifetime));
+    context.setSecretManager(new AuthenticationTokenSecretManager(getInstanceID(), tokenLifetime));
 
     authenticationTokenKeyManager = null;
     keyDistributor = null;
@@ -681,7 +682,7 @@ public class Master extends AccumuloServerContext
           .getTimeInMillis(Property.GENERAL_DELEGATION_TOKEN_UPDATE_INTERVAL);
       keyDistributor = new ZooAuthenticationKeyDistributor(ZooReaderWriter.getInstance(),
           ZooUtil.getRoot(getInstanceID()) + Constants.ZDELEGATION_TOKEN_KEYS);
-      authenticationTokenKeyManager = new AuthenticationTokenKeyManager(getSecretManager(),
+      authenticationTokenKeyManager = new AuthenticationTokenKeyManager(context.getSecretManager(),
           keyDistributor, tokenUpdateInterval, tokenLifetime);
       delegationTokensAvailable = true;
     } else {
@@ -691,6 +692,14 @@ public class Master extends AccumuloServerContext
 
   }
 
+  public String getInstanceID() {
+    return context.getInstanceID();
+  }
+
+  public AccumuloConfiguration getConfiguration() {
+    return context.getConfiguration();
+  }
+
   public TServerConnection getConnection(TServerInstance server) {
     return tserverSet.getConnection(server);
   }
@@ -909,7 +918,7 @@ public class Master extends AccumuloServerContext
         if (!migrations.isEmpty()) {
           try {
             cleanupOfflineMigrations();
-            cleanupNonexistentMigrations(getConnector());
+            cleanupNonexistentMigrations(context.getConnector());
           } catch (Exception ex) {
             log.error("Error cleaning up migrations", ex);
           }
@@ -943,7 +952,7 @@ public class Master extends AccumuloServerContext
      */
     private void cleanupOfflineMigrations() {
       TableManager manager = TableManager.getInstance();
-      for (Table.ID tableId : Tables.getIdToNameMap(Master.this).keySet()) {
+      for (Table.ID tableId : Tables.getIdToNameMap(context).keySet()) {
         TableState state = manager.getTableState(tableId);
         if (TableState.OFFLINE == state) {
           clearMigrations(tableId);
@@ -1232,14 +1241,14 @@ public class Master extends AccumuloServerContext
     Iface haProxy = HighlyAvailableServiceWrapper.service(clientHandler, this);
     Iface rpcProxy = TraceWrap.service(haProxy);
     final Processor<Iface> processor;
-    if (ThriftServerType.SASL == getThriftServerType()) {
+    if (ThriftServerType.SASL == context.getThriftServerType()) {
       Iface tcredsProxy = TCredentialsUpdatingWrapper.service(rpcProxy, clientHandler.getClass(),
           getConfiguration());
       processor = new Processor<>(tcredsProxy);
     } else {
       processor = new Processor<>(rpcProxy);
     }
-    ServerAddress sa = TServerUtils.startServer(this, hostname, Property.MASTER_CLIENTPORT,
+    ServerAddress sa = TServerUtils.startServer(context, hostname, Property.MASTER_CLIENTPORT,
         processor, "Master", "Master Client Service Handler", null, Property.MASTER_MINTHREADS,
         Property.MASTER_THREADCHECK, Property.GENERAL_MAX_MESSAGE_SIZE);
     clientService = sa.server;
@@ -1253,7 +1262,7 @@ public class Master extends AccumuloServerContext
     ReplicationCoordinator.Processor<ReplicationCoordinator.Iface> replicationCoordinatorProcessor =
       new ReplicationCoordinator.Processor<>(TraceWrap.service(haReplicationProxy));
     // @formatter:on
-    ServerAddress replAddress = TServerUtils.startServer(this, hostname,
+    ServerAddress replAddress = TServerUtils.startServer(context, hostname,
         Property.MASTER_REPLICATION_COORDINATOR_PORT, replicationCoordinatorProcessor,
         "Master Replication Coordinator", "Replication Coordinator", null,
         Property.MASTER_REPLICATION_COORDINATOR_MINTHREADS,
@@ -1291,7 +1300,7 @@ public class Master extends AccumuloServerContext
       }
     });
 
-    watchers.add(new TabletGroupWatcher(this, new MetaDataStateStore(this, this), null) {
+    watchers.add(new TabletGroupWatcher(this, new MetaDataStateStore(context, this), null) {
       @Override
       boolean canSuspendTablets() {
         // Always allow user data tablets to enter suspended state.
@@ -1299,8 +1308,8 @@ public class Master extends AccumuloServerContext
       }
     });
 
-    watchers
-        .add(new TabletGroupWatcher(this, new RootTabletStateStore(this, this), watchers.get(0)) {
+    watchers.add(
+        new TabletGroupWatcher(this, new RootTabletStateStore(context, this), watchers.get(0)) {
           @Override
           boolean canSuspendTablets() {
             // Allow metadata tablets to enter suspended state only if so configured. Generally
@@ -1536,25 +1545,16 @@ public class Master extends AccumuloServerContext
   }
 
   public static void main(String[] args) throws Exception {
+    final String app = "master";
+    ServerOpts opts = new ServerOpts();
+    opts.parseArgs(app, args);
+    ServerContext context = ServerContext.getInstance();
+    context.setupServer(app, Master.class.getName(), opts.getAddress());
     try {
-      final String app = "master";
-      ServerOpts opts = new ServerOpts();
-      opts.parseArgs(app, args);
-      SecurityUtil.serverLogin(SiteConfiguration.getInstance());
-      String hostname = opts.getAddress();
-      Instance instance = HdfsZooInstance.getInstance();
-      ServerConfigurationFactory conf = new ServerConfigurationFactory(instance);
-      VolumeManager fs = VolumeManagerImpl.get();
-      MetricsSystemHelper.configure(Master.class.getSimpleName());
-      Accumulo.init(fs, instance, conf, app);
-      Master master = new Master(instance, conf, fs, hostname);
-      DistributedTrace.enable(hostname, app, conf.getSystemConfiguration());
+      Master master = new Master(context);
       master.run();
-    } catch (Exception ex) {
-      log.error("Unexpected exception, exiting", ex);
-      System.exit(1);
     } finally {
-      DistributedTrace.disable();
+      context.teardownServer();
     }
   }
 
@@ -1652,7 +1652,7 @@ public class Master extends AccumuloServerContext
     }
     TableManager manager = TableManager.getInstance();
 
-    for (Table.ID tableId : Tables.getIdToNameMap(this).keySet()) {
+    for (Table.ID tableId : Tables.getIdToNameMap(context).keySet()) {
       TableState state = manager.getTableState(tableId);
       if (state != null) {
         if (state == TableState.ONLINE)
@@ -1670,7 +1670,7 @@ public class Master extends AccumuloServerContext
   @Override
   public Collection<MergeInfo> merges() {
     List<MergeInfo> result = new ArrayList<>();
-    for (Table.ID tableId : Tables.getIdToNameMap(this).keySet()) {
+    for (Table.ID tableId : Tables.getIdToNameMap(context).keySet()) {
       result.add(getMergeInfo(tableId));
     }
     return result;
@@ -1781,7 +1781,7 @@ public class Master extends AccumuloServerContext
 
   public void markDeadServerLogsAsClosed(Map<TServerInstance,List<Path>> logsForDeadServers)
       throws WalMarkerException {
-    WalStateManager mgr = new WalStateManager(this, ZooReaderWriter.getInstance());
+    WalStateManager mgr = new WalStateManager(context, ZooReaderWriter.getInstance());
     for (Entry<TServerInstance,List<Path>> server : logsForDeadServers.entrySet()) {
       for (Path path : server.getValue()) {
         mgr.closeWal(server.getKey(), path);
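
Taken together, the Master.java hunks above reduce to one pattern: the class no longer inherits
instance, configuration and RPC state from AccumuloServerContext; it holds a ServerContext, delegates
to it, and the entry point now bootstraps that context. A condensed sketch of the result, assembled
only from the hunks above (fields, watchers, imports and error handling elided, so it is illustrative
rather than compilable):

    public class Master
        implements LiveTServerSet.Listener, TableObserver, CurrentState, HighlyAvailableService {

      private final ServerContext context;   // replaces the AccumuloServerContext superclass

      public Master(ServerContext context) throws IOException {
        this.context = context;
        // serverConfig, fs and hostname are likewise pulled from the context via
        // getServerConfFactory(), getVolumeManager() and getHostname()
      }

      // former superclass methods become thin delegates
      public String getInstanceID() {
        return context.getInstanceID();
      }

      public AccumuloConfiguration getConfiguration() {
        return context.getConfiguration();
      }

      public static void main(String[] args) throws Exception {
        ServerOpts opts = new ServerOpts();
        opts.parseArgs("master", args);
        ServerContext context = ServerContext.getInstance();
        context.setupServer("master", Master.class.getName(), opts.getAddress());
        try {
          new Master(context).run();
        } finally {
          context.teardownServer();   // replaces the old DistributedTrace.disable() cleanup
        }
      }
    }
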
diff --git a/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java b/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
index 2e8dfbf..b49de60 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
@@ -254,7 +254,7 @@ public class MasterClientServiceHandler extends FateServiceHandler
 
         // TODO detect case of table offline AND tablets w/ logs? - ACCUMULO-1296
 
-        if (tabletCount == 0 && !Tables.exists(master, tableId))
+        if (tabletCount == 0 && !Tables.exists(master.getContext(), tableId))
           throw new ThriftTableOperationException(tableId.canonicalID(), null, TableOperation.FLUSH,
               TableOperationExceptionType.NOTFOUND, null);
 
@@ -276,7 +276,7 @@ public class MasterClientServiceHandler extends FateServiceHandler
       throws ThriftTableOperationException {
     Namespace.ID namespaceId;
     try {
-      namespaceId = Tables.getNamespaceId(master, tableId);
+      namespaceId = Tables.getNamespaceId(master.getContext(), tableId);
     } catch (TableNotFoundException e) {
       throw new ThriftTableOperationException(tableId.canonicalID(), null, tableOp,
           TableOperationExceptionType.NOTFOUND, e.getMessage());
@@ -444,7 +444,7 @@ public class MasterClientServiceHandler extends FateServiceHandler
       throws ThriftSecurityException, ThriftTableOperationException {
 
     Namespace.ID namespaceId = null;
-    namespaceId = ClientServiceHandler.checkNamespaceId(master, namespace, op);
+    namespaceId = ClientServiceHandler.checkNamespaceId(master.getContext(), namespace, op);
 
     if (!master.security.canAlterNamespace(c, namespaceId))
       throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
@@ -458,7 +458,7 @@ public class MasterClientServiceHandler extends FateServiceHandler
     } catch (KeeperException.NoNodeException e) {
       // race condition... namespace no longer exists? This call will throw an exception if the
       // namespace was deleted:
-      ClientServiceHandler.checkNamespaceId(master, namespace, op);
+      ClientServiceHandler.checkNamespaceId(master.getContext(), namespace, op);
       log.info("Error altering namespace property", e);
       throw new ThriftTableOperationException(namespaceId.canonicalID(), namespace, op,
            TableOperationExceptionType.OTHER, "Problem altering namespace property");
@@ -471,7 +471,7 @@ public class MasterClientServiceHandler extends FateServiceHandler
 
   private void alterTableProperty(TCredentials c, String tableName, String property, String value,
       TableOperation op) throws ThriftSecurityException, ThriftTableOperationException {
-    final Table.ID tableId = ClientServiceHandler.checkTableId(master, tableName, op);
+    final Table.ID tableId = ClientServiceHandler.checkTableId(master.getContext(), tableName, op);
     Namespace.ID namespaceId = getNamespaceIdFromTableId(op, tableId);
     if (!master.security.canAlterTable(c, tableId, namespaceId))
       throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
@@ -485,7 +485,7 @@ public class MasterClientServiceHandler extends FateServiceHandler
     } catch (KeeperException.NoNodeException e) {
       // race condition... table no longer exists? This call will throw an exception if the table
       // was deleted:
-      ClientServiceHandler.checkTableId(master, tableName, op);
+      ClientServiceHandler.checkTableId(master.getContext(), tableName, op);
       log.info("Error altering table property", e);
       throw new ThriftTableOperationException(tableId.canonicalID(), tableName, op,
           TableOperationExceptionType.OTHER, "Problem altering table property");
@@ -501,7 +501,7 @@ public class MasterClientServiceHandler extends FateServiceHandler
       AccumuloConfiguration conf = master.getConfiguration();
       TabletBalancer balancer = Property.createInstanceFromPropertyName(conf,
           Property.MASTER_TABLET_BALANCER, TabletBalancer.class, new DefaultLoadBalancer());
-      balancer.init(master);
+      balancer.init(master.getContext());
       master.tabletBalancer = balancer;
       log.info("tablet balancer changed to {}", master.tabletBalancer.getClass().getName());
     }
@@ -538,7 +538,7 @@ public class MasterClientServiceHandler extends FateServiceHandler
     }
 
     final DelegationTokenConfig config = DelegationTokenConfigSerializer.deserialize(tConfig);
-    final AuthenticationTokenSecretManager secretManager = master.getSecretManager();
+    final AuthenticationTokenSecretManager secretManager = master.getContext().getSecretManager();
     try {
       Entry<Token<AuthenticationTokenIdentifier>,AuthenticationTokenIdentifier> pair = secretManager
           .generateToken(credentials.principal, config);
@@ -560,7 +560,7 @@ public class MasterClientServiceHandler extends FateServiceHandler
       throw new RuntimeException("Failed to obtain connector", e);
     }
 
-    final Text tableId = new Text(getTableId(master, tableName).getUtf8());
+    final Text tableId = new Text(getTableId(master.getContext(), tableName).getUtf8());
 
     drainLog.trace("Waiting for {} to be replicated for {}", logsToWatch, tableId);
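
Every change in this handler is the same mechanical substitution: static helpers that used to accept
the Master (which doubled as an Instance through AccumuloServerContext) now accept master.getContext(),
and per-instance state such as the secret manager is reached through that context. Reduced to a
before/after pair lifted from the hunks above:

    // before
    final Table.ID tableId = ClientServiceHandler.checkTableId(master, tableName, op);
    final AuthenticationTokenSecretManager secretManager = master.getSecretManager();

    // after
    final Table.ID tableId = ClientServiceHandler.checkTableId(master.getContext(), tableName, op);
    final AuthenticationTokenSecretManager secretManager = master.getContext().getSecretManager();
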
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
index 3f1bc82..d96874b 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
@@ -148,7 +148,7 @@ abstract class TabletGroupWatcher extends Daemon {
     int[] oldCounts = new int[TabletState.values().length];
     EventCoordinator.Listener eventListener = this.master.nextEvent.getListener();
 
-    WalStateManager wals = new WalStateManager(master, ZooReaderWriter.getInstance());
+    WalStateManager wals = new WalStateManager(master.getContext(), ZooReaderWriter.getInstance());
 
     while (this.master.stillMaster()) {
       // slow things down a little, otherwise we spam the logs when there are many wake-up events
@@ -620,7 +620,7 @@ abstract class TabletGroupWatcher extends Daemon {
         if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
           datafiles.add(new FileRef(this.master.fs, key));
           if (datafiles.size() > 1000) {
-            MetadataTableUtil.addDeleteEntries(extent, datafiles, master);
+            MetadataTableUtil.addDeleteEntries(extent, datafiles, master.getContext());
             datafiles.clear();
           }
         } else if (TabletsSection.ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
@@ -641,12 +641,12 @@ abstract class TabletGroupWatcher extends Daemon {
                 Path.SEPARATOR + extent.getTableId() + path)));
           }
           if (datafiles.size() > 1000) {
-            MetadataTableUtil.addDeleteEntries(extent, datafiles, master);
+            MetadataTableUtil.addDeleteEntries(extent, datafiles, master.getContext());
             datafiles.clear();
           }
         }
       }
-      MetadataTableUtil.addDeleteEntries(extent, datafiles, master);
+      MetadataTableUtil.addDeleteEntries(extent, datafiles, master.getContext());
       BatchWriter bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
       try {
         deleteTablets(info, deleteRange, bw, conn);
@@ -675,8 +675,8 @@ abstract class TabletGroupWatcher extends Daemon {
             + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + extent.getTableId()
             + Constants.DEFAULT_TABLET_LOCATION;
         MetadataTableUtil.addTablet(
-            new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), tdir, master,
-            timeType, this.master.masterLock);
+            new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), tdir,
+            master.getContext(), timeType, this.master.masterLock);
       }
     } catch (RuntimeException | IOException | TableNotFoundException
         | AccumuloSecurityException ex) {
diff --git a/server/master/src/main/java/org/apache/accumulo/master/metrics/Metrics2ReplicationMetrics.java b/server/master/src/main/java/org/apache/accumulo/master/metrics/Metrics2ReplicationMetrics.java
index b4eeacd..aa77c31 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/metrics/Metrics2ReplicationMetrics.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/metrics/Metrics2ReplicationMetrics.java
@@ -70,7 +70,7 @@ public class Metrics2ReplicationMetrics implements Metrics, MetricsSource {
 
     this.registry = new MetricsRegistry(Interns.info(NAME, DESCRIPTION));
     this.registry.tag(MsInfo.ProcessName, MetricsSystemHelper.getProcessName());
-    replicationUtil = new ReplicationUtil(master);
+    replicationUtil = new ReplicationUtil(master.getContext());
     replicationQueueTimeQuantiles = registry.newQuantiles(REPLICATION_QUEUE_TIME_QUANTILES,
         "Replication queue time quantiles in milliseconds", "ops", "latency", 600);
     replicationQueueTimeStat = registry.newStat(REPLICATION_QUEUE_TIME,
@@ -79,7 +79,7 @@ public class Metrics2ReplicationMetrics implements Metrics, MetricsSource {
 
   protected void snapshot() {
     // Only add these metrics if the replication table is online and there are peers
-    if (TableState.ONLINE == Tables.getTableState(master, ReplicationTable.ID)
+    if (TableState.ONLINE == Tables.getTableState(master.getContext(), ReplicationTable.ID)
         && !replicationUtil.getPeers().isEmpty()) {
       registry.add(PENDING_FILES, getNumFilesPendingReplication());
       addReplicationQueueTimeMetrics();
diff --git a/server/master/src/main/java/org/apache/accumulo/master/metrics/ReplicationMetrics.java b/server/master/src/main/java/org/apache/accumulo/master/metrics/ReplicationMetrics.java
index ded98ee..04a900c 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/metrics/ReplicationMetrics.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/metrics/ReplicationMetrics.java
@@ -52,13 +52,13 @@ public class ReplicationMetrics extends AbstractMetricsImpl implements Replicati
     } catch (Exception e) {
       log.error("Exception setting MBean object name", e);
     }
-    replicationUtil = new ReplicationUtil(master);
+    replicationUtil = new ReplicationUtil(master.getContext());
   }
 
   @Override
   public int getNumFilesPendingReplication() {
 
-    if (TableState.ONLINE != Tables.getTableState(master, ReplicationTable.ID)) {
+    if (TableState.ONLINE != Tables.getTableState(master.getContext(), ReplicationTable.ID)) {
       return 0;
     }
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/MasterReplicationCoordinator.java b/server/master/src/main/java/org/apache/accumulo/master/replication/MasterReplicationCoordinator.java
index 3c71fbb..afe7399 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/MasterReplicationCoordinator.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/MasterReplicationCoordinator.java
@@ -51,21 +51,22 @@ public class MasterReplicationCoordinator implements ReplicationCoordinator.Ifac
   private final SecurityOperation security;
 
   public MasterReplicationCoordinator(Master master) {
-    this(master, new ZooReader(master.getZooKeepers(), master.getZooKeepersSessionTimeOut()));
+    this(master, new ZooReader(master.getContext().getZooKeepers(),
+        master.getContext().getZooKeepersSessionTimeOut()));
   }
 
   protected MasterReplicationCoordinator(Master master, ZooReader reader) {
     this.master = master;
     this.rand = new Random(358923462L);
     this.reader = reader;
-    this.security = SecurityOperation.getInstance(master, false);
+    this.security = SecurityOperation.getInstance(master.getContext(), false);
   }
 
   @Override
   public String getServicerAddress(String remoteTableId, TCredentials creds)
       throws ReplicationCoordinatorException, TException {
     try {
-      security.authenticateUser(master.rpcCreds(), creds);
+      security.authenticateUser(master.getContext().rpcCreds(), creds);
     } catch (ThriftSecurityException e) {
       log.error("{} failed to authenticate for replication to {}", creds.getPrincipal(),
           remoteTableId);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java b/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java
index cf889af..d603783 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/ReplicationDriver.java
@@ -74,7 +74,7 @@ public class ReplicationDriver extends Daemon {
         }
 
         statusMaker = new StatusMaker(conn, master.getFileSystem());
-        workMaker = new WorkMaker(master, conn);
+        workMaker = new WorkMaker(master.getContext(), conn);
         finishedWorkUpdater = new FinishedWorkUpdater(conn);
         rcrr = new RemoveCompleteReplicationRecords(conn);
       }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java b/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java
index ff92d1e..f04ac24 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java
@@ -123,7 +123,7 @@ public class SequentialWorkAssigner extends DistributedWorkQueueWorkAssigner {
   protected void cleanupFinishedWork() {
     final Iterator<Entry<String,Map<Table.ID,String>>> queuedWork = queuedWorkByPeerName.entrySet()
         .iterator();
-    final String instanceId = conn.getInstance().getInstanceID();
+    final String instanceId = conn.getInstanceID();
 
     int elementsRemoved = 0;
     // Check the status of all the work we've queued up
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/UnorderedWorkAssigner.java b/server/master/src/main/java/org/apache/accumulo/master/replication/UnorderedWorkAssigner.java
index a6e1a20..5dece79 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/UnorderedWorkAssigner.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/UnorderedWorkAssigner.java
@@ -137,7 +137,7 @@ public class UnorderedWorkAssigner extends DistributedWorkQueueWorkAssigner {
   @Override
   protected void cleanupFinishedWork() {
     final Iterator<String> work = queuedWork.iterator();
-    final String instanceId = conn.getInstance().getInstanceID();
+    final String instanceId = conn.getInstanceID();
     while (work.hasNext()) {
       String filename = work.next();
       // Null equates to the work was finished
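
Both work assigners drop the deprecated hop through Instance; the Connector now answers for the
instance id directly:

    // before
    final String instanceId = conn.getInstance().getInstanceID();

    // after
    final String instanceId = conn.getInstanceID();
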
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
index de6d18f..a7bed7f 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
@@ -38,7 +38,7 @@ import org.apache.accumulo.core.replication.ReplicationTableOfflineException;
 import org.apache.accumulo.core.replication.ReplicationTarget;
 import org.apache.accumulo.core.trace.Span;
 import org.apache.accumulo.core.trace.Trace;
-import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.replication.StatusUtil;
 import org.apache.accumulo.server.replication.proto.Replication.Status;
@@ -56,12 +56,12 @@ import com.google.protobuf.InvalidProtocolBufferException;
 public class WorkMaker {
   private static final Logger log = LoggerFactory.getLogger(WorkMaker.class);
 
-  private final AccumuloServerContext context;
+  private final ServerContext context;
   private Connector conn;
 
   private BatchWriter writer;
 
-  public WorkMaker(AccumuloServerContext context, Connector conn) {
+  public WorkMaker(ServerContext context, Connector conn) {
     this.context = context;
     this.conn = conn;
   }
@@ -114,7 +114,7 @@ public class WorkMaker {
         }
 
         // Get the table configuration for the table specified by the status record
-        tableConf = context.getServerConfigurationFactory().getTableConfiguration(tableId);
+        tableConf = context.getServerConfFactory().getTableConfiguration(tableId);
 
         // getTableConfiguration(String) returns null if the table no longer exists
         if (null == tableConf) {
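
WorkMaker is representative of the server-side utilities: its constructor now takes the renamed
ServerContext, and table configuration comes through the shortened accessor. As wired up from
ReplicationDriver above (variable names as in the diff):

    WorkMaker workMaker = new WorkMaker(master.getContext(), conn);   // was new WorkMaker(master, conn)

    // inside WorkMaker, per status record:
    TableConfiguration tableConf = context.getServerConfFactory().getTableConfiguration(tableId);
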
diff --git a/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java b/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
index cc8b4e2..c8a39db 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
@@ -264,8 +264,8 @@ public class MergeStats {
     Map<String,String> tableIdMap = conn.tableOperations().tableIdMap();
     for (Entry<String,String> entry : tableIdMap.entrySet()) {
       final String table = entry.getKey(), tableId = entry.getValue();
-      String path = ZooUtil.getRoot(conn.getInstance().getInstanceID()) + Constants.ZTABLES + "/"
-          + tableId + "/merge";
+      String path = ZooUtil.getRoot(conn.getInstanceID()) + Constants.ZTABLES + "/" + tableId
+          + "/merge";
       MergeInfo info = new MergeInfo();
       if (ZooReaderWriter.getInstance().exists(path)) {
         byte[] data = ZooReaderWriter.getInstance().getData(path, new Stat());
diff --git a/server/master/src/main/java/org/apache/accumulo/master/state/SetGoalState.java b/server/master/src/main/java/org/apache/accumulo/master/state/SetGoalState.java
index e433f6a..017598f 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/state/SetGoalState.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/state/SetGoalState.java
@@ -21,12 +21,9 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.conf.SiteConfiguration;
 import org.apache.accumulo.core.master.thrift.MasterGoalState;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.server.Accumulo;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.fs.VolumeManagerImpl;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.security.SecurityUtil;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 
@@ -43,11 +40,11 @@ public class SetGoalState {
     }
     SecurityUtil.serverLogin(SiteConfiguration.getInstance());
 
-    VolumeManager fs = VolumeManagerImpl.get();
-    Accumulo.waitForZookeeperAndHdfs(fs);
+    ServerContext context = ServerContext.getInstance();
+    Accumulo.waitForZookeeperAndHdfs(context.getVolumeManager());
     ZooReaderWriter.getInstance().putPersistentData(
-        ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZMASTER_GOAL_STATE,
-        args[0].getBytes(UTF_8), NodeExistsPolicy.OVERWRITE);
+        context.getZooKeeperRoot() + Constants.ZMASTER_GOAL_STATE, args[0].getBytes(UTF_8),
+        NodeExistsPolicy.OVERWRITE);
   }
 
 }
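
SetGoalState is the standalone-tool version of the migration: instead of building a VolumeManager and
an HdfsZooInstance by hand, it asks the shared ServerContext for the filesystem and the ZooKeeper
root. The root lookup in isolation (zkRoot is just an illustrative label, not a name from the diff):

    // before
    String zkRoot = ZooUtil.getRoot(HdfsZooInstance.getInstance());

    // after
    String zkRoot = context.getZooKeeperRoot();
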
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
index 21f6179..2532247 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
@@ -47,10 +47,10 @@ public class CancelCompactions extends MasterRepo {
 
   @Override
   public Repo<Master> call(long tid, Master environment) throws Exception {
-    String zCompactID = Constants.ZROOT + "/" + environment.getInstance().getInstanceID()
-        + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_ID;
-    String zCancelID = Constants.ZROOT + "/" + environment.getInstance().getInstanceID()
-        + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_CANCEL_ID;
+    String zCompactID = Constants.ZROOT + "/" + environment.getInstanceID() + Constants.ZTABLES
+        + "/" + tableId + Constants.ZTABLE_COMPACT_ID;
+    String zCancelID = Constants.ZROOT + "/" + environment.getInstanceID() + Constants.ZTABLES + "/"
+        + tableId + Constants.ZTABLE_COMPACT_CANCEL_ID;
 
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
index 281cf70..5e13c15 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
@@ -158,14 +158,14 @@ class CleanUp extends MasterRepo {
       // If the master lock passed to deleteTable, it is possible that the delete mutations will be
       // dropped. If the delete operations
       // are dropped and the operation completes, then the deletes will not be repeated.
-      MetadataTableUtil.deleteTable(tableId, refCount != 0, master, null);
+      MetadataTableUtil.deleteTable(tableId, refCount != 0, master.getContext(), null);
     } catch (Exception e) {
       log.error("error deleting " + tableId + " from metadata table", e);
     }
 
     // remove any problem reports the table may have
     try {
-      ProblemReports.getInstance(master).deleteProblemReports(tableId);
+      ProblemReports.getInstance(master.getContext()).deleteProblemReports(tableId);
     } catch (Exception e) {
       log.error("Failed to delete problem reports for table " + tableId, e);
     }
@@ -199,15 +199,15 @@ class CleanUp extends MasterRepo {
     // remove table from zookeeper
     try {
       TableManager.getInstance().removeTable(tableId);
-      Tables.clearCache(master.getInstance());
+      Tables.clearCache(master.getContext());
     } catch (Exception e) {
       log.error("Failed to find table id in zookeeper", e);
     }
 
     // remove any permissions associated with this table
     try {
-      AuditedSecurityOperation.getInstance(master).deleteTable(master.rpcCreds(), tableId,
-          namespaceId);
+      AuditedSecurityOperation.getInstance(master.getContext())
+          .deleteTable(master.getContext().rpcCreds(), tableId, namespaceId);
     } catch (ThriftSecurityException e) {
       log.error("{}", e.getMessage(), e);
     }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java
index b425a15..1254477 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java
@@ -42,16 +42,16 @@ class CloneMetadata extends MasterRepo {
             cloneInfo.tableId, cloneInfo.srcTableId));
     // need to clear out any metadata entries for tableId just in case this
     // died before and is executing again
-    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment,
+    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment.getContext(),
         environment.getMasterLock());
-    MetadataTableUtil.cloneTable(environment, cloneInfo.srcTableId, cloneInfo.tableId,
+    MetadataTableUtil.cloneTable(environment.getContext(), cloneInfo.srcTableId, cloneInfo.tableId,
         environment.getFileSystem());
     return new FinishCloneTable(cloneInfo);
   }
 
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment,
+    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment.getContext(),
         environment.getMasterLock());
   }
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ClonePermissions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ClonePermissions.java
index 62e0985..1d0b8ff 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ClonePermissions.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ClonePermissions.java
@@ -47,8 +47,8 @@ class ClonePermissions extends MasterRepo {
     // give all table permissions to the creator
     for (TablePermission permission : TablePermission.values()) {
       try {
-        AuditedSecurityOperation.getInstance(environment).grantTablePermission(
-            environment.rpcCreds(), cloneInfo.user, cloneInfo.tableId, permission,
+        AuditedSecurityOperation.getInstance(environment.getContext()).grantTablePermission(
+            environment.getContext().rpcCreds(), cloneInfo.user, cloneInfo.tableId, permission,
             cloneInfo.namespaceId);
       } catch (ThriftSecurityException e) {
         LoggerFactory.getLogger(ClonePermissions.class).error("{}", e.getMessage(), e);
@@ -60,7 +60,7 @@ class ClonePermissions extends MasterRepo {
     // this way concurrent users will not get a spurious permission denied
     // error
     try {
-      return new CloneZookeeper(cloneInfo, environment);
+      return new CloneZookeeper(cloneInfo, environment.getContext());
     } catch (NamespaceNotFoundException e) {
       throw new AcceptableThriftTableOperationException(null, cloneInfo.tableName,
           TableOperation.CLONE, TableOperationExceptionType.NAMESPACE_NOTFOUND,
@@ -70,7 +70,7 @@ class ClonePermissions extends MasterRepo {
 
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    AuditedSecurityOperation.getInstance(environment).deleteTable(environment.rpcCreds(),
-        cloneInfo.tableId, cloneInfo.namespaceId);
+    AuditedSecurityOperation.getInstance(environment.getContext())
+        .deleteTable(environment.getContext().rpcCreds(), cloneInfo.tableId, cloneInfo.namespaceId);
   }
 }
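
The permission plumbing in these table-ops repos follows one shape everywhere: the security singleton
is obtained from the context, and system credentials come from context.rpcCreds(). In isolation,
using the delete-table call from CleanUp/ClonePermissions above:

    // before
    AuditedSecurityOperation.getInstance(master).deleteTable(master.rpcCreds(), tableId, namespaceId);

    // after
    AuditedSecurityOperation.getInstance(master.getContext())
        .deleteTable(master.getContext().rpcCreds(), tableId, namespaceId);
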
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
index 5da34c2..cbfb015 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
@@ -54,7 +54,7 @@ public class CloneTable extends MasterRepo {
 
     Utils.idLock.lock();
     try {
-      cloneInfo.tableId = Utils.getNextId(cloneInfo.tableName, environment.getInstance(),
+      cloneInfo.tableId = Utils.getNextId(cloneInfo.tableName, environment.getContext(),
           Table.ID::of);
       return new ClonePermissions(cloneInfo);
     } finally {
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java
index dcf73ab..480081a 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java
@@ -54,13 +54,13 @@ class CloneZookeeper extends MasterRepo {
     try {
       // write tableName & tableId to zookeeper
 
-      Utils.checkTableDoesNotExist(environment, cloneInfo.tableName, cloneInfo.tableId,
+      Utils.checkTableDoesNotExist(environment.getContext(), cloneInfo.tableName, cloneInfo.tableId,
           TableOperation.CLONE);
 
       TableManager.getInstance().cloneTable(cloneInfo.srcTableId, cloneInfo.tableId,
           cloneInfo.tableName, cloneInfo.namespaceId, cloneInfo.propertiesToSet,
           cloneInfo.propertiesToExclude, NodeExistsPolicy.OVERWRITE);
-      Tables.clearCache(environment.getInstance());
+      Tables.clearCache(environment.getContext());
 
       return new CloneMetadata(cloneInfo);
     } finally {
@@ -74,7 +74,7 @@ class CloneZookeeper extends MasterRepo {
     if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
       Utils.unreserveNamespace(cloneInfo.namespaceId, tid, false);
     Utils.unreserveTable(cloneInfo.tableId, tid, true);
-    Tables.clearCache(environment.getInstance());
+    Tables.clearCache(environment.getContext());
   }
 
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
index b660857..ecbfdb8 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
@@ -90,8 +90,8 @@ public class CompactRange extends MasterRepo {
 
   @Override
   public Repo<Master> call(final long tid, Master env) throws Exception {
-    String zTablePath = Constants.ZROOT + "/" + env.getInstance().getInstanceID()
-        + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_ID;
+    String zTablePath = Constants.ZROOT + "/" + env.getInstanceID() + Constants.ZTABLES + "/"
+        + tableId + Constants.ZTABLE_COMPACT_ID;
 
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
     byte[] cid;
@@ -143,8 +143,8 @@ public class CompactRange extends MasterRepo {
 
   static void removeIterators(Master environment, final long txid, Table.ID tableId)
       throws Exception {
-    String zTablePath = Constants.ZROOT + "/" + environment.getInstance().getInstanceID()
-        + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_ID;
+    String zTablePath = Constants.ZROOT + "/" + environment.getInstanceID() + Constants.ZTABLES
+        + "/" + tableId + Constants.ZTABLE_COMPACT_ID;
 
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
index eb73136..a966371 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
@@ -74,8 +74,8 @@ class CompactionDriver extends MasterRepo {
   @Override
   public long isReady(long tid, Master master) throws Exception {
 
-    String zCancelID = Constants.ZROOT + "/" + master.getInstance().getInstanceID()
-        + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_CANCEL_ID;
+    String zCancelID = Constants.ZROOT + "/" + master.getInstanceID() + Constants.ZTABLES + "/"
+        + tableId + Constants.ZTABLE_COMPACT_CANCEL_ID;
 
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
 
@@ -144,12 +144,13 @@ class CompactionDriver extends MasterRepo {
 
     long scanTime = System.currentTimeMillis() - t1;
 
-    Tables.clearCache(master);
-    if (tabletCount == 0 && !Tables.exists(master, tableId))
+    Tables.clearCache(master.getContext());
+    if (tabletCount == 0 && !Tables.exists(master.getContext(), tableId))
       throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null,
           TableOperation.COMPACT, TableOperationExceptionType.NOTFOUND, null);
 
-    if (serversToFlush.size() == 0 && Tables.getTableState(master, tableId) == TableState.OFFLINE)
+    if (serversToFlush.size() == 0
+        && Tables.getTableState(master.getContext(), tableId) == TableState.OFFLINE)
       throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null,
           TableOperation.COMPACT, TableOperationExceptionType.OFFLINE, null);
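
The compaction repos show the two remaining recurring substitutions: ZooKeeper paths are built from
the Master's delegated getInstanceID() rather than getInstance().getInstanceID(), and the Tables
helpers are keyed by the context instead of an Instance (tableStillExists below is an illustrative
label, not a name from the diff):

    String zCancelID = Constants.ZROOT + "/" + master.getInstanceID() + Constants.ZTABLES + "/"
        + tableId + Constants.ZTABLE_COMPACT_CANCEL_ID;

    Tables.clearCache(master.getContext());               // was Tables.clearCache(master)
    boolean tableStillExists = Tables.exists(master.getContext(), tableId);
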
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
index c17a63f6..b324176 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
@@ -43,7 +43,7 @@ public class CreateNamespace extends MasterRepo {
   public Repo<Master> call(long tid, Master master) throws Exception {
     Utils.idLock.lock();
     try {
-      namespaceInfo.namespaceId = Utils.getNextId(namespaceInfo.namespaceName, master.getInstance(),
+      namespaceInfo.namespaceId = Utils.getNextId(namespaceInfo.namespaceName, master.getContext(),
           Namespace.ID::of);
       return new SetupNamespacePermissions(namespaceInfo);
     } finally {
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
index d7d4ee5..c3ca32d 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
@@ -57,7 +57,7 @@ public class CreateTable extends MasterRepo {
 
     Utils.idLock.lock();
     try {
-      tableInfo.tableId = Utils.getNextId(tableInfo.tableName, master.getInstance(), Table.ID::of);
+      tableInfo.tableId = Utils.getNextId(tableInfo.tableName, master.getContext(), Table.ID::of);
       return new SetupPermissions(tableInfo);
     } finally {
       Utils.idLock.unlock();
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java
index dfd5f0c..d2c4238 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java
@@ -21,7 +21,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException;
 import org.apache.accumulo.core.client.impl.Namespace;
 import org.apache.accumulo.core.client.impl.Namespaces;
@@ -74,17 +73,15 @@ class ImportPopulateZookeeper extends MasterRepo {
     Utils.tableNameLock.lock();
     try {
       // write tableName & tableId to zookeeper
-      Instance instance = env.getInstance();
-
-      Utils.checkTableDoesNotExist(env, tableInfo.tableName, tableInfo.tableId,
+      Utils.checkTableDoesNotExist(env.getContext(), tableInfo.tableName, tableInfo.tableId,
           TableOperation.CREATE);
 
       String namespace = Tables.qualify(tableInfo.tableName).getFirst();
-      Namespace.ID namespaceId = Namespaces.getNamespaceId(env, namespace);
+      Namespace.ID namespaceId = Namespaces.getNamespaceId(env.getContext(), namespace);
       TableManager.getInstance().addTable(tableInfo.tableId, namespaceId, tableInfo.tableName,
           NodeExistsPolicy.OVERWRITE);
 
-      Tables.clearCache(instance);
+      Tables.clearCache(env.getContext());
     } finally {
       Utils.tableNameLock.unlock();
     }
@@ -101,9 +98,8 @@ class ImportPopulateZookeeper extends MasterRepo {
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    Instance instance = env.getInstance();
     TableManager.getInstance().removeTable(tableInfo.tableId);
     Utils.unreserveTable(tableInfo.tableId, tid, true);
-    Tables.clearCache(instance);
+    Tables.clearCache(env.getContext());
   }
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java
index 66fd5c7..7dc6529 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java
@@ -42,11 +42,11 @@ class ImportSetupPermissions extends MasterRepo {
   @Override
   public Repo<Master> call(long tid, Master env) throws Exception {
     // give all table permissions to the creator
-    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
+    SecurityOperation security = AuditedSecurityOperation.getInstance(env.getContext());
     for (TablePermission permission : TablePermission.values()) {
       try {
-        security.grantTablePermission(env.rpcCreds(), tableInfo.user, tableInfo.tableId, permission,
-            tableInfo.namespaceId);
+        security.grantTablePermission(env.getContext().rpcCreds(), tableInfo.user,
+            tableInfo.tableId, permission, tableInfo.namespaceId);
       } catch (ThriftSecurityException e) {
         LoggerFactory.getLogger(ImportSetupPermissions.class).error("{}", e.getMessage(), e);
         throw e;
@@ -61,7 +61,7 @@ class ImportSetupPermissions extends MasterRepo {
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    AuditedSecurityOperation.getInstance(env).deleteTable(env.rpcCreds(), tableInfo.tableId,
-        tableInfo.namespaceId);
+    AuditedSecurityOperation.getInstance(env.getContext()).deleteTable(env.getContext().rpcCreds(),
+        tableInfo.tableId, tableInfo.namespaceId);
   }
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
index 766bc86..479e61b 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
@@ -25,7 +25,6 @@ import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException;
 import org.apache.accumulo.core.client.impl.Namespace;
 import org.apache.accumulo.core.client.impl.Table;
@@ -71,8 +70,7 @@ public class ImportTable extends MasterRepo {
 
     Utils.idLock.lock();
     try {
-      Instance instance = env.getInstance();
-      tableInfo.tableId = Utils.getNextId(tableInfo.tableName, instance, Table.ID::of);
+      tableInfo.tableId = Utils.getNextId(tableInfo.tableName, env.getContext(), Table.ID::of);
       return new ImportSetupPermissions(tableInfo);
     } finally {
       Utils.idLock.unlock();
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java
index eebaeaf..c1b0d59 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java
@@ -52,11 +52,12 @@ class NamespaceCleanUp extends MasterRepo {
     } catch (Exception e) {
       log.error("Failed to find namespace in zookeeper", e);
     }
-    Tables.clearCache(master.getInstance());
+    Tables.clearCache(master.getContext());
 
     // remove any permissions associated with this namespace
     try {
-      AuditedSecurityOperation.getInstance(master).deleteNamespace(master.rpcCreds(), namespaceId);
+      AuditedSecurityOperation.getInstance(master.getContext())
+          .deleteNamespace(master.getContext().rpcCreds(), namespaceId);
     } catch (ThriftSecurityException e) {
       log.error("{}", e.getMessage(), e);
     }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java
index bfc9e51..1049770 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java
@@ -39,7 +39,7 @@ class PopulateMetadata extends MasterRepo {
   @Override
   public Repo<Master> call(long tid, Master environment) throws Exception {
     KeyExtent extent = new KeyExtent(tableInfo.tableId, null, null);
-    MetadataTableUtil.addTablet(extent, tableInfo.dir, environment, tableInfo.timeType,
+    MetadataTableUtil.addTablet(extent, tableInfo.dir, environment.getContext(), tableInfo.timeType,
         environment.getMasterLock());
 
     return new FinishCreateTable(tableInfo);
@@ -48,7 +48,7 @@ class PopulateMetadata extends MasterRepo {
 
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment,
+    MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment.getContext(),
         environment.getMasterLock());
   }
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadataTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadataTable.java
index c3a6386..e72fd87 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadataTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadataTable.java
@@ -227,7 +227,7 @@ class PopulateMetadataTable extends MasterRepo {
 
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment,
+    MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment.getContext(),
         environment.getMasterLock());
   }
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java
index 1e106b2..5b5b9d3 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java
@@ -18,7 +18,6 @@ package org.apache.accumulo.master.tableOps;
 
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.fate.Repo;
@@ -49,9 +48,7 @@ class PopulateZookeeper extends MasterRepo {
     Utils.tableNameLock.lock();
     try {
       // write tableName & tableId to zookeeper
-      Instance instance = master.getInstance();
-
-      Utils.checkTableDoesNotExist(master, tableInfo.tableName, tableInfo.tableId,
+      Utils.checkTableDoesNotExist(master.getContext(), tableInfo.tableName, tableInfo.tableId,
           TableOperation.CREATE);
 
       TableManager.getInstance().addTable(tableInfo.tableId, tableInfo.namespaceId,
@@ -60,7 +57,7 @@ class PopulateZookeeper extends MasterRepo {
       for (Entry<String,String> entry : tableInfo.props.entrySet())
         TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue());
 
-      Tables.clearCache(instance);
+      Tables.clearCache(master.getContext());
       return new ChooseDir(tableInfo);
     } finally {
       Utils.tableNameLock.unlock();
@@ -70,10 +67,9 @@ class PopulateZookeeper extends MasterRepo {
 
   @Override
   public void undo(long tid, Master master) throws Exception {
-    Instance instance = master.getInstance();
     TableManager.getInstance().removeTable(tableInfo.tableId);
     Utils.unreserveTable(tableInfo.tableId, tid, true);
-    Tables.clearCache(instance);
+    Tables.clearCache(master.getContext());
   }
 
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java
index 94986e3..50e656c 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java
@@ -47,7 +47,7 @@ class PopulateZookeeperWithNamespace extends MasterRepo {
 
     Utils.tableNameLock.lock();
     try {
-      Utils.checkNamespaceDoesNotExist(master, namespaceInfo.namespaceName,
+      Utils.checkNamespaceDoesNotExist(master.getContext(), namespaceInfo.namespaceName,
           namespaceInfo.namespaceId, TableOperation.CREATE);
 
       TableManager.prepareNewNamespaceState(master.getInstanceID(), namespaceInfo.namespaceId,
@@ -57,7 +57,7 @@ class PopulateZookeeperWithNamespace extends MasterRepo {
         NamespacePropUtil.setNamespaceProperty(namespaceInfo.namespaceId, entry.getKey(),
             entry.getValue());
 
-      Tables.clearCache(master);
+      Tables.clearCache(master.getContext());
 
       return new FinishCreateNamespace(namespaceInfo);
     } finally {
@@ -68,7 +68,7 @@ class PopulateZookeeperWithNamespace extends MasterRepo {
   @Override
   public void undo(long tid, Master master) throws Exception {
     TableManager.getInstance().removeNamespace(namespaceInfo.namespaceId);
-    Tables.clearCache(master.getInstance());
+    Tables.clearCache(master.getContext());
     Utils.unreserveNamespace(namespaceInfo.namespaceId, tid, true);
   }
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameNamespace.java
index eb2b780..0fe6452 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameNamespace.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameNamespace.java
@@ -55,7 +55,8 @@ public class RenameNamespace extends MasterRepo {
 
     Utils.tableNameLock.lock();
     try {
-      Utils.checkNamespaceDoesNotExist(master, newName, namespaceId, TableOperation.RENAME);
+      Utils.checkNamespaceDoesNotExist(master.getContext(), newName, namespaceId,
+          TableOperation.RENAME);
 
       final String tap = ZooUtil.getRoot(master.getInstanceID()) + Constants.ZNAMESPACES + "/"
           + namespaceId + Constants.ZNAMESPACE_NAME;
@@ -73,7 +74,7 @@ public class RenameNamespace extends MasterRepo {
           return newName.getBytes();
         }
       });
-      Tables.clearCache(master);
+      Tables.clearCache(master.getContext());
     } finally {
       Utils.tableNameLock.unlock();
       Utils.unreserveNamespace(namespaceId, id, true);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
index 03e14cf..ead80ca 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
@@ -19,7 +19,6 @@ package org.apache.accumulo.master.tableOps;
 import static java.nio.charset.StandardCharsets.UTF_8;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.NamespaceNotFoundException;
 import org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException;
 import org.apache.accumulo.core.client.impl.Namespace;
@@ -61,13 +60,12 @@ public class RenameTable extends MasterRepo {
 
   @Override
   public Repo<Master> call(long tid, Master master) throws Exception {
-    Instance instance = master.getInstance();
     Pair<String,String> qualifiedOldTableName = Tables.qualify(oldTableName);
     Pair<String,String> qualifiedNewTableName = Tables.qualify(newTableName);
 
     // ensure no attempt is made to rename across namespaces
-    if (newTableName.contains(".")
-        && !namespaceId.equals(Namespaces.getNamespaceId(master, qualifiedNewTableName.getFirst())))
+    if (newTableName.contains(".") && !namespaceId
+        .equals(Namespaces.getNamespaceId(master.getContext(), qualifiedNewTableName.getFirst())))
       throw new AcceptableThriftTableOperationException(tableId.canonicalID(), oldTableName,
           TableOperation.RENAME, TableOperationExceptionType.INVALID_NAME,
           "Namespace in new table name does not match the old table name");
@@ -76,7 +74,8 @@ public class RenameTable extends MasterRepo {
 
     Utils.tableNameLock.lock();
     try {
-      Utils.checkTableDoesNotExist(master, newTableName, tableId, TableOperation.RENAME);
+      Utils.checkTableDoesNotExist(master.getContext(), newTableName, tableId,
+          TableOperation.RENAME);
 
       final String newName = qualifiedNewTableName.getSecond();
       final String oldName = qualifiedOldTableName.getSecond();
@@ -98,7 +97,7 @@ public class RenameTable extends MasterRepo {
           return newName.getBytes(UTF_8);
         }
       });
-      Tables.clearCache(master);
+      Tables.clearCache(master.getContext());
     } finally {
       Utils.tableNameLock.unlock();
       Utils.unreserveTable(tableId, tid, true);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupNamespacePermissions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupNamespacePermissions.java
index cf25025..78639d2 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupNamespacePermissions.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupNamespacePermissions.java
@@ -37,10 +37,10 @@ class SetupNamespacePermissions extends MasterRepo {
   @Override
   public Repo<Master> call(long tid, Master env) throws Exception {
     // give all namespace permissions to the creator
-    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
+    SecurityOperation security = AuditedSecurityOperation.getInstance(env.getContext());
     for (NamespacePermission permission : NamespacePermission.values()) {
       try {
-        security.grantNamespacePermission(env.rpcCreds(), namespaceInfo.user,
+        security.grantNamespacePermission(env.getContext().rpcCreds(), namespaceInfo.user,
             namespaceInfo.namespaceId, permission);
       } catch (ThriftSecurityException e) {
         LoggerFactory.getLogger(SetupNamespacePermissions.class).error("{}", e.getMessage(), e);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupPermissions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupPermissions.java
index fe87e9d..c7c71fe 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupPermissions.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupPermissions.java
@@ -37,12 +37,12 @@ class SetupPermissions extends MasterRepo {
   @Override
   public Repo<Master> call(long tid, Master env) throws Exception {
     // give all table permissions to the creator
-    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
-    if (!tableInfo.user.equals(env.getCredentials().getPrincipal())) {
+    SecurityOperation security = AuditedSecurityOperation.getInstance(env.getContext());
+    if (!tableInfo.user.equals(env.getContext().getCredentials().getPrincipal())) {
       for (TablePermission permission : TablePermission.values()) {
         try {
-          security.grantTablePermission(env.rpcCreds(), tableInfo.user, tableInfo.tableId,
-              permission, tableInfo.namespaceId);
+          security.grantTablePermission(env.getContext().rpcCreds(), tableInfo.user,
+              tableInfo.tableId, permission, tableInfo.namespaceId);
         } catch (ThriftSecurityException e) {
           LoggerFactory.getLogger(SetupPermissions.class).error("{}", e.getMessage(), e);
           throw e;
@@ -58,8 +58,8 @@ class SetupPermissions extends MasterRepo {
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    AuditedSecurityOperation.getInstance(env).deleteTable(env.rpcCreds(), tableInfo.tableId,
-        tableInfo.namespaceId);
+    AuditedSecurityOperation.getInstance(env.getContext()).deleteTable(env.getContext().rpcCreds(),
+        tableInfo.tableId, tableInfo.namespaceId);
   }
 
 }
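
SetupPermissions and SetupNamespacePermissions show the same move for the security plumbing: the audited SecurityOperation and the RPC credentials are both resolved through getContext(). A hedged sketch of granting table permissions under the new shape (the class and method are illustrative, and the import locations for the security classes are assumed; the call signatures follow the diff):

    import org.apache.accumulo.core.client.impl.Namespace;
    import org.apache.accumulo.core.client.impl.Table;
    import org.apache.accumulo.core.security.TablePermission;
    import org.apache.accumulo.master.Master;
    import org.apache.accumulo.server.security.AuditedSecurityOperation;
    import org.apache.accumulo.server.security.SecurityOperation;

    // Illustrative only: grants every table permission to one user via the server context.
    class GrantExample {
      void grantAll(Master env, String user, Table.ID tableId, Namespace.ID namespaceId)
          throws Exception {
        SecurityOperation security = AuditedSecurityOperation.getInstance(env.getContext());
        for (TablePermission permission : TablePermission.values()) {
          security.grantTablePermission(env.getContext().rpcCreds(), user, tableId, permission,
              namespaceId);
        }
      }
    }
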
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
index b8fa377..0906af7 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
@@ -25,7 +25,6 @@ import java.util.concurrent.locks.ReentrantLock;
 import java.util.function.Function;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.AbstractId;
 import org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException;
 import org.apache.accumulo.core.client.impl.ClientContext;
@@ -39,7 +38,7 @@ import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.DistributedReadWriteLock;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooReservation;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.ZooQueueLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.zookeeper.KeeperException;
@@ -50,6 +49,8 @@ public class Utils {
   private static final byte[] ZERO_BYTE = {'0'};
   private static final Logger log = LoggerFactory.getLogger(Utils.class);
 
+  private static final ServerContext context = ServerContext.getInstance();
+
   static void checkTableDoesNotExist(ClientContext context, String tableName, Table.ID tableId,
       TableOperation operation) throws AcceptableThriftTableOperationException {
 
@@ -60,11 +61,11 @@ public class Utils {
           TableOperationExceptionType.EXISTS, null);
   }
 
-  static <T extends AbstractId> T getNextId(String name, Instance instance,
+  static <T extends AbstractId> T getNextId(String name, ClientContext context,
       Function<String,T> newIdFunction) throws AcceptableThriftTableOperationException {
     try {
       IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-      final String ntp = ZooUtil.getRoot(instance) + Constants.ZTABLES;
+      final String ntp = context.getZooKeeperRoot() + Constants.ZTABLES;
       byte[] nid = zoo.mutate(ntp, ZERO_BYTE, ZooUtil.PUBLIC, currentValue -> {
         BigInteger nextId = new BigInteger(new String(currentValue, UTF_8), Character.MAX_RADIX);
         nextId = nextId.add(BigInteger.ONE);
@@ -85,9 +86,8 @@ public class Utils {
       boolean tableMustExist, TableOperation op) throws Exception {
     if (getLock(tableId, tid, writeLock).tryLock()) {
       if (tableMustExist) {
-        Instance instance = HdfsZooInstance.getInstance();
         IZooReaderWriter zk = ZooReaderWriter.getInstance();
-        if (!zk.exists(ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId))
+        if (!zk.exists(context.getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId))
           throw new AcceptableThriftTableOperationException(tableId.canonicalID(), "", op,
               TableOperationExceptionType.NOTFOUND, "Table does not exist");
       }
@@ -116,9 +116,8 @@ public class Utils {
       boolean mustExist, TableOperation op) throws Exception {
     if (getLock(namespaceId, id, writeLock).tryLock()) {
       if (mustExist) {
-        Instance instance = HdfsZooInstance.getInstance();
         IZooReaderWriter zk = ZooReaderWriter.getInstance();
-        if (!zk.exists(ZooUtil.getRoot(instance) + Constants.ZNAMESPACES + "/" + namespaceId))
+        if (!zk.exists(context.getZooKeeperRoot() + Constants.ZNAMESPACES + "/" + namespaceId))
           throw new AcceptableThriftTableOperationException(namespaceId.canonicalID(), "", op,
               TableOperationExceptionType.NAMESPACE_NOTFOUND, "Namespace does not exist");
       }
@@ -131,9 +130,7 @@ public class Utils {
 
   public static long reserveHdfsDirectory(String directory, long tid)
       throws KeeperException, InterruptedException {
-    Instance instance = HdfsZooInstance.getInstance();
-
-    String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/"
+    String resvPath = context.getZooKeeperRoot() + Constants.ZHDFS_RESERVATIONS + "/"
         + Base64.getEncoder().encodeToString(directory.getBytes(UTF_8));
 
     IZooReaderWriter zk = ZooReaderWriter.getInstance();
@@ -146,8 +143,7 @@ public class Utils {
 
   public static void unreserveHdfsDirectory(String directory, long tid)
       throws KeeperException, InterruptedException {
-    Instance instance = HdfsZooInstance.getInstance();
-    String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/"
+    String resvPath = context.getZooKeeperRoot() + Constants.ZHDFS_RESERVATIONS + "/"
         + Base64.getEncoder().encodeToString(directory.getBytes(UTF_8));
     ZooReservation.release(ZooReaderWriter.getInstance(), resvPath, String.format("%016x", tid));
   }
@@ -155,7 +151,7 @@ public class Utils {
   private static Lock getLock(AbstractId id, long tid, boolean writeLock) throws Exception {
     byte[] lockData = String.format("%016x", tid).getBytes(UTF_8);
     ZooQueueLock qlock = new ZooQueueLock(
-        ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZTABLE_LOCKS + "/" + id, false);
+        context.getZooKeeperRoot() + Constants.ZTABLE_LOCKS + "/" + id, false);
     Lock lock = DistributedReadWriteLock.recoverLock(qlock, lockData);
     if (lock == null) {
       DistributedReadWriteLock locker = new DistributedReadWriteLock(qlock, lockData);
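
Utils shows the other half of the migration: ZooKeeper paths are no longer derived from the HdfsZooInstance singleton via ZooUtil.getRoot(instance), but from ServerContext.getZooKeeperRoot(). A minimal sketch of building one such path, not part of the commit (the helper name is made up; the context calls and constants come from the diff):

    import static java.nio.charset.StandardCharsets.UTF_8;

    import java.util.Base64;

    import org.apache.accumulo.core.Constants;
    import org.apache.accumulo.server.ServerContext;

    // Illustrative only: resolves the HDFS-reservation node for a bulk-import directory.
    class ZkPathExample {
      static String reservationPath(String directory) {
        ServerContext context = ServerContext.getInstance();
        // before: ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZHDFS_RESERVATIONS + ...
        return context.getZooKeeperRoot() + Constants.ZHDFS_RESERVATIONS + "/"
            + Base64.getEncoder().encodeToString(directory.getBytes(UTF_8));
      }
    }
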
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
index a590b49..d3f8471 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
@@ -54,8 +54,8 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.Lo
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -92,7 +92,7 @@ class WriteExportFiles extends MasterRepo {
 
     Connector conn = master.getConnector();
 
-    checkOffline(master);
+    checkOffline(master.getContext());
 
     Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     metaScanner.setRange(new KeyExtent(tableInfo.tableID, null, null).toMetadataRange());
@@ -124,8 +124,8 @@ class WriteExportFiles extends MasterRepo {
   @Override
   public Repo<Master> call(long tid, Master master) throws Exception {
     try {
-      exportTable(master.getFileSystem(), master, tableInfo.tableName, tableInfo.tableID,
-          tableInfo.exportDir);
+      exportTable(master.getFileSystem(), master.getContext(), tableInfo.tableName,
+          tableInfo.tableID, tableInfo.exportDir);
     } catch (IOException ioe) {
       throw new AcceptableThriftTableOperationException(tableInfo.tableID.canonicalID(),
           tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
@@ -143,7 +143,7 @@ class WriteExportFiles extends MasterRepo {
     Utils.unreserveTable(tableInfo.tableID, tid, false);
   }
 
-  public static void exportTable(VolumeManager fs, AccumuloServerContext context, String tableName,
+  public static void exportTable(VolumeManager fs, ServerContext context, String tableName,
       Table.ID tableID, String exportDir) throws Exception {
 
     fs.mkdirs(new Path(exportDir));
@@ -209,7 +209,7 @@ class WriteExportFiles extends MasterRepo {
     }
   }
 
-  private static Map<String,String> exportMetadata(VolumeManager fs, AccumuloServerContext context,
+  private static Map<String,String> exportMetadata(VolumeManager fs, ServerContext context,
       Table.ID tableID, ZipOutputStream zipOut, DataOutputStream dataOut)
       throws IOException, TableNotFoundException, AccumuloException, AccumuloSecurityException {
     zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_METADATA_FILE));
@@ -251,8 +251,8 @@ class WriteExportFiles extends MasterRepo {
     return uniqueFiles;
   }
 
-  private static void exportConfig(AccumuloServerContext context, Table.ID tableID,
-      ZipOutputStream zipOut, DataOutputStream dataOut)
+  private static void exportConfig(ServerContext context, Table.ID tableID, ZipOutputStream zipOut,
+      DataOutputStream dataOut)
       throws AccumuloException, AccumuloSecurityException, TableNotFoundException, IOException {
     Connector conn = context.getConnector();
 
@@ -260,8 +260,7 @@ class WriteExportFiles extends MasterRepo {
     Map<String,String> siteConfig = conn.instanceOperations().getSiteConfiguration();
     Map<String,String> systemConfig = conn.instanceOperations().getSystemConfiguration();
 
-    TableConfiguration tableConfig = context.getServerConfigurationFactory()
-        .getTableConfiguration(tableID);
+    TableConfiguration tableConfig = context.getServerConfFactory().getTableConfiguration(tableID);
 
     OutputStreamWriter osw = new OutputStreamWriter(dataOut, UTF_8);
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java
index 56bb93b..e17f3b6 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java
@@ -90,8 +90,8 @@ public class BulkImport extends MasterRepo {
     if (!Utils.getReadLock(tableId, tid).tryLock())
       return 100;
 
-    Tables.clearCache(master);
-    if (Tables.getTableState(master, tableId) == TableState.ONLINE) {
+    Tables.clearCache(master.getContext());
+    if (Tables.getTableState(master.getContext(), tableId) == TableState.ONLINE) {
       long reserve1, reserve2;
       reserve1 = reserve2 = Utils.reserveHdfsDirectory(sourceDir, tid);
       if (reserve1 == 0)
@@ -182,7 +182,7 @@ public class BulkImport extends MasterRepo {
       Table.ID tableId) throws Exception {
     final Path bulkDir = createNewBulkDir(fs, tableId);
 
-    MetadataTableUtil.addBulkLoadInProgressFlag(master,
+    MetadataTableUtil.addBulkLoadInProgressFlag(master.getContext(),
         "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
 
     Path dirPath = new Path(dir);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CleanUpBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CleanUpBulkImport.java
index 4dd6c6d..4ef01d4 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CleanUpBulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CleanUpBulkImport.java
@@ -53,9 +53,9 @@ public class CleanUpBulkImport extends MasterRepo {
     master.updateBulkImportStatus(source, BulkImportState.CLEANUP);
     log.debug("removing the bulkDir processing flag file in " + bulk);
     Path bulkDir = new Path(bulk);
-    MetadataTableUtil.removeBulkLoadInProgressFlag(master,
+    MetadataTableUtil.removeBulkLoadInProgressFlag(master.getContext(),
         "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
-    MetadataTableUtil.addDeleteEntry(master, tableId, bulkDir.toString());
+    MetadataTableUtil.addDeleteEntry(master.getContext(), tableId, bulkDir.toString());
     log.debug("removing the metadata table markers for loaded files");
     Connector conn = master.getConnector();
     MetadataTableUtil.removeBulkLoadEntries(conn, tableId, tid);
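
The bulk-import cleanup shows the same pattern applied to the metadata helpers: MetadataTableUtil calls that previously accepted the Master directly now take master.getContext(). A hedged sketch, not part of the commit (the class is illustrative and the MetadataTableUtil import location is assumed; the call shapes mirror the diff):

    import org.apache.accumulo.core.client.impl.Table;
    import org.apache.accumulo.master.Master;
    import org.apache.accumulo.server.util.MetadataTableUtil;
    import org.apache.hadoop.fs.Path;

    // Illustrative only: drops the in-progress flag and queues the bulk directory for deletion.
    class BulkCleanupExample {
      static void cleanup(Master master, Table.ID tableId, String bulk) throws Exception {
        Path bulkDir = new Path(bulk);
        MetadataTableUtil.removeBulkLoadInProgressFlag(master.getContext(),
            "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
        MetadataTableUtil.addDeleteEntry(master.getContext(), tableId, bulkDir.toString());
      }
    }
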
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/LoadFiles.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/LoadFiles.java
index 5492b61..6c710d3 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/LoadFiles.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/LoadFiles.java
@@ -168,11 +168,12 @@ class LoadFiles extends MasterRepo {
               long timeInMillis = master.getConfiguration()
                   .getTimeInMillis(Property.MASTER_BULK_TIMEOUT);
               server = servers[random.nextInt(servers.length)].getLocation();
-              client = ThriftUtil.getTServerClient(server, master, timeInMillis);
+              client = ThriftUtil.getTServerClient(server, master.getContext(), timeInMillis);
               List<String> attempt1 = Collections.singletonList(file);
               log.debug("Asking " + server + " to bulk import " + file);
-              List<String> fail = client.bulkImportFiles(Tracer.traceInfo(), master.rpcCreds(), tid,
-                  tableId.canonicalID(), attempt1, errorDir, setTime);
+              List<String> fail = client.bulkImportFiles(Tracer.traceInfo(),
+                  master.getContext().rpcCreds(), tid, tableId.canonicalID(), attempt1, errorDir,
+                  setTime);
               if (fail.isEmpty()) {
                 loaded.add(file);
               } else {
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/BulkImportMove.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/BulkImportMove.java
index c2de4dd..6b5d26f 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/BulkImportMove.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/BulkImportMove.java
@@ -98,7 +98,7 @@ class BulkImportMove extends MasterRepo {
    */
   private void moveFiles(String fmtTid, Path sourceDir, Path bulkDir, Master master,
       final VolumeManager fs, Map<String,String> renames) throws Exception {
-    MetadataTableUtil.addBulkLoadInProgressFlag(master,
+    MetadataTableUtil.addBulkLoadInProgressFlag(master.getContext(),
         "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
 
     int workerCount = master.getConfiguration().getCount(Property.MASTER_BULK_RENAME_THREADS);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CleanUpBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CleanUpBulkImport.java
index 3fec43e..072285c 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CleanUpBulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CleanUpBulkImport.java
@@ -47,9 +47,9 @@ public class CleanUpBulkImport extends MasterRepo {
   public Repo<Master> call(long tid, Master master) throws Exception {
     log.debug("removing the bulkDir processing flag file in " + info.bulkDir);
     Path bulkDir = new Path(info.bulkDir);
-    MetadataTableUtil.removeBulkLoadInProgressFlag(master,
+    MetadataTableUtil.removeBulkLoadInProgressFlag(master.getContext(),
         "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
-    MetadataTableUtil.addDeleteEntry(master, info.tableId, bulkDir.toString());
+    MetadataTableUtil.addDeleteEntry(master.getContext(), info.tableId, bulkDir.toString());
     if (info.tableState == TableState.ONLINE) {
       log.debug("removing the metadata table markers for loaded files");
       Connector conn = master.getConnector();
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/LoadFiles.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/LoadFiles.java
index dcf9721..f965e56 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/LoadFiles.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/LoadFiles.java
@@ -173,8 +173,8 @@ class LoadFiles extends MasterRepo {
               thriftImports.size());
           TabletClientService.Client client = null;
           try {
-            client = ThriftUtil.getTServerClient(server, master, timeInMillis);
-            client.loadFiles(Tracer.traceInfo(), master.rpcCreds(), tid,
+            client = ThriftUtil.getTServerClient(server, master.getContext(), timeInMillis);
+            client.loadFiles(Tracer.traceInfo(), master.getContext().rpcCreds(), tid,
                 tablet.getExtent().toThrift(), bulkDir.toString(), thriftImports, setTime);
           } catch (TException ex) {
             log.debug("rpc failed server: " + server + ", tid:" + fmtTid + " " + ex.getMessage(),
@@ -271,9 +271,9 @@ class LoadFiles extends MasterRepo {
 
     Text startRow = loadMapEntry.getKey().getPrevEndRow();
 
-    Iterator<TabletMetadata> tabletIter = MetadataScanner.builder().from(master).scanMetadataTable()
-        .overRange(tableId, startRow, null).checkConsistency().fetchPrev().fetchLocation()
-        .fetchLoaded().build().iterator();
+    Iterator<TabletMetadata> tabletIter = MetadataScanner.builder().from(master.getContext())
+        .scanMetadataTable().overRange(tableId, startRow, null).checkConsistency().fetchPrev()
+        .fetchLocation().fetchLoaded().build().iterator();
 
     List<TabletMetadata> tablets = new ArrayList<>();
     TabletMetadata currentTablet = tabletIter.next();
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/PrepBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/PrepBulkImport.java
index ceade85..4c0030d 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/PrepBulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/PrepBulkImport.java
@@ -87,7 +87,7 @@ public class PrepBulkImport extends MasterRepo {
 
     if (master.onlineTabletServers().size() == 0)
       return 500;
-    Tables.clearCache(master);
+    Tables.clearCache(master.getContext());
 
     return Utils.reserveHdfsDirectory(bulkInfo.sourceDir, tid);
   }
@@ -162,7 +162,7 @@ public class PrepBulkImport extends MasterRepo {
       Iterators.transform(lmi, entry -> entry.getKey());
 
       TabletIterFactory tabletIterFactory = startRow -> {
-        return MetadataScanner.builder().from(master).scanMetadataTable()
+        return MetadataScanner.builder().from(master.getContext()).scanMetadataTable()
             .overRange(bulkInfo.tableId, startRow, null).checkConsistency().fetchPrev().build()
             .stream().map(TabletMetadata::getExtent).iterator();
       };
@@ -177,7 +177,7 @@ public class PrepBulkImport extends MasterRepo {
     // now that table lock is acquired check that all splits in load mapping exists in table
     checkForMerge(master);
 
-    bulkInfo.tableState = Tables.getTableState(master.getInstance(), bulkInfo.tableId);
+    bulkInfo.tableState = Tables.getTableState(master.getContext(), bulkInfo.tableId);
 
     VolumeManager fs = master.getFileSystem();
     final UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java b/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
index cf3fb66..a133080 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
@@ -92,10 +92,10 @@ public class ShutdownTServer extends MasterRepo {
   public Repo<Master> call(long tid, Master master) throws Exception {
     // suppress assignment of tablets to the server
     if (force) {
-      String path = ZooUtil.getRoot(master.getInstance()) + Constants.ZTSERVERS + "/"
+      String path = ZooUtil.getRoot(master.getInstanceID()) + Constants.ZTSERVERS + "/"
           + server.getLocation();
       ZooLock.deleteLock(path);
-      path = ZooUtil.getRoot(master.getInstance()) + Constants.ZDEADTSERVERS + "/"
+      path = ZooUtil.getRoot(master.getInstanceID()) + Constants.ZDEADTSERVERS + "/"
           + server.getLocation();
       IZooReaderWriter zoo = ZooReaderWriter.getInstance();
       zoo.putPersistentData(path, "forced down".getBytes(UTF_8), NodeExistsPolicy.OVERWRITE);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java b/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java
index c4c2ea4..1e51257 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java
@@ -23,14 +23,12 @@ import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.AdminUtil;
 import org.apache.accumulo.fate.ReadOnlyStore;
 import org.apache.accumulo.fate.ZooStore;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 
 import com.beust.jcommander.JCommander;
@@ -79,9 +77,10 @@ public class FateAdmin {
 
     AdminUtil<Master> admin = new AdminUtil<>();
 
-    Instance instance = HdfsZooInstance.getInstance();
-    String path = ZooUtil.getRoot(instance) + Constants.ZFATE;
-    String masterPath = ZooUtil.getRoot(instance) + Constants.ZMASTER_LOCK;
+    ServerContext context = ServerContext.getInstance();
+    final String zkRoot = context.getZooKeeperRoot();
+    String path = zkRoot + Constants.ZFATE;
+    String masterPath = zkRoot + Constants.ZMASTER_LOCK;
     IZooReaderWriter zk = ZooReaderWriter.getInstance();
     ZooStore<Master> zs = new ZooStore<>(path, zk);
 
@@ -96,10 +95,10 @@ public class FateAdmin {
         if (!admin.prepDelete(zs, zk, masterPath, txid)) {
           System.exit(1);
         }
-        admin.deleteLocks(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS, txid);
+        admin.deleteLocks(zs, zk, zkRoot + Constants.ZTABLE_LOCKS, txid);
       }
     } else if (jc.getParsedCommand().equals("print")) {
-      admin.print(new ReadOnlyStore<>(zs), zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS);
+      admin.print(new ReadOnlyStore<>(zs), zk, zkRoot + Constants.ZTABLE_LOCKS);
     }
   }
 }
diff --git a/server/master/src/test/java/org/apache/accumulo/master/metrics/Metrics2ReplicationMetricsTest.java b/server/master/src/test/java/org/apache/accumulo/master/metrics/Metrics2ReplicationMetricsTest.java
index 676eacb..e2c4d30 100644
--- a/server/master/src/test/java/org/apache/accumulo/master/metrics/Metrics2ReplicationMetricsTest.java
+++ b/server/master/src/test/java/org/apache/accumulo/master/metrics/Metrics2ReplicationMetricsTest.java
@@ -19,6 +19,7 @@ package org.apache.accumulo.master.metrics;
 import java.lang.reflect.Field;
 
 import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.replication.ReplicationUtil;
 import org.apache.hadoop.fs.FileStatus;
@@ -51,6 +52,7 @@ public class Metrics2ReplicationMetricsTest {
   @Test
   public void testAddReplicationQueueTimeMetrics() throws Exception {
     Master master = EasyMock.createMock(Master.class);
+    ServerContext context = EasyMock.createMock(ServerContext.class);
     MetricsSystem system = EasyMock.createMock(MetricsSystem.class);
     VolumeManager fileSystem = EasyMock.createMock(VolumeManager.class);
     ReplicationUtil util = EasyMock.createMock(ReplicationUtil.class);
@@ -61,6 +63,7 @@ public class Metrics2ReplicationMetricsTest {
     Path path2 = new Path("hdfs://localhost:9000/accumulo/wal/file2");
 
     // First call will initialize the map of paths to modification time
+    EasyMock.expect(master.getContext()).andReturn(context).anyTimes();
     EasyMock.expect(util.getPendingReplicationPaths()).andReturn(ImmutableSet.of(path1, path2));
     EasyMock.expect(master.getFileSystem()).andReturn(fileSystem);
     EasyMock.expect(fileSystem.getFileStatus(path1)).andReturn(createStatus(100));
diff --git a/server/master/src/test/java/org/apache/accumulo/master/replication/MasterReplicationCoordinatorTest.java b/server/master/src/test/java/org/apache/accumulo/master/replication/MasterReplicationCoordinatorTest.java
index f19d825..24d527a 100644
--- a/server/master/src/test/java/org/apache/accumulo/master/replication/MasterReplicationCoordinatorTest.java
+++ b/server/master/src/test/java/org/apache/accumulo/master/replication/MasterReplicationCoordinatorTest.java
@@ -19,10 +19,10 @@ package org.apache.accumulo.master.replication;
 import java.util.Collections;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.util.HostAndPort;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
 import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.easymock.EasyMock;
 import org.junit.Assert;
@@ -34,13 +34,10 @@ public class MasterReplicationCoordinatorTest {
   public void randomServer() {
     Master master = EasyMock.createMock(Master.class);
     ZooReader reader = EasyMock.createMock(ZooReader.class);
-    Instance inst = EasyMock.createMock(Instance.class);
-
-    EasyMock.expect(master.getInstance()).andReturn(inst);
-    EasyMock.expect(inst.getInstanceID()).andReturn("1234");
+    ServerContext context = EasyMock.createMock(ServerContext.class);
+    EasyMock.expect(master.getContext()).andReturn(context);
     EasyMock.expect(master.getInstanceID()).andReturn("1234");
-
-    EasyMock.replay(master, reader, inst);
+    EasyMock.replay(master, reader);
 
     MasterReplicationCoordinator coordinator = new MasterReplicationCoordinator(master, reader);
     TServerInstance inst1 = new TServerInstance(HostAndPort.fromParts("host1", 1234), "session");
@@ -51,14 +48,11 @@ public class MasterReplicationCoordinatorTest {
   @Test(expected = IllegalArgumentException.class)
   public void invalidOffset() {
     Master master = EasyMock.createMock(Master.class);
+    ServerContext context = EasyMock.createMock(ServerContext.class);
     ZooReader reader = EasyMock.createMock(ZooReader.class);
-    Instance inst = EasyMock.createMock(Instance.class);
-
-    EasyMock.expect(master.getInstance()).andReturn(inst);
-    EasyMock.expect(inst.getInstanceID()).andReturn("1234");
+    EasyMock.expect(master.getContext()).andReturn(context);
     EasyMock.expect(master.getInstanceID()).andReturn("1234");
-
-    EasyMock.replay(master, reader, inst);
+    EasyMock.replay(master, reader);
 
     MasterReplicationCoordinator coordinator = new MasterReplicationCoordinator(master, reader);
     TServerInstance inst1 = new TServerInstance(HostAndPort.fromParts("host1", 1234), "session");
@@ -70,17 +64,16 @@ public class MasterReplicationCoordinatorTest {
   public void randomServerFromMany() {
     Master master = EasyMock.createMock(Master.class);
     ZooReader reader = EasyMock.createMock(ZooReader.class);
-    Instance inst = EasyMock.createMock(Instance.class);
+    ServerContext context = EasyMock.createMock(ServerContext.class);
 
-    EasyMock.expect(master.getInstance()).andReturn(inst).anyTimes();
-    EasyMock.expect(inst.getInstanceID()).andReturn("1234").anyTimes();
+    EasyMock.expect(context.getInstanceID()).andReturn("1234").anyTimes();
     EasyMock.expect(master.getInstanceID()).andReturn("1234").anyTimes();
-
-    EasyMock.replay(master, reader, inst);
+    EasyMock.expect(master.getContext()).andReturn(context).anyTimes();
+    EasyMock.replay(master, context, reader);
 
     MasterReplicationCoordinator coordinator = new MasterReplicationCoordinator(master, reader);
 
-    EasyMock.verify(master, reader, inst);
+    EasyMock.verify(master, reader);
 
     TreeSet<TServerInstance> instances = new TreeSet<>();
     TServerInstance inst1 = new TServerInstance(HostAndPort.fromParts("host1", 1234), "session");
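
On the test side, the EasyMock setup changes in step: instead of mocking Instance and wiring it through master.getInstance(), the tests mock ServerContext and return it from master.getContext(). A minimal sketch of that setup, assuming only the calls exercised by the code under test need expectations:

    import org.apache.accumulo.master.Master;
    import org.apache.accumulo.server.ServerContext;
    import org.easymock.EasyMock;

    // Illustrative only: a Master mock whose context answers getInstanceID().
    class MockSetupExample {
      static Master mockMaster() {
        Master master = EasyMock.createMock(Master.class);
        ServerContext context = EasyMock.createMock(ServerContext.class);
        EasyMock.expect(master.getContext()).andReturn(context).anyTimes();
        EasyMock.expect(context.getInstanceID()).andReturn("1234").anyTimes();
        EasyMock.replay(master, context);
        return master;
      }
    }
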
diff --git a/server/master/src/test/java/org/apache/accumulo/master/replication/SequentialWorkAssignerTest.java b/server/master/src/test/java/org/apache/accumulo/master/replication/SequentialWorkAssignerTest.java
index 32b7a96..1131e6e 100644
--- a/server/master/src/test/java/org/apache/accumulo/master/replication/SequentialWorkAssignerTest.java
+++ b/server/master/src/test/java/org/apache/accumulo/master/replication/SequentialWorkAssignerTest.java
@@ -25,7 +25,6 @@ import java.util.Map;
 import java.util.TreeMap;
 
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.replication.ReplicationConstants;
@@ -54,7 +53,6 @@ public class SequentialWorkAssignerTest {
   public void basicZooKeeperCleanup() throws Exception {
     DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
     ZooCache zooCache = createMock(ZooCache.class);
-    Instance inst = createMock(Instance.class);
 
     Map<String,Map<Table.ID,String>> queuedWork = new TreeMap<>();
     Map<Table.ID,String> cluster1Work = new TreeMap<>();
@@ -73,8 +71,7 @@ public class SequentialWorkAssignerTest {
     assigner.setWorkQueue(workQueue);
     assigner.setQueuedWork(queuedWork);
 
-    expect(conn.getInstance()).andReturn(inst);
-    expect(inst.getInstanceID()).andReturn("instance");
+    expect(conn.getInstanceID()).andReturn("instance");
 
     // file1 replicated
     expect(zooCache.get(ZooUtil.getRoot("instance") + ReplicationConstants.ZOO_WORK_QUEUE + "/"
@@ -88,11 +85,11 @@ public class SequentialWorkAssignerTest {
                     new ReplicationTarget("cluster1", "2", Table.ID.of("2")))))
                         .andReturn(new byte[0]);
 
-    replay(workQueue, zooCache, conn, inst);
+    replay(workQueue, zooCache, conn);
 
     assigner.cleanupFinishedWork();
 
-    verify(workQueue, zooCache, conn, inst);
+    verify(workQueue, zooCache, conn);
 
     Assert.assertEquals(1, cluster1Work.size());
     Assert.assertEquals(
diff --git a/server/master/src/test/java/org/apache/accumulo/master/replication/UnorderedWorkAssignerTest.java b/server/master/src/test/java/org/apache/accumulo/master/replication/UnorderedWorkAssignerTest.java
index a530e23..95337be 100644
--- a/server/master/src/test/java/org/apache/accumulo/master/replication/UnorderedWorkAssignerTest.java
+++ b/server/master/src/test/java/org/apache/accumulo/master/replication/UnorderedWorkAssignerTest.java
@@ -31,7 +31,6 @@ import java.util.UUID;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.replication.ReplicationConstants;
@@ -110,22 +109,20 @@ public class UnorderedWorkAssignerTest {
     Set<String> queuedWork = new LinkedHashSet<>(Arrays.asList("wal1", "wal2"));
     assigner.setQueuedWork(queuedWork);
 
-    Instance inst = createMock(Instance.class);
     ZooCache cache = createMock(ZooCache.class);
     assigner.setZooCache(cache);
 
-    expect(conn.getInstance()).andReturn(inst);
-    expect(inst.getInstanceID()).andReturn("id");
+    expect(conn.getInstanceID()).andReturn("id");
     expect(cache.get(Constants.ZROOT + "/id" + ReplicationConstants.ZOO_WORK_QUEUE + "/wal1"))
         .andReturn(null);
     expect(cache.get(Constants.ZROOT + "/id" + ReplicationConstants.ZOO_WORK_QUEUE + "/wal2"))
         .andReturn(null);
 
-    replay(cache, inst, conn);
+    replay(cache, conn);
 
     assigner.cleanupFinishedWork();
 
-    verify(cache, inst, conn);
+    verify(cache, conn);
     Assert.assertTrue("Queued work was not emptied", queuedWork.isEmpty());
   }
 
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
index d163740..3310edf 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
@@ -40,11 +40,9 @@ import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.MasterClient;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.conf.SiteConfiguration;
 import org.apache.accumulo.core.gc.thrift.GCMonitorService;
 import org.apache.accumulo.core.gc.thrift.GCStatus;
 import org.apache.accumulo.core.master.thrift.MasterClientService;
@@ -54,7 +52,6 @@ import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.rpc.ThriftUtil;
 import org.apache.accumulo.core.tabletserver.thrift.ActiveScan;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client;
-import org.apache.accumulo.core.trace.DistributedTrace;
 import org.apache.accumulo.core.trace.Tracer;
 import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.core.util.HostAndPort;
@@ -66,19 +63,13 @@ import org.apache.accumulo.fate.util.LoggingRunnable;
 import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.Accumulo;
-import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.HighlyAvailableService;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.ServerOpts;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfigurationFactory;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.accumulo.server.metrics.MetricsSystemHelper;
 import org.apache.accumulo.server.monitor.LogService;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.problems.ProblemType;
-import org.apache.accumulo.server.security.SecurityUtil;
 import org.apache.accumulo.server.util.Halt;
 import org.apache.accumulo.server.util.TableInfoUtil;
 import org.apache.accumulo.server.util.time.SimpleTimer;
@@ -174,7 +165,7 @@ public class Monitor implements HighlyAvailableService {
   private static GCStatus gcStatus;
 
   private static ServerConfigurationFactory config;
-  private static AccumuloServerContext context;
+  private static ServerContext context;
 
   private static EmbeddedWebServer server;
 
@@ -266,7 +257,7 @@ public class Monitor implements HighlyAvailableService {
           public void run() {
             synchronized (Monitor.class) {
               if (cachedInstanceName.get().equals(DEFAULT_INSTANCE_NAME)) {
-                final String instanceName = HdfsZooInstance.getInstance().getInstanceName();
+                final String instanceName = ServerContext.getInstance().getInstanceName();
                 if (null != instanceName) {
                   cachedInstanceName.set(instanceName);
                 }
@@ -440,30 +431,22 @@ public class Monitor implements HighlyAvailableService {
   }
 
   public static void main(String[] args) throws Exception {
+
     final String app = "monitor";
     ServerOpts opts = new ServerOpts();
     opts.parseArgs(app, args);
-    String hostname = opts.getAddress();
-    SecurityUtil.serverLogin(SiteConfiguration.getInstance());
-
-    VolumeManager fs = VolumeManagerImpl.get();
-    Instance instance = HdfsZooInstance.getInstance();
-    config = new ServerConfigurationFactory(instance);
-    context = new AccumuloServerContext(instance, config);
-    log.info("Version " + Constants.VERSION);
-    log.info("Instance " + context.getInstanceID());
-    MetricsSystemHelper.configure(Monitor.class.getSimpleName());
-    Accumulo.init(fs, instance, config, app);
-    Monitor monitor = new Monitor();
-    // Servlets need access to limit requests when the monitor is not active, but Servlets are
-    // instantiated
-    // via reflection. Expose the service this way instead.
-    Monitor.HA_SERVICE_INSTANCE = monitor;
-    DistributedTrace.enable(hostname, app, config.getSystemConfiguration());
+    ServerContext context = ServerContext.getInstance();
+    context.setupServer(app, Monitor.class.getName(), opts.getAddress());
     try {
-      monitor.run(hostname);
+      config = context.getServerConfFactory();
+      Monitor.context = context;
+      Monitor monitor = new Monitor();
+      // Servlets need access to limit requests when the monitor is not active, but Servlets are
+      // instantiated via reflection. Expose the service this way instead.
+      Monitor.HA_SERVICE_INSTANCE = monitor;
+      monitor.run(context.getHostname());
     } finally {
-      DistributedTrace.disable();
+      context.teardownServer();
     }
   }
... 2842 lines suppressed ...
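
The Monitor main method is the clearest picture of the new server bootstrap: the hand-rolled VolumeManager/HdfsZooInstance/DistributedTrace wiring is replaced by a singleton ServerContext, with setupServer and teardownServer bracketing the service's run loop. A hedged sketch of that lifecycle for a hypothetical server (the class name and run loop are made up; the ServerContext and ServerOpts calls follow the diff):

    import org.apache.accumulo.server.ServerContext;
    import org.apache.accumulo.server.ServerOpts;

    // Illustrative only: the shape of a server main() after this refactor.
    public class ExampleServer {
      public static void main(String[] args) throws Exception {
        final String app = "example";
        ServerOpts opts = new ServerOpts();
        opts.parseArgs(app, args);
        ServerContext context = ServerContext.getInstance();
        context.setupServer(app, ExampleServer.class.getName(), opts.getAddress());
        try {
          // service-specific run loop goes here, e.g. using context.getHostname()
        } finally {
          context.teardownServer();
        }
      }
    }
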

