accumulo-commits mailing list archives

From ctubb...@apache.org
Subject [03/10] accumulo git commit: ACCUMULO-3199 Internal refactor to add ClientContext
Date Tue, 25 Nov 2014 22:36:47 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
index e2ac08b..b8e0b40 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
@@ -39,7 +39,7 @@ import org.apache.accumulo.core.replication.StatusUtil;
 import org.apache.accumulo.core.replication.proto.Replication.Status;
 import org.apache.accumulo.core.trace.Span;
 import org.apache.accumulo.core.trace.Trace;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
@@ -54,16 +54,17 @@ import com.google.protobuf.InvalidProtocolBufferException;
 public class WorkMaker {
   private static final Logger log = LoggerFactory.getLogger(WorkMaker.class);
 
-  private final Connector conn;
+  private final AccumuloServerContext context;
+  private Connector conn;
 
   private BatchWriter writer;
 
-  public WorkMaker(Connector conn) {
+  public WorkMaker(AccumuloServerContext context, Connector conn) {
+    this.context = context;
     this.conn = conn;
   }
 
   public void run() {
-    ServerConfigurationFactory serverConf = new ServerConfigurationFactory(conn.getInstance());
     if (!ReplicationTable.isOnline(conn)) {
       log.info("Replication table is not yet online");
       return;
@@ -110,7 +111,7 @@ public class WorkMaker {
         }
 
         // Get the table configuration for the table specified by the status record
-        tableConf = serverConf.getTableConfiguration(tableId.toString());
+        tableConf = context.getServerConfigurationFactory().getTableConfiguration(tableId.toString());
 
         // Pull the relevant replication targets
         // TODO Cache this instead of pulling it every time
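
The change above threads an AccumuloServerContext through WorkMaker instead of constructing a ServerConfigurationFactory on every run(), and the table configuration is now pulled through that context. A minimal sketch of wiring WorkMaker up after this change, assuming the caller already holds an Instance (the variable names are illustrative, not part of this commit):

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Instance;
    import org.apache.accumulo.server.AccumuloServerContext;
    import org.apache.accumulo.server.conf.ServerConfigurationFactory;

    // Inside a method that can throw Exception, with an Instance `instance`
    // in scope: build a server context around it, then hand the context and
    // a Connector derived from it to WorkMaker.
    AccumuloServerContext context =
        new AccumuloServerContext(new ServerConfigurationFactory(instance));
    Connector conn = context.getConnector();
    new WorkMaker(context, conn).run();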

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
index 66c3fcc..8ce728e 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
@@ -40,7 +40,6 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.IsolatedScanner;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.impl.ServerClient;
@@ -60,20 +59,18 @@ import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.trace.wrappers.TraceExecutorService;
 import org.apache.accumulo.core.trace.Tracer;
+import org.apache.accumulo.core.trace.wrappers.TraceExecutorService;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.SimpleThreadPool;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
 import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tablets.UniqueNameAllocator;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;
@@ -131,9 +128,8 @@ public class BulkImport extends MasterRepo {
     if (!Utils.getReadLock(tableId, tid).tryLock())
       return 100;
 
-    Instance instance = HdfsZooInstance.getInstance();
-    Tables.clearCache(instance);
-    if (Tables.getTableState(instance, tableId) == TableState.ONLINE) {
+    Tables.clearCache(master.getInstance());
+    if (Tables.getTableState(master.getInstance(), tableId) == TableState.ONLINE) {
       long reserve1, reserve2;
       reserve1 = reserve2 = Utils.reserveHdfsDirectory(sourceDir, tid);
       if (reserve1 == 0)
@@ -222,7 +218,7 @@ public class BulkImport extends MasterRepo {
   private String prepareBulkImport(Master master, final VolumeManager fs, String dir, String tableId) throws Exception {
     final Path bulkDir = createNewBulkDir(fs, tableId);
 
-    MetadataTableUtil.addBulkLoadInProgressFlag("/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
+    MetadataTableUtil.addBulkLoadInProgressFlag(master, "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
 
     Path dirPath = new Path(dir);
     FileStatus[] mapFiles = fs.listStatus(dirPath);
@@ -333,8 +329,8 @@ class CleanUpBulkImport extends MasterRepo {
   public Repo<Master> call(long tid, Master master) throws Exception {
     log.debug("removing the bulk processing flag file in " + bulk);
     Path bulkDir = new Path(bulk);
-    MetadataTableUtil.removeBulkLoadInProgressFlag("/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
-    MetadataTableUtil.addDeleteEntry(tableId, bulkDir.toString());
+    MetadataTableUtil.removeBulkLoadInProgressFlag(master, "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
+    MetadataTableUtil.addDeleteEntry(master, tableId, bulkDir.toString());
     log.debug("removing the metadata table markers for loaded files");
     Connector conn = master.getConnector();
     MetadataTableUtil.removeBulkLoadEntries(conn, tableId, tid);
@@ -460,7 +456,7 @@ class CopyFailed extends MasterRepo {
     }
 
     if (loadedFailures.size() > 0) {
-      DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID()
+      DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + master.getInstance().getInstanceID()
           + Constants.ZBULK_FAILED_COPYQ, master.getConfiguration());
 
       HashSet<String> workIds = new HashSet<String>();
@@ -574,12 +570,12 @@ class LoadFiles extends MasterRepo {
               // this is running on the master and there are lots of connections to tablet servers
               // serving the metadata tablets
               long timeInMillis = master.getConfiguration().getTimeInMillis(Property.MASTER_BULK_TIMEOUT);
-              Pair<String,Client> pair = ServerClient.getConnection(master.getInstance(), false, timeInMillis);
+              Pair<String,Client> pair = ServerClient.getConnection(master, false, timeInMillis);
               client = pair.getSecond();
               server = pair.getFirst();
               List<String> attempt = Collections.singletonList(file);
               log.debug("Asking " + pair.getFirst() + " to bulk import " + file);
-              List<String> fail = client.bulkImportFiles(Tracer.traceInfo(), SystemCredentials.get().toThrift(master.getInstance()), tid, tableId, attempt,
+              List<String> fail = client.bulkImportFiles(Tracer.traceInfo(), master.rpcCreds(), tid, tableId, attempt,
                   errorDir, setTime);
               if (fail.isEmpty()) {
                 loaded.add(file);
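
Throughout BulkImport the pattern is the same: process-wide singletons (HdfsZooInstance.getInstance(), SystemCredentials.get().toThrift(...)) are replaced by lookups through the Master already passed into each repo step. A schematic sketch of the substitution (the helper class is hypothetical, shown only to contrast the two styles):

    import org.apache.accumulo.core.client.Instance;
    import org.apache.accumulo.master.Master;

    // Hypothetical helper contrasting the old and new lookups.
    final class ContextLookups {
      static Instance instanceFor(Master master) {
        // was: HdfsZooInstance.getInstance()
        return master.getInstance();
      }
      // Likewise, master.rpcCreds() replaces
      // SystemCredentials.get().toThrift(master.getInstance()) for RPC calls.
    }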

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
index 374fc24..4f4b27e 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
@@ -19,14 +19,12 @@ package org.apache.accumulo.master.tableOps;
 import static java.nio.charset.StandardCharsets.UTF_8;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 
 class FinishCancelCompaction extends MasterRepo {
@@ -56,24 +54,22 @@ public class CancelCompactions extends MasterRepo {
 
   private static final long serialVersionUID = 1L;
   private String tableId;
-  private String namespaceId;
 
   public CancelCompactions(String tableId) {
     this.tableId = tableId;
-    Instance inst = HdfsZooInstance.getInstance();
-    this.namespaceId = Tables.getNamespaceId(inst, tableId);
   }
 
   @Override
   public long isReady(long tid, Master environment) throws Exception {
+    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
     return Utils.reserveNamespace(namespaceId, tid, false, true, TableOperation.COMPACT_CANCEL)
         + Utils.reserveTable(tableId, tid, false, true, TableOperation.COMPACT_CANCEL);
   }
 
   @Override
   public Repo<Master> call(long tid, Master environment) throws Exception {
-    String zCompactID = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_ID;
-    String zCancelID = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId
+    String zCompactID = Constants.ZROOT + "/" + environment.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_ID;
+    String zCancelID = Constants.ZROOT + "/" + environment.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId
         + Constants.ZTABLE_COMPACT_CANCEL_ID;
 
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
@@ -101,6 +97,7 @@ public class CancelCompactions extends MasterRepo {
 
   @Override
   public void undo(long tid, Master environment) throws Exception {
+    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
     Utils.unreserveNamespace(namespaceId, tid, false);
     Utils.unreserveTable(tableId, tid, false);
   }
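
CancelCompactions (and the repos below) stop computing namespaceId in the constructor. These repos are serialized by FATE (note the serialVersionUID fields), so a constructor-time lookup both depended on the HdfsZooInstance singleton and baked the namespace id into the serialized repo; resolving it inside isReady/call/undo consults the live environment instead. The recurring lookup, extracted as a hypothetical helper:

    import org.apache.accumulo.core.client.impl.Tables;
    import org.apache.accumulo.master.Master;

    // Hypothetical helper naming the pattern: resolve the namespace id at
    // execution time from the Master's Instance, not at construction time.
    final class NamespaceLookup {
      static String namespaceIdOf(Master environment, String tableId) {
        return Tables.getNamespaceId(environment.getInstance(), tableId);
      }
    }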

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChangeTableState.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChangeTableState.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChangeTableState.java
index f1878b0..c902b06 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChangeTableState.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChangeTableState.java
@@ -16,13 +16,11 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.tables.TableManager;
 import org.apache.log4j.Logger;
 
@@ -31,13 +29,10 @@ public class ChangeTableState extends MasterRepo {
   private static final long serialVersionUID = 1L;
   private String tableId;
   private TableOperation top;
-  private String namespaceId;
 
   public ChangeTableState(String tableId, TableOperation top) {
     this.tableId = tableId;
     this.top = top;
-    Instance inst = HdfsZooInstance.getInstance();
-    this.namespaceId = Tables.getNamespaceId(inst, tableId);
 
     if (top != TableOperation.ONLINE && top != TableOperation.OFFLINE)
       throw new IllegalArgumentException(top.toString());
@@ -45,13 +40,14 @@ public class ChangeTableState extends MasterRepo {
 
   @Override
   public long isReady(long tid, Master environment) throws Exception {
+    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
     // reserve the table so that this op does not run concurrently with create, clone, or delete table
     return Utils.reserveNamespace(namespaceId, tid, false, true, top) + Utils.reserveTable(tableId, tid, true, true, top);
   }
 
   @Override
   public Repo<Master> call(long tid, Master env) throws Exception {
-
+    String namespaceId = Tables.getNamespaceId(env.getInstance(), tableId);
     TableState ts = TableState.ONLINE;
     if (top == TableOperation.OFFLINE)
       ts = TableState.OFFLINE;
@@ -66,6 +62,7 @@ public class ChangeTableState extends MasterRepo {
 
   @Override
   public void undo(long tid, Master env) throws Exception {
+    String namespaceId = Tables.getNamespaceId(env.getInstance(), tableId);
     Utils.unreserveNamespace(namespaceId, tid, false);
     Utils.unreserveTable(tableId, tid, true);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
index da0afd8..7034e39 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
@@ -20,7 +20,6 @@ import java.io.Serializable;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.NamespaceNotFoundException;
 import org.apache.accumulo.core.client.impl.Namespaces;
 import org.apache.accumulo.core.client.impl.Tables;
@@ -35,7 +34,6 @@ import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.log4j.Logger;
@@ -114,17 +112,16 @@ class CloneMetadata extends MasterRepo {
   public Repo<Master> call(long tid, Master environment) throws Exception {
     Logger.getLogger(CloneMetadata.class).info(
         String.format("Cloning %s with tableId %s from srcTableId %s", cloneInfo.tableName, cloneInfo.tableId, cloneInfo.srcTableId));
-    Instance instance = HdfsZooInstance.getInstance();
     // need to clear out any metadata entries for tableId just in case this
     // died before and is executing again
-    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, SystemCredentials.get(), environment.getMasterLock());
-    MetadataTableUtil.cloneTable(instance, cloneInfo.srcTableId, cloneInfo.tableId, environment.getFileSystem());
+    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment, environment.getMasterLock());
+    MetadataTableUtil.cloneTable(environment, cloneInfo.srcTableId, cloneInfo.tableId, environment.getFileSystem());
     return new FinishCloneTable(cloneInfo);
   }
 
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, SystemCredentials.get(), environment.getMasterLock());
+    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment, environment.getMasterLock());
   }
 
 }
@@ -137,8 +134,7 @@ class CloneZookeeper extends MasterRepo {
 
   public CloneZookeeper(CloneInfo cloneInfo) throws NamespaceNotFoundException {
     this.cloneInfo = cloneInfo;
-    Instance inst = HdfsZooInstance.getInstance();
-    this.cloneInfo.namespaceId = Namespaces.getNamespaceId(inst, Tables.qualify(this.cloneInfo.tableName).getFirst());
+    this.cloneInfo.namespaceId = Namespaces.getNamespaceId(HdfsZooInstance.getInstance(), Tables.qualify(this.cloneInfo.tableName).getFirst());
   }
 
   @Override
@@ -155,13 +151,12 @@ class CloneZookeeper extends MasterRepo {
     Utils.tableNameLock.lock();
     try {
       // write tableName & tableId to zookeeper
-      Instance instance = HdfsZooInstance.getInstance();
 
-      Utils.checkTableDoesNotExist(instance, cloneInfo.tableName, cloneInfo.tableId, TableOperation.CLONE);
+      Utils.checkTableDoesNotExist(environment.getInstance(), cloneInfo.tableName, cloneInfo.tableId, TableOperation.CLONE);
 
       TableManager.getInstance().cloneTable(cloneInfo.srcTableId, cloneInfo.tableId, cloneInfo.tableName, cloneInfo.namespaceId, cloneInfo.propertiesToSet,
           cloneInfo.propertiesToExclude, NodeExistsPolicy.OVERWRITE);
-      Tables.clearCache(instance);
+      Tables.clearCache(environment.getInstance());
 
       return new CloneMetadata(cloneInfo);
     } finally {
@@ -171,12 +166,11 @@ class CloneZookeeper extends MasterRepo {
 
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    Instance instance = HdfsZooInstance.getInstance();
     TableManager.getInstance().removeTable(cloneInfo.tableId);
     if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
       Utils.unreserveNamespace(cloneInfo.namespaceId, tid, false);
     Utils.unreserveTable(cloneInfo.tableId, tid, true);
-    Tables.clearCache(instance);
+    Tables.clearCache(environment.getInstance());
   }
 
 }
@@ -201,7 +195,7 @@ class ClonePermissions extends MasterRepo {
     // give all table permissions to the creator
     for (TablePermission permission : TablePermission.values()) {
       try {
-        AuditedSecurityOperation.getInstance().grantTablePermission(SystemCredentials.get().toThrift(environment.getInstance()), cloneInfo.user,
+        AuditedSecurityOperation.getInstance(environment).grantTablePermission(environment.rpcCreds(), cloneInfo.user,
             cloneInfo.tableId, permission, cloneInfo.namespaceId);
       } catch (ThriftSecurityException e) {
         Logger.getLogger(FinishCloneTable.class).error(e.getMessage(), e);
@@ -222,7 +216,7 @@ class ClonePermissions extends MasterRepo {
 
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    AuditedSecurityOperation.getInstance().deleteTable(SystemCredentials.get().toThrift(environment.getInstance()), cloneInfo.tableId, cloneInfo.namespaceId);
+    AuditedSecurityOperation.getInstance(environment).deleteTable(environment.rpcCreds(), cloneInfo.tableId, cloneInfo.namespaceId);
   }
 }
 
@@ -238,8 +232,7 @@ public class CloneTable extends MasterRepo {
     cloneInfo.tableName = tableName;
     cloneInfo.propertiesToExclude = propertiesToExclude;
     cloneInfo.propertiesToSet = propertiesToSet;
-    Instance inst = HdfsZooInstance.getInstance();
-    cloneInfo.srcNamespaceId = Tables.getNamespaceId(inst, cloneInfo.srcTableId);
+    cloneInfo.srcNamespaceId = Tables.getNamespaceId(HdfsZooInstance.getInstance(), cloneInfo.srcTableId);
   }
 
   @Override
@@ -254,8 +247,7 @@ public class CloneTable extends MasterRepo {
 
     Utils.idLock.lock();
     try {
-      Instance instance = HdfsZooInstance.getInstance();
-      cloneInfo.tableId = Utils.getNextTableId(cloneInfo.tableName, instance);
+      cloneInfo.tableId = Utils.getNextTableId(cloneInfo.tableName, environment.getInstance());
       return new ClonePermissions(cloneInfo);
     } finally {
       Utils.idLock.unlock();
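
In ClonePermissions the security calls now go through an AuditedSecurityOperation bound to the environment and authenticate with environment.rpcCreds() rather than a freshly thrifted SystemCredentials. A sketch of the grant loop, modeled on the hunk above (the wrapper class is hypothetical):

    import org.apache.accumulo.core.security.TablePermission;
    import org.apache.accumulo.master.Master;
    import org.apache.accumulo.server.security.AuditedSecurityOperation;

    final class GrantAll {
      // Grants every table permission to one user, as ClonePermissions does.
      static void grantAll(Master environment, String user, String tableId,
          String namespaceId) throws Exception {
        for (TablePermission permission : TablePermission.values()) {
          AuditedSecurityOperation.getInstance(environment)
              .grantTablePermission(environment.rpcCreds(), user, tableId, permission, namespaceId);
        }
      }
    }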

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
index 13ef68e..b3037d3 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
@@ -53,7 +53,6 @@ import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
@@ -70,10 +69,9 @@ class CompactionDriver extends MasterRepo {
   private static final long serialVersionUID = 1L;
 
   private long compactId;
-  private String tableId;
+  private final String tableId;
   private byte[] startRow;
   private byte[] endRow;
-  private String namespaceId;
 
   public CompactionDriver(long compactId, String tableId, byte[] startRow, byte[] endRow) {
 
@@ -81,14 +79,12 @@ class CompactionDriver extends MasterRepo {
     this.tableId = tableId;
     this.startRow = startRow;
     this.endRow = endRow;
-    Instance inst = HdfsZooInstance.getInstance();
-    this.namespaceId = Tables.getNamespaceId(inst, tableId);
   }
 
   @Override
   public long isReady(long tid, Master master) throws Exception {
 
-    String zCancelID = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId
+    String zCancelID = Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId
         + Constants.ZTABLE_COMPACT_CANCEL_ID;
 
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
@@ -191,7 +187,8 @@ class CompactionDriver extends MasterRepo {
 
   @Override
   public Repo<Master> call(long tid, Master environment) throws Exception {
-    CompactRange.removeIterators(tid, tableId);
+    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
+    CompactRange.removeIterators(environment, tid, tableId);
     Utils.getReadLock(tableId, tid).unlock();
     Utils.getReadLock(namespaceId, tid).unlock();
     return null;
@@ -207,11 +204,10 @@ class CompactionDriver extends MasterRepo {
 public class CompactRange extends MasterRepo {
 
   private static final long serialVersionUID = 1L;
-  private String tableId;
+  private final String tableId;
   private byte[] startRow;
   private byte[] endRow;
   private byte[] iterators;
-  private String namespaceId;
 
   public static class CompactionIterators implements Writable {
     byte[] startRow;
@@ -295,8 +291,6 @@ public class CompactRange extends MasterRepo {
     this.tableId = tableId;
     this.startRow = startRow.length == 0 ? null : startRow;
     this.endRow = endRow.length == 0 ? null : endRow;
-    Instance inst = HdfsZooInstance.getInstance();
-    this.namespaceId = Tables.getNamespaceId(inst, tableId);
 
     if (iterators.size() > 0) {
       this.iterators = WritableUtils.toByteArray(new CompactionIterators(this.startRow, this.endRow, iterators));
@@ -311,13 +305,14 @@ public class CompactRange extends MasterRepo {
 
   @Override
   public long isReady(long tid, Master environment) throws Exception {
+    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
     return Utils.reserveNamespace(namespaceId, tid, false, true, TableOperation.COMPACT)
         + Utils.reserveTable(tableId, tid, false, true, TableOperation.COMPACT);
   }
 
   @Override
   public Repo<Master> call(final long tid, Master environment) throws Exception {
-    String zTablePath = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_ID;
+    String zTablePath = Constants.ZROOT + "/" + environment.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_ID;
 
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
     byte[] cid;
@@ -361,8 +356,8 @@ public class CompactRange extends MasterRepo {
 
   }
 
-  static void removeIterators(final long txid, String tableId) throws Exception {
-    String zTablePath = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_ID;
+  static void removeIterators(Master environment, final long txid, String tableId) throws Exception {
+    String zTablePath = Constants.ZROOT + "/" + environment.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_ID;
 
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
 
@@ -391,8 +386,9 @@ public class CompactRange extends MasterRepo {
 
   @Override
   public void undo(long tid, Master environment) throws Exception {
+    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
     try {
-      removeIterators(tid, tableId);
+      removeIterators(environment, tid, tableId);
     } finally {
       Utils.unreserveNamespace(namespaceId, tid, false);
       Utils.unreserveTable(tableId, tid, false);
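
removeIterators gains a Master parameter for the same reason: static helpers that built ZooKeeper paths from the HdfsZooInstance singleton now take the caller's context explicitly. The path construction it performs, extracted as a hypothetical helper:

    import org.apache.accumulo.core.Constants;
    import org.apache.accumulo.master.Master;

    final class CompactPaths {
      // Same path as zTablePath above; the instance id now comes from the
      // Master parameter instead of HdfsZooInstance.getInstance().
      static String compactIdPath(Master environment, String tableId) {
        return Constants.ZROOT + "/" + environment.getInstance().getInstanceID()
            + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_ID;
      }
    }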

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
index 8d0aa26..83ab379 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
@@ -30,7 +30,6 @@ import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityOperation;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.server.util.NamespacePropUtil;
 import org.apache.log4j.Logger;
@@ -142,10 +141,10 @@ class SetupNamespacePermissions extends MasterRepo {
   @Override
   public Repo<Master> call(long tid, Master env) throws Exception {
     // give all namespace permissions to the creator
-    SecurityOperation security = AuditedSecurityOperation.getInstance();
+    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
     for (NamespacePermission permission : NamespacePermission.values()) {
       try {
-        security.grantNamespacePermission(SystemCredentials.get().toThrift(env.getInstance()), namespaceInfo.user, namespaceInfo.namespaceId, permission);
+        security.grantNamespacePermission(env.rpcCreds(), namespaceInfo.user, namespaceInfo.namespaceId, permission);
       } catch (ThriftSecurityException e) {
         Logger.getLogger(FinishCreateNamespace.class).error(e.getMessage(), e);
         throw e;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
index 1e4d40e..247645b 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
@@ -36,7 +36,6 @@ import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityOperation;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.server.tablets.TabletTime;
 import org.apache.accumulo.server.util.MetadataTableUtil;
@@ -117,7 +116,7 @@ class PopulateMetadata extends MasterRepo {
   @Override
   public Repo<Master> call(long tid, Master environment) throws Exception {
     KeyExtent extent = new KeyExtent(new Text(tableInfo.tableId), null, null);
-    MetadataTableUtil.addTablet(extent, tableInfo.dir, SystemCredentials.get(), tableInfo.timeType, environment.getMasterLock());
+    MetadataTableUtil.addTablet(extent, tableInfo.dir, environment, tableInfo.timeType, environment.getMasterLock());
 
     return new FinishCreateTable(tableInfo);
 
@@ -125,7 +124,7 @@ class PopulateMetadata extends MasterRepo {
 
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(tableInfo.tableId, false, SystemCredentials.get(), environment.getMasterLock());
+    MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment, environment.getMasterLock());
   }
 
 }
@@ -249,11 +248,11 @@ class SetupPermissions extends MasterRepo {
   @Override
   public Repo<Master> call(long tid, Master env) throws Exception {
     // give all table permissions to the creator
-    SecurityOperation security = AuditedSecurityOperation.getInstance();
-    if (!tableInfo.user.equals(SystemCredentials.get().getPrincipal())) {
+    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
+    if (!tableInfo.user.equals(env.getCredentials().getPrincipal())) {
       for (TablePermission permission : TablePermission.values()) {
         try {
-          security.grantTablePermission(SystemCredentials.get().toThrift(env.getInstance()), tableInfo.user, tableInfo.tableId, permission, tableInfo.namespaceId);
+          security.grantTablePermission(env.rpcCreds(), tableInfo.user, tableInfo.tableId, permission, tableInfo.namespaceId);
         } catch (ThriftSecurityException e) {
           Logger.getLogger(FinishCreateTable.class).error(e.getMessage(), e);
           throw e;
@@ -269,7 +268,7 @@ class SetupPermissions extends MasterRepo {
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    AuditedSecurityOperation.getInstance().deleteTable(SystemCredentials.get().toThrift(env.getInstance()), tableInfo.tableId, tableInfo.namespaceId);
+    AuditedSecurityOperation.getInstance(env).deleteTable(env.rpcCreds(), tableInfo.tableId, tableInfo.namespaceId);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
index b6a9578..a3da0d2 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
@@ -22,7 +22,6 @@ import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tables.TableManager;
 import org.apache.log4j.Logger;
 
@@ -56,7 +55,7 @@ class NamespaceCleanUp extends MasterRepo {
 
     // remove any permissions associated with this namespace
     try {
-      AuditedSecurityOperation.getInstance().deleteNamespace(SystemCredentials.get().toThrift(master.getInstance()), namespaceId);
+      AuditedSecurityOperation.getInstance(master).deleteNamespace(master.rpcCreds(), namespaceId);
     } catch (ThriftSecurityException e) {
       log.error(e.getMessage(), e);
     }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
index 6a49a05..1f903ca 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
@@ -23,7 +23,6 @@ import java.util.Map.Entry;
 
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.impl.Tables;
@@ -45,14 +44,12 @@ import org.apache.accumulo.core.volume.Volume;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.master.state.MetaDataTableScanner;
 import org.apache.accumulo.server.master.state.TabletLocationState;
 import org.apache.accumulo.server.master.state.TabletState;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.hadoop.fs.FileStatus;
@@ -167,14 +164,14 @@ class CleanUp extends MasterRepo {
       // Intentionally do not pass master lock. If master loses lock, this operation may complete before master can kill itself.
       // If the master lock passed to deleteTable, it is possible that the delete mutations will be dropped. If the delete operations
       // are dropped and the operation completes, then the deletes will not be repeated.
-      MetadataTableUtil.deleteTable(tableId, refCount != 0, SystemCredentials.get(), null);
+      MetadataTableUtil.deleteTable(tableId, refCount != 0, master, null);
     } catch (Exception e) {
       log.error("error deleting " + tableId + " from metadata table", e);
     }
 
     // remove any problem reports the table may have
     try {
-      ProblemReports.getInstance().deleteProblemReports(tableId);
+      ProblemReports.getInstance(master).deleteProblemReports(tableId);
     } catch (Exception e) {
       log.error("Failed to delete problem reports for table " + tableId, e);
     }
@@ -215,7 +212,7 @@ class CleanUp extends MasterRepo {
 
     // remove any permissions associated with this table
     try {
-      AuditedSecurityOperation.getInstance().deleteTable(SystemCredentials.get().toThrift(master.getInstance()), tableId, namespaceId);
+      AuditedSecurityOperation.getInstance(master).deleteTable(master.rpcCreds(), tableId, namespaceId);
     } catch (ThriftSecurityException e) {
       log.error(e.getMessage(), e);
     }
@@ -307,23 +304,21 @@ public class DeleteTable extends MasterRepo {
 
   private static final long serialVersionUID = 1L;
 
-  private String tableId, namespaceId;
+  private String tableId;
 
   public DeleteTable(String tableId) {
     this.tableId = tableId;
-    Instance inst = HdfsZooInstance.getInstance();
-    this.namespaceId = Tables.getNamespaceId(inst, tableId);
   }
 
   @Override
   public long isReady(long tid, Master environment) throws Exception {
-
-    return Utils.reserveNamespace(namespaceId, tid, false, false, TableOperation.DELETE)
-        + Utils.reserveTable(tableId, tid, true, true, TableOperation.DELETE);
+    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
+    return Utils.reserveNamespace(namespaceId, tid, false, false, TableOperation.DELETE) + Utils.reserveTable(tableId, tid, true, true, TableOperation.DELETE);
   }
 
   @Override
   public Repo<Master> call(long tid, Master environment) throws Exception {
+    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
     TableManager.getInstance().transitionTableState(tableId, TableState.DELETING);
     environment.getEventCoordinator().event("deleting table %s ", tableId);
     return new CleanUp(tableId, namespaceId);
@@ -331,6 +326,7 @@ public class DeleteTable extends MasterRepo {
 
   @Override
   public void undo(long tid, Master environment) throws Exception {
+    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
     Utils.unreserveNamespace(namespaceId, tid, false);
     Utils.unreserveTable(tableId, tid, true);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
index 9c397db..f309879 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
@@ -34,7 +34,6 @@ import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.Tables;
@@ -55,9 +54,9 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.Lo
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -131,10 +130,8 @@ class WriteExportFiles extends MasterRepo {
   
   @Override
   public Repo<Master> call(long tid, Master master) throws Exception {
-    Connector conn = master.getConnector();
-    
     try {
-      exportTable(master.getFileSystem(), conn, tableInfo.tableName, tableInfo.tableID, tableInfo.exportDir);
+      exportTable(master.getFileSystem(), master, tableInfo.tableName, tableInfo.tableID, tableInfo.exportDir);
     } catch (IOException ioe) {
       throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
           "Failed to create export files " + ioe.getMessage());
@@ -151,7 +148,7 @@ class WriteExportFiles extends MasterRepo {
     Utils.unreserveTable(tableInfo.tableID, tid, false);
   }
   
-  public static void exportTable(VolumeManager fs, Connector conn, String tableName, String tableID, String exportDir) throws Exception {
+  public static void exportTable(VolumeManager fs, AccumuloServerContext context, String tableName, String tableID, String exportDir) throws Exception {
     
     fs.mkdirs(new Path(exportDir));
     Path exportMetaFilePath = fs.getVolumeByPath(new Path(exportDir)).getFileSystem().makeQualified(new Path(exportDir, Constants.EXPORT_FILE));
@@ -166,9 +163,9 @@ class WriteExportFiles extends MasterRepo {
       zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_INFO_FILE));
       OutputStreamWriter osw = new OutputStreamWriter(dataOut, UTF_8);
       osw.append(ExportTable.EXPORT_VERSION_PROP + ":" + ExportTable.VERSION + "\n");
-      osw.append("srcInstanceName:" + conn.getInstance().getInstanceName() + "\n");
-      osw.append("srcInstanceID:" + conn.getInstance().getInstanceID() + "\n");
-      osw.append("srcZookeepers:" + conn.getInstance().getZooKeepers() + "\n");
+      osw.append("srcInstanceName:" + context.getInstance().getInstanceName() + "\n");
+      osw.append("srcInstanceID:" + context.getInstance().getInstanceID() + "\n");
+      osw.append("srcZookeepers:" + context.getInstance().getZooKeepers() + "\n");
       osw.append("srcTableName:" + tableName + "\n");
       osw.append("srcTableID:" + tableID + "\n");
       osw.append(ExportTable.DATA_VERSION_PROP + ":" + ServerConstants.DATA_VERSION + "\n");
@@ -177,10 +174,10 @@ class WriteExportFiles extends MasterRepo {
       osw.flush();
       dataOut.flush();
       
-      exportConfig(conn, tableID, zipOut, dataOut);
+      exportConfig(context, tableID, zipOut, dataOut);
       dataOut.flush();
       
-      Map<String,String> uniqueFiles = exportMetadata(fs, conn, tableID, zipOut, dataOut);
+      Map<String,String> uniqueFiles = exportMetadata(fs, context, tableID, zipOut, dataOut);
       
       dataOut.close();
       dataOut = null;
@@ -214,13 +211,13 @@ class WriteExportFiles extends MasterRepo {
     }
   }
   
-  private static Map<String,String> exportMetadata(VolumeManager fs, Connector conn, String tableID, ZipOutputStream zipOut, DataOutputStream dataOut)
-      throws IOException, TableNotFoundException {
+  private static Map<String,String> exportMetadata(VolumeManager fs, AccumuloServerContext context, String tableID, ZipOutputStream zipOut,
+      DataOutputStream dataOut) throws IOException, TableNotFoundException, AccumuloException, AccumuloSecurityException {
     zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_METADATA_FILE));
     
     Map<String,String> uniqueFiles = new HashMap<String,String>();
     
-    Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    Scanner metaScanner = context.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     metaScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
     TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(metaScanner);
     TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(metaScanner);
@@ -252,15 +249,15 @@ class WriteExportFiles extends MasterRepo {
     return uniqueFiles;
   }
   
-  private static void exportConfig(Connector conn, String tableID, ZipOutputStream zipOut, DataOutputStream dataOut) throws AccumuloException,
+  private static void exportConfig(AccumuloServerContext context, String tableID, ZipOutputStream zipOut, DataOutputStream dataOut) throws AccumuloException,
       AccumuloSecurityException, TableNotFoundException, IOException {
+    Connector conn = context.getConnector();
     
     DefaultConfiguration defaultConfig = AccumuloConfiguration.getDefaultConfiguration();
     Map<String,String> siteConfig = conn.instanceOperations().getSiteConfiguration();
     Map<String,String> systemConfig = conn.instanceOperations().getSystemConfiguration();
     
-    ServerConfigurationFactory factory = new ServerConfigurationFactory(conn.getInstance());
-    TableConfiguration tableConfig = factory.getTableConfiguration(tableID);
+    TableConfiguration tableConfig = context.getServerConfigurationFactory().getTableConfiguration(tableID);
     
     OutputStreamWriter osw = new OutputStreamWriter(dataOut, UTF_8);
     
@@ -292,8 +289,7 @@ public class ExportTable extends MasterRepo {
     tableInfo.tableName = tableName;
     tableInfo.exportDir = exportDir;
     tableInfo.tableID = tableId;
-    Instance inst = HdfsZooInstance.getInstance();
-    tableInfo.namespaceID = Tables.getNamespaceId(inst, tableId);
+    tableInfo.namespaceID = Tables.getNamespaceId(HdfsZooInstance.getInstance(), tableId);
   }
   
   @Override
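
exportTable widens its parameter from Connector to AccumuloServerContext because its helpers need two things the context can supply: a Connector for scanning the metadata table and reading site/system configuration, and the ServerConfigurationFactory for per-table configuration. A sketch of the two handles (the wrapper class and method are hypothetical, mirroring calls in the hunks above):

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.server.AccumuloServerContext;
    import org.apache.accumulo.server.conf.TableConfiguration;

    final class ExportHandles {
      // Both handles come from the one context parameter.
      static TableConfiguration handles(AccumuloServerContext context, String tableID) throws Exception {
        Connector conn = context.getConnector(); // used for scans and instanceOperations()
        return context.getServerConfigurationFactory().getTableConfiguration(tableID);
      }
    }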

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
index 35067ce..26a6928 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
@@ -59,11 +59,9 @@ import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityOperation;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.server.tablets.UniqueNameAllocator;
 import org.apache.accumulo.server.util.MetadataTableUtil;
@@ -335,7 +333,7 @@ class PopulateMetadataTable extends MasterRepo {
 
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(tableInfo.tableId, false, SystemCredentials.get(), environment.getMasterLock());
+    MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment, environment.getMasterLock());
   }
 }
 
@@ -483,7 +481,7 @@ class ImportPopulateZookeeper extends MasterRepo {
     Utils.tableNameLock.lock();
     try {
       // write tableName & tableId to zookeeper
-      Instance instance = HdfsZooInstance.getInstance();
+      Instance instance = env.getInstance();
 
       Utils.checkTableDoesNotExist(instance, tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
 
@@ -507,7 +505,7 @@ class ImportPopulateZookeeper extends MasterRepo {
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    Instance instance = HdfsZooInstance.getInstance();
+    Instance instance = env.getInstance();
     TableManager.getInstance().removeTable(tableInfo.tableId);
     Utils.unreserveTable(tableInfo.tableId, tid, true);
     Tables.clearCache(instance);
@@ -532,10 +530,10 @@ class ImportSetupPermissions extends MasterRepo {
   @Override
   public Repo<Master> call(long tid, Master env) throws Exception {
     // give all table permissions to the creator
-    SecurityOperation security = AuditedSecurityOperation.getInstance();
+    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
     for (TablePermission permission : TablePermission.values()) {
       try {
-        security.grantTablePermission(SystemCredentials.get().toThrift(env.getInstance()), tableInfo.user, tableInfo.tableId, permission, tableInfo.namespaceId);
+        security.grantTablePermission(env.rpcCreds(), tableInfo.user, tableInfo.tableId, permission, tableInfo.namespaceId);
       } catch (ThriftSecurityException e) {
         Logger.getLogger(ImportSetupPermissions.class).error(e.getMessage(), e);
         throw e;
@@ -550,7 +548,7 @@ class ImportSetupPermissions extends MasterRepo {
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    AuditedSecurityOperation.getInstance().deleteTable(SystemCredentials.get().toThrift(env.getInstance()), tableInfo.tableId, tableInfo.namespaceId);
+    AuditedSecurityOperation.getInstance(env).deleteTable(env.rpcCreds(), tableInfo.tableId, tableInfo.namespaceId);
   }
 }
 
@@ -585,7 +583,7 @@ public class ImportTable extends MasterRepo {
 
     Utils.idLock.lock();
     try {
-      Instance instance = HdfsZooInstance.getInstance();
+      Instance instance = env.getInstance();
       tableInfo.tableId = Utils.getNextTableId(tableInfo.tableName, instance);
       return new ImportSetupPermissions(tableInfo);
     } finally {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
index 5d511ac..90b764c 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
@@ -32,7 +32,6 @@ import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.log4j.Logger;
 
@@ -42,10 +41,10 @@ public class RenameTable extends MasterRepo {
   private String tableId;
   private String oldTableName;
   private String newTableName;
-  private String namespaceId;
 
   @Override
   public long isReady(long tid, Master environment) throws Exception {
+    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
     return Utils.reserveNamespace(namespaceId, tid, false, true, TableOperation.RENAME) + Utils.reserveTable(tableId, tid, true, true, TableOperation.RENAME);
   }
 
@@ -53,14 +52,12 @@ public class RenameTable extends MasterRepo {
     this.tableId = tableId;
     this.oldTableName = oldTableName;
     this.newTableName = newTableName;
-    Instance inst = HdfsZooInstance.getInstance();
-    this.namespaceId = Tables.getNamespaceId(inst, tableId);
   }
 
   @Override
   public Repo<Master> call(long tid, Master master) throws Exception {
-
     Instance instance = master.getInstance();
+    String namespaceId = Tables.getNamespaceId(instance, tableId);
     Pair<String,String> qualifiedOldTableName = Tables.qualify(oldTableName);
     Pair<String,String> qualifiedNewTableName = Tables.qualify(newTableName);
 
@@ -97,7 +94,7 @@ public class RenameTable extends MasterRepo {
     } finally {
       Utils.tableNameLock.unlock();
       Utils.unreserveTable(tableId, tid, true);
-      Utils.unreserveNamespace(this.namespaceId, tid, false);
+      Utils.unreserveNamespace(namespaceId, tid, false);
     }
 
     Logger.getLogger(RenameTable.class).debug("Renamed table " + tableId + " " + oldTableName + " " + newTableName);
@@ -107,6 +104,7 @@ public class RenameTable extends MasterRepo {
 
   @Override
   public void undo(long tid, Master env) throws Exception {
+    String namespaceId = Tables.getNamespaceId(env.getInstance(), tableId);
     Utils.unreserveTable(tableId, tid, true);
     Utils.unreserveNamespace(namespaceId, tid, false);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
index 12849b6..ccb5d69 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
@@ -16,7 +16,6 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
@@ -26,7 +25,6 @@ import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.master.state.MergeInfo;
 import org.apache.accumulo.server.master.state.MergeInfo.Operation;
 import org.apache.accumulo.server.master.state.MergeState;
@@ -49,12 +47,9 @@ class TableRangeOpWait extends MasterRepo {
 
   private static final long serialVersionUID = 1L;
   private String tableId;
-  private String namespaceId;
 
   public TableRangeOpWait(String tableId) {
     this.tableId = tableId;
-    Instance inst = HdfsZooInstance.getInstance();
-    this.namespaceId = Tables.getNamespaceId(inst, tableId);
   }
 
   @Override
@@ -68,6 +63,7 @@ class TableRangeOpWait extends MasterRepo {
 
   @Override
   public Repo<Master> call(long tid, Master master) throws Exception {
+    String namespaceId = Tables.getNamespaceId(master.getInstance(), tableId);
     Text tableIdText = new Text(tableId);
     MergeInfo mergeInfo = master.getMergeInfo(tableIdText);
     log.info("removing merge information " + mergeInfo);
@@ -83,14 +79,14 @@ public class TableRangeOp extends MasterRepo {
 
   private static final long serialVersionUID = 1L;
 
-  private String tableId;
+  private final String tableId;
   private byte[] startRow;
   private byte[] endRow;
   private Operation op;
-  private String namespaceId;
 
   @Override
   public long isReady(long tid, Master environment) throws Exception {
+    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
     return Utils.reserveNamespace(namespaceId, tid, false, true, TableOperation.MERGE)
         + Utils.reserveTable(tableId, tid, true, true, TableOperation.MERGE);
   }
@@ -101,8 +97,6 @@ public class TableRangeOp extends MasterRepo {
     this.startRow = TextUtil.getBytes(startRow);
     this.endRow = TextUtil.getBytes(endRow);
     this.op = op;
-    Instance inst = HdfsZooInstance.getInstance();
-    this.namespaceId = Tables.getNamespaceId(inst, tableId);
   }
 
   @Override
@@ -135,6 +129,7 @@ public class TableRangeOp extends MasterRepo {
 
   @Override
   public void undo(long tid, Master env) throws Exception {
+    String namespaceId = Tables.getNamespaceId(env.getInstance(), tableId);
     // Not sure this is a good thing to do. The Master state engine should be the one to remove it.
     Text tableIdText = new Text(tableId);
     MergeInfo mergeInfo = env.getMergeInfo(tableIdText);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/test/java/org/apache/accumulo/master/TestMergeState.java
----------------------------------------------------------------------
diff --git a/server/master/src/test/java/org/apache/accumulo/master/TestMergeState.java b/server/master/src/test/java/org/apache/accumulo/master/TestMergeState.java
index 29dfefb..5c46ddc 100644
--- a/server/master/src/test/java/org/apache/accumulo/master/TestMergeState.java
+++ b/server/master/src/test/java/org/apache/accumulo/master/TestMergeState.java
@@ -28,7 +28,6 @@ import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.mock.MockInstance;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
@@ -37,8 +36,9 @@ import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.master.state.MergeStats;
+import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.master.state.Assignment;
 import org.apache.accumulo.server.master.state.CurrentState;
 import org.apache.accumulo.server.master.state.MergeInfo;
@@ -92,7 +92,8 @@ public class TestMergeState {
   @Test
   public void test() throws Exception {
     Instance instance = new MockInstance();
-    Connector connector = instance.getConnector("root", new PasswordToken(""));
+    AccumuloServerContext context = new AccumuloServerContext(new ServerConfigurationFactory(instance));
+    Connector connector = context.getConnector();
     BatchWriter bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
 
     // Create a fake METADATA table with these splits
@@ -116,10 +117,9 @@ public class TestMergeState {
 
     // Read out the TabletLocationStates
     MockCurrentState state = new MockCurrentState(new MergeInfo(new KeyExtent(tableId, new Text("p"), new Text("e")), MergeInfo.Operation.MERGE));
-    Credentials credentials = new Credentials("root", new PasswordToken(new byte[0]));
 
     // Verify the tablet state: hosted, and count
-    MetaDataStateStore metaDataStateStore = new MetaDataStateStore(instance, credentials, state);
+    MetaDataStateStore metaDataStateStore = new MetaDataStateStore(context, state);
     int count = 0;
     for (TabletLocationState tss : metaDataStateStore) {
       Assert.assertEquals(TabletState.HOSTED, tss.getState(state.onlineTabletServers()));
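
The test setup change is the same shape used in WorkMakerTest below: instead of minting a PasswordToken and Credentials by hand, the test wraps a MockInstance in a ServerConfigurationFactory, builds an AccumuloServerContext from it, and lets the context hand out the connector. A compilable sketch of just that setup, using only the constructors and calls visible in this diff:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Instance;
    import org.apache.accumulo.core.client.mock.MockInstance;
    import org.apache.accumulo.server.AccumuloServerContext;
    import org.apache.accumulo.server.conf.ServerConfigurationFactory;

    public class ContextSetupSketch {
      public static void main(String[] args) throws Exception {
        // The context wraps the (mock) instance and its configuration...
        Instance instance = new MockInstance();
        AccumuloServerContext context =
            new AccumuloServerContext(new ServerConfigurationFactory(instance));

        // ...and becomes the single source for connectors, replacing
        // instance.getConnector("root", new PasswordToken("")).
        Connector conn = context.getConnector();
        System.out.println("connected as " + conn.whoami());
      }
    }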

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/test/java/org/apache/accumulo/master/replication/MasterReplicationCoordinatorTest.java
----------------------------------------------------------------------
diff --git a/server/master/src/test/java/org/apache/accumulo/master/replication/MasterReplicationCoordinatorTest.java b/server/master/src/test/java/org/apache/accumulo/master/replication/MasterReplicationCoordinatorTest.java
index 1ec3f24..a2ea329 100644
--- a/server/master/src/test/java/org/apache/accumulo/master/replication/MasterReplicationCoordinatorTest.java
+++ b/server/master/src/test/java/org/apache/accumulo/master/replication/MasterReplicationCoordinatorTest.java
@@ -74,8 +74,8 @@ public class MasterReplicationCoordinatorTest {
     ZooReader reader = EasyMock.createMock(ZooReader.class);
     Instance inst = EasyMock.createMock(Instance.class);
 
-    EasyMock.expect(master.getInstance()).andReturn(inst);
-    EasyMock.expect(inst.getInstanceID()).andReturn("1234");
+    EasyMock.expect(master.getInstance()).andReturn(inst).anyTimes();
+    EasyMock.expect(inst.getInstanceID()).andReturn("1234").anyTimes();
 
     EasyMock.replay(master, reader, inst);
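
The anyTimes() relaxation matters because after the refactor the coordinator may read the instance (and its id) more than once per operation; a bare expect() is consumed by the first call, and a second call fails the test. A self-contained sketch of the difference (Registry is a hypothetical interface, not an Accumulo type):

    import org.easymock.EasyMock;

    public class AnyTimesSketch {
      // Hypothetical collaborator standing in for Instance.
      interface Registry {
        String getInstanceID();
      }

      public static void main(String[] args) {
        Registry reg = EasyMock.createMock(Registry.class);

        // Without .anyTimes(), this expectation satisfies exactly one
        // call; a second getInstanceID() would fail verification.
        EasyMock.expect(reg.getInstanceID()).andReturn("1234").anyTimes();
        EasyMock.replay(reg);

        // Both reads are satisfied by the single relaxed expectation.
        System.out.println(reg.getInstanceID());
        System.out.println(reg.getInstanceID());

        EasyMock.verify(reg);
      }
    }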
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/master/src/test/java/org/apache/accumulo/master/replication/WorkMakerTest.java
----------------------------------------------------------------------
diff --git a/server/master/src/test/java/org/apache/accumulo/master/replication/WorkMakerTest.java b/server/master/src/test/java/org/apache/accumulo/master/replication/WorkMakerTest.java
index 0455c44..f53547a 100644
--- a/server/master/src/test/java/org/apache/accumulo/master/replication/WorkMakerTest.java
+++ b/server/master/src/test/java/org/apache/accumulo/master/replication/WorkMakerTest.java
@@ -25,7 +25,6 @@ import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.mock.MockInstance;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
@@ -37,6 +36,8 @@ import org.apache.accumulo.core.replication.ReplicationTarget;
 import org.apache.accumulo.core.replication.StatusUtil;
 import org.apache.accumulo.core.replication.proto.Replication.Status;
 import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.junit.Assert;
@@ -58,11 +59,13 @@ public class WorkMakerTest {
 
   @Rule
   public TestName name = new TestName();
+  private AccumuloServerContext context;
 
   @Before
   public void createMockAccumulo() throws Exception {
     instance = new MockInstance();
-    conn = instance.getConnector("root", new PasswordToken(""));
+    context = new AccumuloServerContext(new ServerConfigurationFactory(instance));
+    conn = context.getConnector();
     conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.WRITE);
     conn.tableOperations().deleteRows(ReplicationTable.NAME, null, null);
   }
@@ -87,7 +90,7 @@ public class WorkMakerTest {
     StatusSection.limit(s);
     Assert.assertEquals(1, Iterables.size(s));
 
-    WorkMaker workMaker = new WorkMaker(conn);
+    WorkMaker workMaker = new WorkMaker(context, conn);
 
     // Invoke the addWorkRecord method to create a Work record from the Status record earlier
     ReplicationTarget expected = new ReplicationTarget("remote_cluster_1", "4", tableId);
@@ -119,7 +122,7 @@ public class WorkMakerTest {
 
     Mutation m = new Mutation(new Path(file).toString());
     m.put(StatusSection.NAME, new Text(tableId), StatusUtil.fileCreatedValue(System.currentTimeMillis()));
-    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
+    BatchWriter bw = ReplicationTable.getBatchWriter(context.getConnector());
     bw.addMutation(m);
     bw.flush();
 
@@ -128,7 +131,7 @@ public class WorkMakerTest {
     StatusSection.limit(s);
     Assert.assertEquals(1, Iterables.size(s));
 
-    WorkMaker workMaker = new WorkMaker(conn);
+    WorkMaker workMaker = new WorkMaker(context, conn);
 
     Map<String,String> targetClusters = ImmutableMap.of("remote_cluster_1", "4", "remote_cluster_2", "6", "remote_cluster_3", "8");
     Set<ReplicationTarget> expectedTargets = new HashSet<>();
@@ -176,7 +179,7 @@ public class WorkMakerTest {
     StatusSection.limit(s);
     Assert.assertEquals(1, Iterables.size(s));
 
-    WorkMaker workMaker = new WorkMaker(conn);
+    WorkMaker workMaker = new WorkMaker(context, conn);
 
     conn.tableOperations().setProperty(ReplicationTable.NAME, Property.TABLE_REPLICATION_TARGET.getKey() + "remote_cluster_1", "4");
 
@@ -194,7 +197,7 @@ public class WorkMakerTest {
 
   @Test
   public void closedStatusRecordsStillMakeWork() throws Exception {
-    WorkMaker workMaker = new WorkMaker(conn);
+    WorkMaker workMaker = new WorkMaker(context, conn);
 
     Assert.assertFalse(workMaker.shouldCreateWork(StatusUtil.fileCreated(System.currentTimeMillis())));
     Assert.assertTrue(workMaker.shouldCreateWork(StatusUtil.ingestedUntil(1000)));
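
Each test now constructs the WorkMaker with the shared context as well as the connector, making the configuration source an explicit constructor argument instead of something the class digs up internally. A small sketch of that injection shape with hypothetical stand-ins (ServerContext and ReplicationWorker are illustrations, not Accumulo classes; the "table.replication" key is assumed here for illustration):

    // Hypothetical stand-in for the configuration access a server
    // context provides.
    interface ServerContext {
      String tableSetting(String tableId, String key);
    }

    class ReplicationWorker {
      private final ServerContext context;

      ReplicationWorker(ServerContext context) {
        // The collaborator is explicit, so a test can pass a fake.
        this.context = context;
      }

      boolean shouldCreateWork(String tableId) {
        // Read through the injected context on each call rather than
        // consulting global or static state.
        return Boolean.parseBoolean(context.tableSetting(tableId, "table.replication"));
      }
    }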

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/monitor/src/main/java/org/apache/accumulo/monitor/EmbeddedWebServer.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/EmbeddedWebServer.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/EmbeddedWebServer.java
index 46fe54b..7142ef1 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/EmbeddedWebServer.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/EmbeddedWebServer.java
@@ -42,7 +42,7 @@ public class EmbeddedWebServer {
 
   public EmbeddedWebServer(String host, int port) {
     server = new Server();
-    final AccumuloConfiguration conf = Monitor.getSystemConfiguration();
+    final AccumuloConfiguration conf = Monitor.getContext().getConfiguration();
     if (EMPTY.equals(conf.get(Property.MONITOR_SSL_KEYSTORE)) || EMPTY.equals(conf.get(Property.MONITOR_SSL_KEYSTOREPASS))
         || EMPTY.equals(conf.get(Property.MONITOR_SSL_TRUSTSTORE)) || EMPTY.equals(conf.get(Property.MONITOR_SSL_TRUSTSTOREPASS))) {
       connector = new ServerConnector(server, new HttpConnectionFactory());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
index 8bc255d..c761081 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
@@ -35,7 +35,6 @@ import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.MasterClient;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.conf.SiteConfiguration;
 import org.apache.accumulo.core.gc.thrift.GCMonitorService;
@@ -78,6 +77,7 @@ import org.apache.accumulo.monitor.servlets.trace.ListType;
 import org.apache.accumulo.monitor.servlets.trace.ShowTrace;
 import org.apache.accumulo.monitor.servlets.trace.Summary;
 import org.apache.accumulo.server.Accumulo;
+import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.ServerOpts;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfigurationFactory;
@@ -86,7 +86,6 @@ import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.monitor.LogService;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.problems.ProblemType;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.Halt;
 import org.apache.accumulo.server.util.TableInfoUtil;
 import org.apache.accumulo.server.zookeeper.ZooLock;
@@ -165,6 +164,7 @@ public class Monitor {
   private static Instance instance;
 
   private static ServerConfigurationFactory config;
+  private static AccumuloServerContext context;
 
   private static EmbeddedWebServer server;
 
@@ -254,9 +254,9 @@ public class Monitor {
       while (retry) {
         MasterClientService.Iface client = null;
         try {
-          client = MasterClient.getConnection(HdfsZooInstance.getInstance());
+          client = MasterClient.getConnection(context);
           if (client != null) {
-            mmi = client.getMasterStats(Tracer.traceInfo(), SystemCredentials.get().toThrift(HdfsZooInstance.getInstance()));
+            mmi = client.getMasterStats(Tracer.traceInfo(), context.rpcCreds());
             retry = false;
           } else {
             mmi = null;
@@ -352,7 +352,7 @@ public class Monitor {
         calcCacheHitRate(dataCacheHitRateOverTime, currentTime, dataCacheHitTracker, dataCacheRequestTracker);
       }
       try {
-        Monitor.problemSummary = ProblemReports.getInstance().summarize();
+        Monitor.problemSummary = ProblemReports.getInstance(getContext()).summarize();
         Monitor.problemException = null;
       } catch (Exception e) {
         log.info("Failed to obtain problem reports ", e);
@@ -387,9 +387,9 @@ public class Monitor {
       if (locks != null && locks.size() > 0) {
         Collections.sort(locks);
         address = new ServerServices(new String(zk.getData(path + "/" + locks.get(0), null), UTF_8)).getAddress(Service.GC_CLIENT);
-        GCMonitorService.Client client = ThriftUtil.getClient(new GCMonitorService.Client.Factory(), address, config.getConfiguration());
+        GCMonitorService.Client client = ThriftUtil.getClient(new GCMonitorService.Client.Factory(), address, new AccumuloServerContext(config));
         try {
-          result = client.getStatus(Tracer.traceInfo(), SystemCredentials.get().toThrift(instance));
+          result = client.getStatus(Tracer.traceInfo(), getContext().rpcCreds());
         } finally {
           ThriftUtil.returnClient(client);
         }
@@ -412,6 +412,7 @@ public class Monitor {
     VolumeManager fs = VolumeManagerImpl.get();
     instance = HdfsZooInstance.getInstance();
     config = new ServerConfigurationFactory(instance);
+    context = new AccumuloServerContext(config);
     Accumulo.init(fs, config, app);
     Monitor monitor = new Monitor();
     DistributedTrace.enable(hostname, app, config.getConfiguration());
@@ -477,7 +478,7 @@ public class Monitor {
     }
 
     if (null != hostname) {
-      LogService.startLogListener(Monitor.getSystemConfiguration(), instance.getInstanceID(), hostname);
+      LogService.startLogListener(Monitor.getContext().getConfiguration(), instance.getInstanceID(), hostname);
     } else {
       log.warn("Not starting log4j listener as we could not determine address to use");
     }
@@ -535,11 +536,11 @@ public class Monitor {
   protected static void fetchScans() throws Exception {
     if (instance == null)
       return;
-    Connector c = instance.getConnector(SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
+    Connector c = context.getConnector();
     for (String server : c.instanceOperations().getTabletServers()) {
-      Client tserver = ThriftUtil.getTServerClient(server, Monitor.getSystemConfiguration());
+      Client tserver = ThriftUtil.getTServerClient(server, context);
       try {
-        List<ActiveScan> scans = tserver.getActiveScans(null, SystemCredentials.get().toThrift(instance));
+        List<ActiveScan> scans = tserver.getActiveScans(null, context.rpcCreds());
         synchronized (allScans) {
           allScans.put(server, new ScanStats(scans));
         }
@@ -612,7 +613,7 @@ public class Monitor {
 
       monitorLock.tryToCancelAsyncLockOrUnlock();
 
-      UtilWaitThread.sleep(getSystemConfiguration().getTimeInMillis(Property.MONITOR_LOCK_CHECK_INTERVAL));
+      UtilWaitThread.sleep(getContext().getConfiguration().getTimeInMillis(Property.MONITOR_LOCK_CHECK_INTERVAL));
     }
 
     log.info("Got Monitor lock.");
@@ -813,15 +814,11 @@ public class Monitor {
     }
   }
 
-  public static AccumuloConfiguration getSystemConfiguration() {
-    return config.getConfiguration();
-  }
-
-  public static Instance getInstance() {
-    return instance;
-  }
-
   public static boolean isUsingSsl() {
     return server.isUsingSsl();
   }
+
+  public static AccumuloServerContext getContext() {
+    return context;
+  }
 }
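
The two removed static getters collapse into one: every former Monitor.getSystemConfiguration() call becomes Monitor.getContext().getConfiguration(), and every Monitor.getInstance() becomes Monitor.getContext().getInstance(), as the servlet diffs below show. A compilable sketch of the consolidation with hypothetical stand-in types (Context and MonitorSketch are illustrations):

    // Hypothetical stand-ins for the configuration and instance the
    // real context exposes.
    final class Context {
      String config(String key) { return "value-of-" + key; }
      String instanceId() { return "1234"; }
    }

    final class MonitorSketch {
      private static Context context; // assigned once during startup, as in run()

      static void startup() {
        context = new Context();
      }

      // One accessor replaces getSystemConfiguration() and getInstance();
      // new capabilities (credentials, connectors) need no extra getters.
      static Context getContext() {
        return context;
      }

      public static void main(String[] args) {
        startup();
        System.out.println(getContext().config("monitor.banner.text"));
        System.out.println(getContext().instanceId());
      }
    }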

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/BasicServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/BasicServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/BasicServlet.java
index 2feb804..02a0615 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/BasicServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/BasicServlet.java
@@ -37,7 +37,6 @@ import javax.servlet.http.HttpServletResponse;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.monitor.Monitor;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.monitor.DedupedLogEvent;
 import org.apache.accumulo.server.monitor.LogService;
 import org.apache.accumulo.server.util.time.SimpleTimer;
@@ -60,9 +59,9 @@ abstract public class BasicServlet extends HttpServlet {
     StringBuilder sb = new StringBuilder();
     try {
       Monitor.fetchData();
-      bannerText = sanitize(Monitor.getSystemConfiguration().get(Property.MONITOR_BANNER_TEXT));
-      bannerColor = Monitor.getSystemConfiguration().get(Property.MONITOR_BANNER_COLOR).replace("'", "&#39;");
-      bannerBackground = Monitor.getSystemConfiguration().get(Property.MONITOR_BANNER_BACKGROUND).replace("'", "&#39;");
+      bannerText = sanitize(Monitor.getContext().getConfiguration().get(Property.MONITOR_BANNER_TEXT));
+      bannerColor = Monitor.getContext().getConfiguration().get(Property.MONITOR_BANNER_COLOR).replace("'", "&#39;");
+      bannerBackground = Monitor.getContext().getConfiguration().get(Property.MONITOR_BANNER_BACKGROUND).replace("'", "&#39;");
       pageStart(req, resp, sb);
       pageBody(req, resp, sb);
       pageEnd(req, resp, sb);
@@ -112,12 +111,12 @@ abstract public class BasicServlet extends HttpServlet {
     synchronized (BasicServlet.class) {
       // Learn our instance name asynchronously so we don't hang up if zookeeper is down
       if (cachedInstanceName == null) {
-        SimpleTimer.getInstance(Monitor.getSystemConfiguration()).schedule(new TimerTask() {
+        SimpleTimer.getInstance(Monitor.getContext().getConfiguration()).schedule(new TimerTask() {
           @Override
           public void run() {
             synchronized (BasicServlet.class) {
               if (cachedInstanceName == null) {
-                cachedInstanceName = HdfsZooInstance.getInstance().getInstanceName();
+                cachedInstanceName = Monitor.getContext().getInstance().getInstanceName();
               }
             }
           }
@@ -175,7 +174,7 @@ abstract public class BasicServlet extends HttpServlet {
     sb.append("<h1>").append(getTitle(req)).append("</h1></div>\n");
     sb.append("<div id='subheader'>Instance&nbsp;Name:&nbsp;").append(cachedInstanceName).append("&nbsp;&nbsp;&nbsp;Version:&nbsp;").append(Constants.VERSION)
         .append("\n");
-    sb.append("<br><span class='smalltext'>Instance&nbsp;ID:&nbsp;").append(HdfsZooInstance.getInstance().getInstanceID()).append("</span>\n");
+    sb.append("<br><span class='smalltext'>Instance&nbsp;ID:&nbsp;").append(Monitor.getContext().getInstance().getInstanceID()).append("</span>\n");
     sb.append("<br><span class='smalltext'>").append(new Date().toString().replace(" ", "&nbsp;")).append("</span>");
     sb.append("</div>\n"); // end <div id='subheader'>
     sb.append("</div>\n"); // end <div id='header'>
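
The instance-name caching above is worth noting on its own: the lookup runs on a timer thread so a slow or unavailable ZooKeeper cannot stall page rendering, and both the schedule decision and the cache write happen under the class lock. A self-contained sketch of the same pattern using java.util.Timer in place of Accumulo's SimpleTimer (fetchSlowly() is a hypothetical stand-in for the ZooKeeper call):

    import java.util.Timer;
    import java.util.TimerTask;

    public class AsyncNameCache {
      private static String cachedInstanceName = null;
      private static final Timer TIMER = new Timer(true); // daemon, like SimpleTimer

      static String instanceNameOr(String placeholder) {
        synchronized (AsyncNameCache.class) {
          if (cachedInstanceName == null) {
            // Kick off the lookup off-thread; render with the
            // placeholder until it completes.
            TIMER.schedule(new TimerTask() {
              @Override
              public void run() {
                String name = fetchSlowly();
                synchronized (AsyncNameCache.class) {
                  if (cachedInstanceName == null) {
                    cachedInstanceName = name;
                  }
                }
              }
            }, 0);
          }
          return cachedInstanceName == null ? placeholder : cachedInstanceName;
        }
      }

      private static String fetchSlowly() {
        return "test-instance"; // stand-in for the slow remote lookup
      }
    }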

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/LogServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/LogServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/LogServlet.java
index f877664..7c2172a 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/LogServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/LogServlet.java
@@ -43,7 +43,7 @@ public class LogServlet extends BasicServlet {
   
   @Override
   protected void pageBody(HttpServletRequest req, HttpServletResponse resp, StringBuilder sb) {
-    AccumuloConfiguration conf = Monitor.getSystemConfiguration();
+    AccumuloConfiguration conf = Monitor.getContext().getConfiguration();
     boolean clear = true;
     final String dateFormatStr = conf.get(Property.MONITOR_LOG_DATE_FORMAT);
     SimpleDateFormat fmt;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/MasterServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/MasterServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/MasterServlet.java
index 1b613e5..09ab922 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/MasterServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/MasterServlet.java
@@ -42,7 +42,6 @@ import org.apache.accumulo.monitor.util.celltypes.DurationType;
 import org.apache.accumulo.monitor.util.celltypes.NumberType;
 import org.apache.accumulo.monitor.util.celltypes.ProgressChartType;
 import org.apache.accumulo.monitor.util.celltypes.StringType;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.monitor.DedupedLogEvent;
 import org.apache.accumulo.server.monitor.LogService;
 import org.apache.log4j.Level;
@@ -55,13 +54,13 @@ public class MasterServlet extends BasicServlet {
   
   @Override
   protected String getTitle(HttpServletRequest req) {
-    List<String> masters = Monitor.getInstance().getMasterLocations();
+    List<String> masters = Monitor.getContext().getInstance().getMasterLocations();
     return "Master Server" + (masters.size() == 0 ? "" : ":" + AddressUtil.parseAddress(masters.get(0), false).getHostText());
   }
   
   @Override
   protected void pageBody(HttpServletRequest req, HttpServletResponse response, StringBuilder sb) throws IOException {
-    Map<String,String> tidToNameMap = Tables.getIdToNameMap(HdfsZooInstance.getInstance());
+    Map<String,String> tidToNameMap = Tables.getIdToNameMap(Monitor.getContext().getInstance());
     
     doLogEventBanner(sb);
     TablesServlet.doProblemsBanner(sb);
@@ -107,7 +106,7 @@ public class MasterServlet extends BasicServlet {
           long diff = System.currentTimeMillis() - start;
           gcStatus = label + " " + DateFormat.getInstance().format(new Date(start));
           gcStatus = gcStatus.replace(" ", "&nbsp;");
-          long normalDelay = Monitor.getSystemConfiguration().getTimeInMillis(Property.GC_CYCLE_DELAY);
+          long normalDelay = Monitor.getContext().getConfiguration().getTimeInMillis(Property.GC_CYCLE_DELAY);
           if (diff > normalDelay * 2)
             gcStatus = "<span class='warning'>" + gcStatus + "</span>";
         }
@@ -129,7 +128,7 @@ public class MasterServlet extends BasicServlet {
       for (DeadServer down : Monitor.getMmi().deadTabletServers) {
         slaves.add(down.server);
       }
-      List<String> masters = Monitor.getInstance().getMasterLocations();
+      List<String> masters = Monitor.getContext().getInstance().getMasterLocations();
       
       Table masterStatus = new Table("masterStatus", "Master&nbsp;Status");
       masterStatus.addSortableColumn("Master", new StringType<String>(), "The hostname of the master server");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/OperationServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/OperationServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/OperationServlet.java
index c6c75cd..41149e4 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/OperationServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/OperationServlet.java
@@ -22,9 +22,8 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.monitor.Monitor;
 import org.apache.accumulo.server.master.state.DeadServerList;
 import org.apache.accumulo.server.monitor.LogService;
 import org.apache.accumulo.server.problems.ProblemReports;
@@ -102,7 +101,7 @@ public class OperationServlet extends BasicServlet {
     public void execute(HttpServletRequest req, HttpServletResponse resp, Logger log) {
       String table = req.getParameter("table");
       try {
-        ProblemReports.getInstance().deleteProblemReports(table);
+        ProblemReports.getInstance(Monitor.getContext()).deleteProblemReports(table);
       } catch (Exception e) {
         log.error("Failed to delete problem reports for table " + table, e);
       }
@@ -116,7 +115,7 @@ public class OperationServlet extends BasicServlet {
       String resource = req.getParameter("resource");
       String ptype = req.getParameter("ptype");
       try {
-        ProblemReports.getInstance().deleteProblemReport(table, ProblemType.valueOf(ptype), resource);
+        ProblemReports.getInstance(Monitor.getContext()).deleteProblemReport(table, ProblemType.valueOf(ptype), resource);
       } catch (Exception e) {
         log.error("Failed to delete problem reports for table " + table, e);
       }
@@ -163,9 +162,8 @@ public class OperationServlet extends BasicServlet {
     @Override
     public void execute(HttpServletRequest req, HttpServletResponse resp, Logger log) {
       String server = req.getParameter("server");
-      Instance inst = HdfsZooInstance.getInstance();
      // a dead server should have a unique address: a logger or tserver
-      DeadServerList obit = new DeadServerList(ZooUtil.getRoot(inst) + Constants.ZDEADTSERVERS);
+      DeadServerList obit = new DeadServerList(ZooUtil.getRoot(Monitor.getContext().getInstance()) + Constants.ZDEADTSERVERS);
       obit.delete(server);
     }
   }
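
ProblemReports.getInstance() now takes the context explicitly rather than reaching for HdfsZooInstance internally. A sketch of that factory shape with a hypothetical stand-in (Reports is an illustration; whether the real class caches the first instance or builds one per call is not shown in this diff, so the sketch assumes lazy caching):

    // Hypothetical stand-in for a context-dependent, lazily created
    // utility such as ProblemReports.
    final class Reports {
      private static Reports instance = null;
      private final Object context;

      private Reports(Object context) {
        this.context = context;
      }

      // The caller supplies the context; no hidden static lookups.
      // Assumption: the first caller's context wins and is reused.
      static synchronized Reports getInstance(Object context) {
        if (instance == null) {
          instance = new Reports(context);
        }
        return instance;
      }

      void deleteProblemReports(String table) {
        System.out.println("deleting reports for " + table + " via " + context);
      }
    }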

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ProblemServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ProblemServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ProblemServlet.java
index 60cece6..4f7da48 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ProblemServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ProblemServlet.java
@@ -33,7 +33,6 @@ import org.apache.accumulo.monitor.util.celltypes.CellType;
 import org.apache.accumulo.monitor.util.celltypes.DateTimeType;
 import org.apache.accumulo.monitor.util.celltypes.NumberType;
 import org.apache.accumulo.monitor.util.celltypes.StringType;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.problems.ProblemReport;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.problems.ProblemType;
@@ -49,7 +48,7 @@ public class ProblemServlet extends BasicServlet {
   
   @Override
   protected void pageBody(final HttpServletRequest req, HttpServletResponse resp, StringBuilder sb) {
-    Map<String,String> tidToNameMap = Tables.getIdToNameMap(HdfsZooInstance.getInstance());
+    Map<String,String> tidToNameMap = Tables.getIdToNameMap(Monitor.getContext().getInstance());
     doProblemSummary(req, sb, tidToNameMap);
     doProblemDetails(req, sb, req.getParameter("table"), tidToNameMap);
   }
@@ -95,7 +94,8 @@ public class ProblemServlet extends BasicServlet {
       return;
     
     ArrayList<ProblemReport> problemReports = new ArrayList<ProblemReport>();
-    Iterator<ProblemReport> iter = tableId == null ? ProblemReports.getInstance().iterator() : ProblemReports.getInstance().iterator(tableId);
+    Iterator<ProblemReport> iter = tableId == null ? ProblemReports.getInstance(Monitor.getContext()).iterator() : ProblemReports.getInstance(
+        Monitor.getContext()).iterator(tableId);
     while (iter.hasNext())
       problemReports.add(iter.next());
     final SimpleDateFormat sdf = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss zzz");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ReplicationServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ReplicationServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ReplicationServlet.java
index 94765f8..97061b5 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ReplicationServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ReplicationServlet.java
@@ -24,22 +24,17 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.admin.TableOperations;
 import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
 import org.apache.accumulo.core.replication.ReplicationConstants;
 import org.apache.accumulo.core.replication.ReplicationTable;
 import org.apache.accumulo.core.replication.ReplicationTarget;
-import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.monitor.Monitor;
 import org.apache.accumulo.monitor.util.Table;
 import org.apache.accumulo.monitor.util.celltypes.NumberType;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.replication.DistributedWorkQueueWorkAssignerHelper;
 import org.apache.accumulo.server.replication.ReplicationUtil;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -56,7 +51,7 @@ public class ReplicationServlet extends BasicServlet {
   private ReplicationUtil replicationUtil;
 
   public ReplicationServlet() {
-    replicationUtil = new ReplicationUtil();
+    replicationUtil = new ReplicationUtil(Monitor.getContext());
   }
 
   @Override
@@ -66,14 +61,11 @@ public class ReplicationServlet extends BasicServlet {
 
   @Override
   protected void pageBody(HttpServletRequest req, HttpServletResponse response, StringBuilder sb) throws Exception {
-    final Instance inst = HdfsZooInstance.getInstance();
-    final Credentials creds = SystemCredentials.get();
-    final Connector conn = inst.getConnector(creds.getPrincipal(), creds.getToken());
-    final Map<String,String> systemProps = conn.instanceOperations().getSystemConfiguration();
+    final Connector conn = Monitor.getContext().getConnector();
     final MasterMonitorInfo mmi = Monitor.getMmi();
 
     // The total number of "slots" we have to replicate data
-    int totalWorkQueueSize = replicationUtil.getMaxReplicationThreads(systemProps, mmi);
+    int totalWorkQueueSize = replicationUtil.getMaxReplicationThreads(mmi);
 
     TableOperations tops = conn.tableOperations();
     if (!ReplicationTable.isOnline(conn)) {
@@ -88,13 +80,13 @@ public class ReplicationServlet extends BasicServlet {
     replicationStats.addSortableColumn("ReplicaSystem Type");
     replicationStats.addSortableColumn("Files needing replication", new NumberType<Long>(), null);
 
-    Map<String,String> peers = replicationUtil.getPeers(systemProps);
+    Map<String,String> peers = replicationUtil.getPeers();
 
     // The total set of configured targets
-    Set<ReplicationTarget> allConfiguredTargets = replicationUtil.getReplicationTargets(tops);
+    Set<ReplicationTarget> allConfiguredTargets = replicationUtil.getReplicationTargets();
 
     // Number of files per target we have to replicate
-    Map<ReplicationTarget,Long> targetCounts = replicationUtil.getPendingReplications(conn);
+    Map<ReplicationTarget,Long> targetCounts = replicationUtil.getPendingReplications();
 
     Map<String,String> tableNameToId = tops.tableIdMap();
     Map<String,String> tableIdToName = replicationUtil.invert(tableNameToId);
@@ -143,10 +135,10 @@ public class ReplicationServlet extends BasicServlet {
     replicationInProgress.addUnsortableColumn("Status");
 
     // Read the files from the workqueue in zk
-    String zkRoot = ZooUtil.getRoot(inst);
+    String zkRoot = ZooUtil.getRoot(Monitor.getContext().getInstance());
     final String workQueuePath = zkRoot + ReplicationConstants.ZOO_WORK_QUEUE;
 
-    DistributedWorkQueue workQueue = new DistributedWorkQueue(workQueuePath, new ServerConfigurationFactory(inst).getConfiguration());
+    DistributedWorkQueue workQueue = new DistributedWorkQueue(workQueuePath, Monitor.getContext().getConfiguration());
 
     try {
       for (String queueKey : workQueue.getWorkQueued()) {
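
The net effect in this servlet: Instance, Credentials, Connector, and the system-properties map were all being assembled at the top of pageBody() purely so they could be passed into ReplicationUtil's methods; once ReplicationUtil holds the context, getPeers(), getReplicationTargets(), getPendingReplications(), and getMaxReplicationThreads() lose those parameters. A sketch of that parameter-threading cleanup with hypothetical stand-ins (Ctx and ReplicationUtilSketch are illustrations; the "replication.peer." prefix is assumed from Accumulo's peer-definition property convention):

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical stand-in for the configuration access the context
    // provides.
    interface Ctx {
      Map<String,String> systemProperties();
    }

    class ReplicationUtilSketch {
      private static final String PEER_PREFIX = "replication.peer.";
      private final Ctx context;

      ReplicationUtilSketch(Ctx context) {
        this.context = context;
      }

      // Before: getPeers(Map<String,String> systemProps).
      // After: no parameter; the injected context supplies it.
      Map<String,String> getPeers() {
        Map<String,String> peers = new HashMap<>();
        for (Map.Entry<String,String> e : context.systemProperties().entrySet()) {
          if (e.getKey().startsWith(PEER_PREFIX)) {
            peers.put(e.getKey().substring(PEER_PREFIX.length()), e.getValue());
          }
        }
        return peers;
      }
    }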

