accumulo-commits mailing list archives

From ctubb...@apache.org
Subject [02/10] accumulo git commit: ACCUMULO-3199 Internal refactor to add ClientContext
Date Tue, 25 Nov 2014 22:36:46 GMT
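
Nearly every change in this patch follows the same shape, so a compact sketch of the new pattern may help when reading the per-file hunks. Code that previously combined HdfsZooInstance.getInstance(), Monitor.getSystemConfiguration(), and SystemCredentials.get().toThrift(...) now asks a single ClientContext (or its server-side subclass AccumuloServerContext) for the instance, the configuration, and the RPC credentials, and passes that context to helpers such as ThriftUtil.getClient and ProblemReports.getInstance. The class and method names in the sketch are placeholders and not part of this commit; every call it makes appears in the hunks below.

import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.impl.ClientContext;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.security.thrift.TCredentials;
import org.apache.accumulo.monitor.Monitor;

// Illustrative only; not part of this commit.
class ClientContextPatternSketch {
  static void showPattern() {
    ClientContext context = Monitor.getContext();             // one context per server component
    Instance instance = context.getInstance();                // replaces HdfsZooInstance.getInstance()
    AccumuloConfiguration conf = context.getConfiguration();  // replaces Monitor.getSystemConfiguration()
    TCredentials creds = context.rpcCreds();                  // replaces SystemCredentials.get().toThrift(instance)
    System.out.println(instance.getInstanceName() + ": " + conf.getClass().getSimpleName() + ", principal=" + creds.getPrincipal());
  }
}
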
http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
index 4409dff..ecb4ff7 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
@@ -26,6 +26,7 @@ import java.util.Map.Entry;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.master.thrift.DeadServer;
 import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
@@ -50,7 +51,6 @@ import org.apache.accumulo.monitor.util.celltypes.ProgressChartType;
 import org.apache.accumulo.monitor.util.celltypes.TServerLinkType;
 import org.apache.accumulo.monitor.util.celltypes.TableLinkType;
 import org.apache.accumulo.server.master.state.TabletServerState;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.ActionStatsUpdator;
 import org.apache.accumulo.server.util.TableInfoUtil;
 
@@ -124,12 +124,13 @@ public class TServersServlet extends BasicServlet {
     TabletStats historical = new TabletStats(null, new ActionStats(), new ActionStats(), new ActionStats(), 0, 0, 0, 0);
     List<TabletStats> tsStats = new ArrayList<TabletStats>();
     try {
-      TabletClientService.Client client = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, Monitor.getSystemConfiguration());
+      ClientContext context = Monitor.getContext();
+      TabletClientService.Client client = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, context);
       try {
         for (String tableId : Monitor.getMmi().tableMap.keySet()) {
-          tsStats.addAll(client.getTabletStats(Tracer.traceInfo(), SystemCredentials.get().toThrift(Monitor.getInstance()), tableId));
+          tsStats.addAll(client.getTabletStats(Tracer.traceInfo(), context.rpcCreds(), tableId));
         }
-        historical = client.getHistoricalStats(Tracer.traceInfo(), SystemCredentials.get().toThrift(Monitor.getInstance()));
+        historical = client.getHistoricalStats(Tracer.traceInfo(), context.rpcCreds());
       } finally {
         ThriftUtil.returnClient(client);
       }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TablesServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TablesServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TablesServlet.java
index 428880e..e5914f9 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TablesServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TablesServlet.java
@@ -43,10 +43,8 @@ import org.apache.accumulo.monitor.util.celltypes.DurationType;
 import org.apache.accumulo.monitor.util.celltypes.NumberType;
 import org.apache.accumulo.monitor.util.celltypes.TableLinkType;
 import org.apache.accumulo.monitor.util.celltypes.TableStateType;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.master.state.MetaDataTableScanner;
 import org.apache.accumulo.server.master.state.TabletLocationState;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.server.util.TableInfoUtil;
 import org.apache.hadoop.io.Text;
@@ -62,7 +60,7 @@ public class TablesServlet extends BasicServlet {
   
   @Override
   protected void pageBody(HttpServletRequest req, HttpServletResponse response, StringBuilder sb) throws Exception {
-    Map<String,String> tidToNameMap = Tables.getIdToNameMap(HdfsZooInstance.getInstance());
+    Map<String,String> tidToNameMap = Tables.getIdToNameMap(Monitor.getContext().getInstance());
     String tableId = req.getParameter("t");
     
     doProblemsBanner(sb);
@@ -117,7 +115,7 @@ public class TablesServlet extends BasicServlet {
     Map<String,Double> compactingByTable = TableInfoUtil.summarizeTableStats(Monitor.getMmi());
     TableManager tableManager = TableManager.getInstance();
     
-    for (Entry<String,String> tableName_tableId : Tables.getNameToIdMap(HdfsZooInstance.getInstance()).entrySet()) {
+    for (Entry<String,String> tableName_tableId : Tables.getNameToIdMap(Monitor.getContext().getInstance()).entrySet()) {
       String tableName = tableName_tableId.getKey();
       String tableId = tableName_tableId.getValue();
       TableInfo tableInfo = tableStats.get(tableName);
@@ -146,14 +144,14 @@ public class TablesServlet extends BasicServlet {
   
   private void doTableDetails(HttpServletRequest req, StringBuilder sb, Map<String,String> tidToNameMap, String tableId) {
     String displayName = Tables.getPrintableTableNameFromId(tidToNameMap, tableId);
-    Instance instance = HdfsZooInstance.getInstance();
+    Instance instance = Monitor.getContext().getInstance();
     TreeSet<String> locs = new TreeSet<String>();
     if (RootTable.ID.equals(tableId)) {
       locs.add(instance.getRootTabletLocation());
     } else {
       String systemTableName = MetadataTable.ID.equals(tableId) ? RootTable.NAME : MetadataTable.NAME;
-      MetaDataTableScanner scanner = new MetaDataTableScanner(instance, SystemCredentials.get(), new Range(KeyExtent.getMetadataEntry(new Text(tableId),
-          new Text()), KeyExtent.getMetadataEntry(new Text(tableId), null)), systemTableName);
+      MetaDataTableScanner scanner = new MetaDataTableScanner(Monitor.getContext(), new Range(KeyExtent.getMetadataEntry(new Text(tableId), new Text()),
+          KeyExtent.getMetadataEntry(new Text(tableId), null)), systemTableName);
       
       while (scanner.hasNext()) {
         TabletLocationState state = scanner.next();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/VisServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/VisServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/VisServlet.java
index fb56573..af1688e 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/VisServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/VisServlet.java
@@ -28,7 +28,7 @@ import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.monitor.Monitor;
 
 public class VisServlet extends BasicServlet {
-  private static final int concurrentScans = Monitor.getSystemConfiguration().getCount(Property.TSERV_READ_AHEAD_MAXCONCURRENT);
+  private static final int concurrentScans = Monitor.getContext().getConfiguration().getCount(Property.TSERV_READ_AHEAD_MAXCONCURRENT);
   
   private static final long serialVersionUID = 1L;
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/XMLServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/XMLServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/XMLServlet.java
index 405d6df..3b115f9 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/XMLServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/XMLServlet.java
@@ -33,7 +33,6 @@ import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.monitor.Monitor;
 import org.apache.accumulo.monitor.util.celltypes.TServerLinkType;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.master.state.TabletServerState;
 import org.apache.accumulo.server.util.TableInfoUtil;
 
@@ -127,7 +126,7 @@ public class XMLServlet extends BasicServlet {
     sb.append("\n</deadLoggers>\n");
     
     sb.append("\n<tables>\n");
-    Instance instance = HdfsZooInstance.getInstance();
+    Instance instance = Monitor.getContext().getInstance();
     for (Entry<String,TableInfo> entry : tableStats.entrySet()) {
       TableInfo tableInfo = entry.getValue();
       

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/Basic.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/Basic.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/Basic.java
index 53cb172..2d98fed 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/Basic.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/Basic.java
@@ -69,7 +69,7 @@ abstract class Basic extends BasicServlet {
   }
 
   protected Scanner getScanner(StringBuilder sb) throws AccumuloException, AccumuloSecurityException {
-    AccumuloConfiguration conf = Monitor.getSystemConfiguration();
+    AccumuloConfiguration conf = Monitor.getContext().getConfiguration();
     String principal = conf.get(Property.TRACE_USER);
     AuthenticationToken at;
     Map<String,String> loginMap = conf.getAllPropertiesWithPrefix(Property.TRACE_TOKEN_PROPERTY_PREFIX);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/tserver/src/main/java/org/apache/accumulo/tserver/FileManager.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/FileManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/FileManager.java
index fb14ca5..98ccd07 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/FileManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/FileManager.java
@@ -43,7 +43,7 @@ import org.apache.accumulo.core.iterators.system.SourceSwitchingIterator;
 import org.apache.accumulo.core.iterators.system.SourceSwitchingIterator.DataSource;
 import org.apache.accumulo.core.iterators.system.TimeSettingIterator;
 import org.apache.accumulo.core.metadata.schema.DataFileValue;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.problems.ProblemReport;
@@ -112,9 +112,9 @@ public class FileManager {
   private BlockCache indexCache = null;
   
   private long maxIdleTime;
-  
-  private final ServerConfigurationFactory conf;
-  
+
+  private final AccumuloServerContext context;
+
   private class IdleFileCloser implements Runnable {
     
     @Override
@@ -161,11 +161,11 @@ public class FileManager {
    * @param indexCache
    *          : underlying file can and should be able to handle a null cache
    */
-  public FileManager(ServerConfigurationFactory conf, VolumeManager fs, int maxOpen, BlockCache dataCache, BlockCache indexCache) {
-    
+  public FileManager(AccumuloServerContext context, VolumeManager fs, int maxOpen, BlockCache dataCache, BlockCache indexCache) {
+
     if (maxOpen <= 0)
       throw new IllegalArgumentException("maxOpen <= 0");
-    this.conf = conf;
+    this.context = context;
     this.dataCache = dataCache;
     this.indexCache = indexCache;
     
@@ -175,10 +175,10 @@ public class FileManager {
     
     this.openFiles = new HashMap<String,List<OpenReader>>();
     this.reservedReaders = new HashMap<FileSKVIterator,String>();
-    
-    this.maxIdleTime = conf.getConfiguration().getTimeInMillis(Property.TSERV_MAX_IDLE);
-    SimpleTimer.getInstance(conf.getConfiguration()).schedule(new IdleFileCloser(), maxIdleTime, maxIdleTime / 2);
-    
+
+    this.maxIdleTime = context.getConfiguration().getTimeInMillis(Property.TSERV_MAX_IDLE);
+    SimpleTimer.getInstance(context.getConfiguration()).schedule(new IdleFileCloser(), maxIdleTime, maxIdleTime / 2);
+
   }
   
   private static int countReaders(Map<String,List<OpenReader>> files) {
@@ -312,15 +312,15 @@ public class FileManager {
           throw new IllegalArgumentException("Expected uri, got : " + file);
         Path path = new Path(file);
         FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
-        //log.debug("Opening "+file + " path " + path);
-        FileSKVIterator reader = FileOperations.getInstance().openReader(path.toString(), false, ns, ns.getConf(), conf.getTableConfiguration(tablet),
-            dataCache, indexCache);
+        // log.debug("Opening "+file + " path " + path);
+        FileSKVIterator reader = FileOperations.getInstance().openReader(path.toString(), false, ns, ns.getConf(),
+            context.getServerConfigurationFactory().getTableConfiguration(tablet), dataCache, indexCache);
         reservedFiles.add(reader);
         readersReserved.put(reader, file);
       } catch (Exception e) {
-        
-        ProblemReports.getInstance().report(new ProblemReport(tablet.toString(), ProblemType.FILE_READ, file, e));
-        
+
+        ProblemReports.getInstance(context).report(new ProblemReport(tablet.toString(), ProblemType.FILE_READ, file, e));
+
         if (continueOnFailure) {
           // release the permit for the file that failed to open
           if (!tablet.isMeta()) {
@@ -471,9 +471,9 @@ public class FileManager {
       tabletReservedReaders = new ArrayList<FileSKVIterator>();
       dataSources = new ArrayList<FileDataSource>();
       this.tablet = tablet;
-      
-      continueOnFailure = conf.getTableConfiguration(tablet).getBoolean(Property.TABLE_FAILURES_IGNORE);
-      
+
+      continueOnFailure = context.getServerConfigurationFactory().getTableConfiguration(tablet).getBoolean(Property.TABLE_FAILURES_IGNORE);
+
       if (tablet.isMeta()) {
         continueOnFailure = false;
       }
@@ -514,9 +514,9 @@ public class FileManager {
           FileDataSource fds = new FileDataSource(filename, reader);
           dataSources.add(fds);
           SourceSwitchingIterator ssi = new SourceSwitchingIterator(fds);
-          iter = new ProblemReportingIterator(tablet.getTableId().toString(), filename, continueOnFailure, ssi);
+          iter = new ProblemReportingIterator(context, tablet.getTableId().toString(), filename, continueOnFailure, ssi);
         } else {
-          iter = new ProblemReportingIterator(tablet.getTableId().toString(), filename, continueOnFailure, reader);
+          iter = new ProblemReportingIterator(context, tablet.getTableId().toString(), filename, continueOnFailure, reader);
         }
         DataFileValue value = files.get(new FileRef(filename));
         if (value.isTimeSet()) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
index 93161ee..5b49529 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
@@ -144,6 +144,7 @@ import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
 import org.apache.accumulo.fate.zookeeper.ZooLock.LockWatcher;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.server.Accumulo;
+import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.GarbageCollectionLogger;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.ServerOpts;
@@ -171,7 +172,6 @@ import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.replication.ZooKeeperInitialization;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityOperation;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.FileSystemMonitor;
 import org.apache.accumulo.server.util.Halt;
 import org.apache.accumulo.server.util.MasterMetadataUtil;
@@ -241,7 +241,7 @@ import org.apache.zookeeper.KeeperException.NoNodeException;
 
 import com.google.common.net.HostAndPort;
 
-public class TabletServer implements Runnable {
+public class TabletServer extends AccumuloServerContext implements Runnable {
   private static final Logger log = Logger.getLogger(TabletServer.class);
   private static final long MAX_TIME_TO_WAIT_FOR_SCAN_RESULT_MILLIS = 1000;
   private static final long RECENTLY_SPLIT_MILLIES = 60 * 1000;
@@ -255,11 +255,11 @@ public class TabletServer implements Runnable {
   private final TabletServerLogger logger;
 
   private final TabletServerMinCMetrics mincMetrics = new TabletServerMinCMetrics();
+
   public TabletServerMinCMetrics getMinCMetrics() {
     return mincMetrics;
   }
 
-  private final ServerConfigurationFactory serverConfig;
   private final LogSorter logSorter;
   private ReplicationWorker replWorker = null;
   private final TabletStatsKeeper statsKeeper;
@@ -269,9 +269,6 @@ public class TabletServer implements Runnable {
   private final AtomicLong syncCounter = new AtomicLong(0);
 
   private final VolumeManager fs;
-  public Instance getInstance() {
-    return serverConfig.getInstance();
-  }
 
   private final SortedMap<KeyExtent,Tablet> onlineTablets = Collections.synchronizedSortedMap(new TreeMap<KeyExtent,Tablet>());
   private final SortedSet<KeyExtent> unopenedTablets = Collections.synchronizedSortedSet(new TreeSet<KeyExtent>());
@@ -305,10 +302,11 @@ public class TabletServer implements Runnable {
   public static final AtomicLong seekCount = new AtomicLong(0);
 
   private final AtomicLong totalMinorCompactions = new AtomicLong(0);
+  private final ServerConfigurationFactory confFactory;
 
-  public TabletServer(ServerConfigurationFactory conf, VolumeManager fs) {
-    super();
-    this.serverConfig = conf;
+  public TabletServer(ServerConfigurationFactory confFactory, VolumeManager fs) {
+    super(confFactory);
+    this.confFactory = confFactory;
     this.fs = fs;
     AccumuloConfiguration aconf = getConfiguration();
     Instance instance = getInstance();
@@ -331,19 +329,14 @@ public class TabletServer implements Runnable {
       }
     }, 5000, 5000);
 
-    security = AuditedSecurityOperation.getInstance();
-
     long walogMaxSize = getConfiguration().getMemoryInBytes(Property.TSERV_WALOG_MAX_SIZE);
     long minBlockSize = CachedConfiguration.getInstance().getLong("dfs.namenode.fs-limits.min-block-size", 0);
     if (minBlockSize != 0 && minBlockSize > walogMaxSize)
       throw new RuntimeException("Unable to start TabletServer. Logger is set to use blocksize " + walogMaxSize + " but hdfs minimum block size is "
           + minBlockSize + ". Either increase the " + Property.TSERV_WALOG_MAX_SIZE + " or decrease dfs.namenode.fs-limits.min-block-size in hdfs-site.xml.");
     logger = new TabletServerLogger(this, walogMaxSize, syncCounter, flushCounter);
-    this.resourceManager = new TabletServerResourceManager(getInstance(), fs);
-  }
-
-  public AccumuloConfiguration getConfiguration() {
-    return serverConfig.getConfiguration();
+    this.resourceManager = new TabletServerResourceManager(this, fs);
+    this.security = AuditedSecurityOperation.getInstance(this);
   }
 
   private final SessionManager sessionManager;
@@ -361,7 +354,7 @@ public class TabletServer implements Runnable {
   private class ThriftClientHandler extends ClientServiceHandler implements TabletClientService.Iface {
 
     ThriftClientHandler() {
-      super(getInstance(), watcher, fs);
+      super(TabletServer.this, watcher, fs);
       log.debug(ThriftClientHandler.class.getName() + " created");
       // Register the metrics MBean
       try {
@@ -951,8 +944,8 @@ public class TabletServer implements Runnable {
     }
 
     @Override
-    public void update(TInfo tinfo, TCredentials credentials, TKeyExtent tkeyExtent, TMutation tmutation, TDurability tdurability) throws NotServingTabletException,
-        ConstraintViolationException, ThriftSecurityException {
+    public void update(TInfo tinfo, TCredentials credentials, TKeyExtent tkeyExtent, TMutation tmutation, TDurability tdurability)
+        throws NotServingTabletException, ConstraintViolationException, ThriftSecurityException {
 
       final String tableId = new String(tkeyExtent.getTable(), UTF_8);
       if (!security.canWrite(credentials, tableId, Tables.getNamespaceId(getInstance(), tableId)))
@@ -1213,8 +1206,8 @@ public class TabletServer implements Runnable {
     }
 
     @Override
-    public TConditionalSession startConditionalUpdate(TInfo tinfo, TCredentials credentials, List<ByteBuffer> authorizations, String tableId, TDurability tdurabilty)
-        throws ThriftSecurityException, TException {
+    public TConditionalSession startConditionalUpdate(TInfo tinfo, TCredentials credentials, List<ByteBuffer> authorizations, String tableId,
+        TDurability tdurabilty) throws ThriftSecurityException, TException {
 
       Authorizations userauths = null;
       if (!security.canConditionallyUpdate(credentials, tableId, Tables.getNamespaceId(getInstance(), tableId), authorizations))
@@ -1365,7 +1358,7 @@ public class TabletServer implements Runnable {
         }
       } catch (ThriftSecurityException e) {
         log.warn("Got " + request + " message from unauthenticatable user: " + e.getUser());
-        if (SystemCredentials.get().getToken().getClass().getName().equals(credentials.getTokenClassName())) {
+        if (getCredentials().getToken().getClass().getName().equals(credentials.getTokenClassName())) {
           log.fatal("Got message from a service with a mismatched configuration. Please ensure a compatible configuration.", e);
           fatal = true;
         }
@@ -1707,7 +1700,8 @@ public class TabletServer implements Runnable {
           } else {
             log.info("Deleting walog " + filename);
             Path sourcePath = new Path(filename);
-            if (!(!TabletServer.this.getConfiguration().getBoolean(Property.GC_TRASH_IGNORE) && fs.moveToTrash(sourcePath)) && !fs.deleteRecursively(sourcePath))
+            if (!(!TabletServer.this.getConfiguration().getBoolean(Property.GC_TRASH_IGNORE) && fs.moveToTrash(sourcePath))
+                && !fs.deleteRecursively(sourcePath))
               log.warn("Failed to delete walog " + source);
             for (String recovery : ServerConstants.getRecoveryDirs()) {
               Path recoveryPath = new Path(recovery, source.getName());
@@ -2007,7 +2001,7 @@ public class TabletServer implements Runnable {
           log.error("Unexpected error ", e);
         }
         log.debug("Unassigning " + tls);
-        TabletStateStore.unassign(tls);
+        TabletStateStore.unassign(TabletServer.this, tls);
       } catch (DistributedStoreException ex) {
         log.warn("Unable to update storage", ex);
       } catch (KeeperException e) {
@@ -2078,7 +2072,8 @@ public class TabletServer implements Runnable {
       Text locationToOpen = null;
       SortedMap<Key,Value> tabletsKeyValues = new TreeMap<Key,Value>();
       try {
-        Pair<Text,KeyExtent> pair = verifyTabletInformation(extent, TabletServer.this.getTabletSession(), tabletsKeyValues, getClientAddressString(), getLock());
+        Pair<Text,KeyExtent> pair = verifyTabletInformation(TabletServer.this, extent, TabletServer.this.getTabletSession(), tabletsKeyValues,
+            getClientAddressString(), getLock());
         if (pair != null) {
           locationToOpen = pair.getFirst();
           if (pair.getSecond() != null) {
@@ -2141,7 +2136,7 @@ public class TabletServer implements Runnable {
         }
 
         Assignment assignment = new Assignment(extent, getTabletSession());
-        TabletStateStore.setLocation(assignment);
+        TabletStateStore.setLocation(TabletServer.this, assignment);
 
         synchronized (openingTablets) {
           synchronized (onlineTablets) {
@@ -2158,7 +2153,7 @@ public class TabletServer implements Runnable {
         if (e.getMessage() != null)
           log.warn(e.getMessage());
         String table = extent.getTableId().toString();
-        ProblemReports.getInstance().report(new ProblemReport(table, TABLET_LOAD, extent.getUUID().toString(), getClientAddressString(), e));
+        ProblemReports.getInstance(TabletServer.this).report(new ProblemReport(table, TABLET_LOAD, extent.getUUID().toString(), getClientAddressString(), e));
       }
 
       if (!successful) {
@@ -2215,13 +2210,13 @@ public class TabletServer implements Runnable {
     entry.server = logs.get(0).getLogger();
     entry.filename = logs.get(0).getFileName();
     entry.logSet = logSet;
-    MetadataTableUtil.addLogEntry(SystemCredentials.get(), entry, getLock());
+    MetadataTableUtil.addLogEntry(this, entry, getLock());
   }
 
   private HostAndPort startServer(AccumuloConfiguration conf, String address, Property portHint, TProcessor processor, String threadName)
       throws UnknownHostException {
     Property maxMessageSizeProperty = (conf.get(Property.TSERV_MAX_MESSAGE_SIZE) != null ? Property.TSERV_MAX_MESSAGE_SIZE : Property.GENERAL_MAX_MESSAGE_SIZE);
-    ServerAddress sp = TServerUtils.startServer(conf, address, portHint, processor, this.getClass().getSimpleName(), threadName, Property.TSERV_PORTSEARCH,
+    ServerAddress sp = TServerUtils.startServer(this, address, portHint, processor, this.getClass().getSimpleName(), threadName, Property.TSERV_PORTSEARCH,
         Property.TSERV_MINTHREADS, Property.TSERV_THREADCHECK, maxMessageSizeProperty);
     this.server = sp.server;
     return sp.address;
@@ -2246,8 +2241,7 @@ public class TabletServer implements Runnable {
       if (address == null) {
         return null;
       }
-      MasterClientService.Client client = ThriftUtil.getClient(new MasterClientService.Client.Factory(), address, Property.GENERAL_RPC_TIMEOUT,
-          getConfiguration());
+      MasterClientService.Client client = ThriftUtil.getClient(new MasterClientService.Client.Factory(), address, this);
       // log.info("Listener API to master has been opened");
       return client;
     } catch (Exception e) {
@@ -2264,17 +2258,18 @@ public class TabletServer implements Runnable {
     // start listening for client connection last
     Iface tch = RpcWrapper.service(new ThriftClientHandler());
     Processor<Iface> processor = new Processor<Iface>(tch);
-    HostAndPort address = startServer(getConfiguration(), clientAddress.getHostText(), Property.TSERV_CLIENTPORT, processor, "Thrift Client Server");
+    HostAndPort address = startServer(getServerConfigurationFactory().getConfiguration(), clientAddress.getHostText(), Property.TSERV_CLIENTPORT, processor,
+        "Thrift Client Server");
     log.info("address = " + address);
     return address;
   }
 
   private HostAndPort startReplicationService() throws UnknownHostException {
-    ReplicationServicer.Iface repl = RpcWrapper.service(new ReplicationServicerHandler(HdfsZooInstance.getInstance()));
+    ReplicationServicer.Iface repl = RpcWrapper.service(new ReplicationServicerHandler(this));
     ReplicationServicer.Processor<ReplicationServicer.Iface> processor = new ReplicationServicer.Processor<ReplicationServicer.Iface>(repl);
-    AccumuloConfiguration conf = getConfiguration();
+    AccumuloConfiguration conf = getServerConfigurationFactory().getConfiguration();
     Property maxMessageSizeProperty = (conf.get(Property.TSERV_MAX_MESSAGE_SIZE) != null ? Property.TSERV_MAX_MESSAGE_SIZE : Property.GENERAL_MAX_MESSAGE_SIZE);
-    ServerAddress sp = TServerUtils.startServer(conf, clientAddress.getHostText(), Property.REPLICATION_RECEIPT_SERVICE_PORT, processor,
+    ServerAddress sp = TServerUtils.startServer(this, clientAddress.getHostText(), Property.REPLICATION_RECEIPT_SERVICE_PORT, processor,
         "ReplicationServicerHandler", "Replication Servicer", null, Property.REPLICATION_MIN_THREADS, Property.REPLICATION_THREADCHECK, maxMessageSizeProperty);
     this.replServer = sp.server;
     log.info("Started replication service on " + sp.address);
@@ -2450,7 +2445,7 @@ public class TabletServer implements Runnable {
           while (!serverStopRequested && mm != null && client != null && client.getOutputProtocol() != null
               && client.getOutputProtocol().getTransport() != null && client.getOutputProtocol().getTransport().isOpen()) {
             try {
-              mm.send(SystemCredentials.get().toThrift(getInstance()), getClientAddressString(), iface);
+              mm.send(rpcCreds(), getClientAddressString(), iface);
               mm = null;
             } catch (TException ex) {
               log.warn("Error sending message: queuing message again");
@@ -2544,8 +2539,8 @@ public class TabletServer implements Runnable {
     }
   }
 
-  public static Pair<Text,KeyExtent> verifyTabletInformation(KeyExtent extent, TServerInstance instance, SortedMap<Key,Value> tabletsKeyValues,
-      String clientAddress, ZooLock lock) throws AccumuloSecurityException, DistributedStoreException, AccumuloException {
+  public static Pair<Text,KeyExtent> verifyTabletInformation(AccumuloServerContext context, KeyExtent extent, TServerInstance instance,
+      SortedMap<Key,Value> tabletsKeyValues, String clientAddress, ZooLock lock) throws AccumuloSecurityException, DistributedStoreException, AccumuloException {
 
     log.debug("verifying extent " + extent);
     if (extent.isRootTablet()) {
@@ -2559,7 +2554,7 @@ public class TabletServer implements Runnable {
         TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN, TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN,
         TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN, TabletsSection.ServerColumnFamily.TIME_COLUMN});
 
-    ScannerImpl scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SystemCredentials.get(), tableToVerify, Authorizations.EMPTY);
+    ScannerImpl scanner = new ScannerImpl(context, tableToVerify, Authorizations.EMPTY);
     scanner.setRange(extent.toMetadataRange());
 
     TreeMap<Key,Value> tkv = new TreeMap<Key,Value>();
@@ -2593,7 +2588,7 @@ public class TabletServer implements Runnable {
 
       KeyExtent fke;
       try {
-        fke = MasterMetadataUtil.fixSplit(metadataEntry, tabletEntries.get(metadataEntry), instance, SystemCredentials.get(), lock);
+        fke = MasterMetadataUtil.fixSplit(context, metadataEntry, tabletEntries.get(metadataEntry), instance, lock);
       } catch (IOException e) {
         log.error("Error fixing split " + metadataEntry);
         throw new AccumuloException(e.toString());
@@ -2605,7 +2600,7 @@ public class TabletServer implements Runnable {
 
       // reread and reverify metadata entries now that metadata entries were fixed
       tabletsKeyValues.clear();
-      return verifyTabletInformation(fke, instance, tabletsKeyValues, clientAddress, lock);
+      return verifyTabletInformation(context, fke, instance, tabletsKeyValues, clientAddress, lock);
     }
 
     return new Pair<Text,KeyExtent>(new Text(dir.get()), null);
@@ -2879,8 +2874,7 @@ public class TabletServer implements Runnable {
       opts.parseArgs(app, args);
       String hostname = opts.getAddress();
       Accumulo.setupLogging(app);
-      final Instance instance = HdfsZooInstance.getInstance();
-      ServerConfigurationFactory conf = new ServerConfigurationFactory(instance);
+      ServerConfigurationFactory conf = new ServerConfigurationFactory(HdfsZooInstance.getInstance());
       VolumeManager fs = VolumeManagerImpl.get();
       Accumulo.init(fs, conf, app);
       TabletServer server = new TabletServer(conf, fs);
@@ -2904,8 +2898,8 @@ public class TabletServer implements Runnable {
     logger.minorCompactionStarted(tablet, lastUpdateSequence, newMapfileLocation);
   }
 
-  public void recover(VolumeManager fs, KeyExtent extent, TableConfiguration tconf, List<LogEntry> logEntries, Set<String> tabletFiles, MutationReceiver mutationReceiver)
-      throws IOException {
+  public void recover(VolumeManager fs, KeyExtent extent, TableConfiguration tconf, List<LogEntry> logEntries, Set<String> tabletFiles,
+      MutationReceiver mutationReceiver) throws IOException {
     List<Path> recoveryLogs = new ArrayList<Path>();
     List<LogEntry> sorted = new ArrayList<LogEntry>(logEntries);
     Collections.sort(sorted, new Comparator<LogEntry>() {
@@ -2941,7 +2935,7 @@ public class TabletServer implements Runnable {
   }
 
   public TableConfiguration getTableConfiguration(KeyExtent extent) {
-    return serverConfig.getTableConfiguration(extent.getTableId().toString());
+    return confFactory.getTableConfiguration(extent.getTableId().toString());
   }
 
   public DfsLogger.ServerResources getServerConfig() {
@@ -2964,7 +2958,6 @@ public class TabletServer implements Runnable {
     };
   }
 
-
   public Collection<Tablet> getOnlineTablets() {
     return Collections.unmodifiableCollection(onlineTablets.values());
   }
@@ -2988,4 +2981,8 @@ public class TabletServer implements Runnable {
   public double getHoldTimeMillis() {
     return resourceManager.holdTime();
   }
+
+  public SecurityOperation getSecurityOperation() {
+    return security;
+  }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
index ba86522..0bcff3d 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
@@ -35,7 +35,6 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.KeyExtent;
@@ -99,6 +98,7 @@ public class TabletServerResourceManager {
 
   private final LruBlockCache _dCache;
   private final LruBlockCache _iCache;
+  private final TabletServer tserver;
   private final ServerConfigurationFactory conf;
 
   private ExecutorService addEs(String name, ExecutorService tp) {
@@ -112,11 +112,11 @@ public class TabletServerResourceManager {
 
   private ExecutorService addEs(final Property maxThreads, String name, final ThreadPoolExecutor tp) {
     ExecutorService result = addEs(name, tp);
-    SimpleTimer.getInstance(conf.getConfiguration()).schedule(new Runnable() {
+    SimpleTimer.getInstance(tserver.getConfiguration()).schedule(new Runnable() {
       @Override
       public void run() {
         try {
-          int max = conf.getConfiguration().getCount(maxThreads);
+          int max = tserver.getConfiguration().getCount(maxThreads);
           if (tp.getMaximumPoolSize() != max) {
             log.info("Changing " + maxThreads.getKey() + " to " + max);
             tp.setCorePoolSize(max);
@@ -149,8 +149,9 @@ public class TabletServerResourceManager {
     return addEs(name, new ThreadPoolExecutor(min, max, timeout, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamingThreadFactory(name)));
   }
 
-  public TabletServerResourceManager(Instance instance, VolumeManager fs) {
-    this.conf = new ServerConfigurationFactory(instance);
+  public TabletServerResourceManager(TabletServer tserver, VolumeManager fs) {
+    this.tserver = tserver;
+    this.conf = tserver.getServerConfigurationFactory();
     this.fs = fs;
     final AccumuloConfiguration acuConf = conf.getConfiguration();
 
@@ -168,8 +169,8 @@ public class TabletServerResourceManager {
     Runtime runtime = Runtime.getRuntime();
     if (!usingNativeMap && maxMemory + dCacheSize + iCacheSize + totalQueueSize > runtime.maxMemory()) {
       throw new IllegalArgumentException(String.format(
-          "Maximum tablet server map memory %,d block cache sizes %,d and mutation queue size %,d is too large for this JVM configuration %,d", maxMemory, dCacheSize + iCacheSize,
-          totalQueueSize, runtime.maxMemory()));
+          "Maximum tablet server map memory %,d block cache sizes %,d and mutation queue size %,d is too large for this JVM configuration %,d", maxMemory,
+          dCacheSize + iCacheSize, totalQueueSize, runtime.maxMemory()));
     }
     runtime.gc();
 
@@ -207,14 +208,14 @@ public class TabletServerResourceManager {
 
     int maxOpenFiles = acuConf.getCount(Property.TSERV_SCAN_MAX_OPENFILES);
 
-    fileManager = new FileManager(conf, fs, maxOpenFiles, _dCache, _iCache);
+    fileManager = new FileManager(tserver, fs, maxOpenFiles, _dCache, _iCache);
 
     memoryManager = Property.createInstanceFromPropertyName(acuConf, Property.TSERV_MEM_MGMT, MemoryManager.class, new LargestFirstMemoryManager());
-    memoryManager.init(conf);
+    memoryManager.init(tserver.getServerConfigurationFactory());
     memMgmt = new MemoryManagementFramework();
     memMgmt.startThreads();
 
-    SimpleTimer timer = SimpleTimer.getInstance(conf.getConfiguration());
+    SimpleTimer timer = SimpleTimer.getInstance(tserver.getConfiguration());
 
     // We can use the same map for both metadata and normal assignments since the keyspace (extent)
     // is guaranteed to be unique. Schedule the task once, the task will reschedule itself.
@@ -545,6 +546,7 @@ public class TabletServerResourceManager {
     KeyExtent getExtent() {
       return extent;
     }
+
     AccumuloConfiguration getTableConfiguration() {
       return tableConf;
     }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java
index 91ec141..ceb76da 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java
@@ -32,10 +32,6 @@ import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.accumulo.server.util.Halt;
-
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
 import org.apache.accumulo.core.client.Durability;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
@@ -46,7 +42,7 @@ import org.apache.accumulo.core.replication.proto.Replication.Status;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.security.SystemCredentials;
+import org.apache.accumulo.server.util.Halt;
 import org.apache.accumulo.server.util.ReplicationTableUtil;
 import org.apache.accumulo.tserver.Mutations;
 import org.apache.accumulo.tserver.TabletMutations;
@@ -56,6 +52,9 @@ import org.apache.accumulo.tserver.tablet.CommitSession;
 import org.apache.hadoop.fs.Path;
 import org.apache.log4j.Logger;
 
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+
 /**
  * Central logging facility for the TServerInfo.
  *
@@ -288,7 +287,7 @@ public class TabletServerLogger {
                 Status status = StatusUtil.fileCreated(System.currentTimeMillis());
                 log.debug("Writing " + ProtobufUtil.toString(status) + " to metadata table for " + logs);
                 // Got some new WALs, note this in the metadata table
-                ReplicationTableUtil.updateFiles(SystemCredentials.get(), commitSession.getExtent(), logs, status);
+                ReplicationTableUtil.updateFiles(tserver, commitSession.getExtent(), logs, status);
               }
             }
           }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
index 71643e8..9fc7fa4 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
@@ -32,6 +32,7 @@ import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.ClientExecReturn;
 import org.apache.accumulo.core.client.impl.ReplicationClient;
 import org.apache.accumulo.core.client.replication.ReplicaSystem;
@@ -154,13 +155,11 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
   public Status replicate(final Path p, final Status status, final ReplicationTarget target, final ReplicaSystemHelper helper) {
     final Instance localInstance = HdfsZooInstance.getInstance();
     final AccumuloConfiguration localConf = new ServerConfigurationFactory(localInstance).getConfiguration();
-    Credentials credentialsForPeer = getCredentialsForPeer(localConf, target);
-    final TCredentials tCredsForPeer = credentialsForPeer.toThrift(localInstance);
+    final ClientContext peerContext = getContextForPeer(localConf, target);
 
     try {
       Trace.on("AccumuloReplicaSystem");
 
-      Instance peerInstance = getPeerInstance(target);
       // Remote identifier is an integer (table id) in this case.
       final String remoteTableId = target.getRemoteIdentifier();
 
@@ -172,11 +171,11 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
         Span span = Trace.start("Fetch peer tserver");
         try {
           // Ask the master on the remote what TServer we should talk with to replicate the data
-          peerTserver = ReplicationClient.executeCoordinatorWithReturn(peerInstance, new ClientExecReturn<String,ReplicationCoordinator.Client>() {
+          peerTserver = ReplicationClient.executeCoordinatorWithReturn(peerContext, new ClientExecReturn<String,ReplicationCoordinator.Client>() {
 
             @Override
             public String execute(ReplicationCoordinator.Client client) throws Exception {
-              return client.getServicerAddress(remoteTableId, tCredsForPeer);
+              return client.getServicerAddress(remoteTableId, peerContext.rpcCreds());
             }
 
           });
@@ -201,20 +200,20 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
           if (p.getName().endsWith(RFILE_SUFFIX)) {
             span = Trace.start("RFile replication");
             try {
-              finalStatus = replicateRFiles(peerInstance, peerTserver, target, p, status, sizeLimit, remoteTableId, tCredsForPeer, helper);
+              finalStatus = replicateRFiles(peerContext, peerTserver, target, p, status, sizeLimit, remoteTableId, peerContext.rpcCreds(), helper);
             } finally {
               span.stop();
             }
           } else {
             span = Trace.start("WAL replication");
             try {
-              finalStatus = replicateLogs(peerInstance, peerTserver, target, p, status, sizeLimit, remoteTableId, tCredsForPeer, helper);
+              finalStatus = replicateLogs(peerContext, peerTserver, target, p, status, sizeLimit, remoteTableId, peerContext.rpcCreds(), helper);
             } finally {
               span.stop();
             }
           }
 
-          log.debug("New status for {} after replicating to {} is {}", p, peerInstance, ProtobufUtil.toString(finalStatus));
+          log.debug("New status for {} after replicating to {} is {}", p, peerContext.getInstance(), ProtobufUtil.toString(finalStatus));
 
           return finalStatus;
         } catch (TTransportException | AccumuloException | AccumuloSecurityException e) {
@@ -232,9 +231,9 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
     }
   }
 
-  protected Status replicateRFiles(final Instance peerInstance, final String peerTserver, final ReplicationTarget target, final Path p, final Status status,
-      final long sizeLimit, final String remoteTableId, final TCredentials tcreds, final ReplicaSystemHelper helper) throws TTransportException,
-      AccumuloException, AccumuloSecurityException {
+  protected Status replicateRFiles(ClientContext peerContext, final String peerTserver, final ReplicationTarget target,
+      final Path p, final Status status, final long sizeLimit, final String remoteTableId, final TCredentials tcreds, final ReplicaSystemHelper helper)
+      throws TTransportException, AccumuloException, AccumuloSecurityException {
     DataInputStream input;
     try {
       input = getRFileInputStream(p);
@@ -246,7 +245,7 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
     Status lastStatus = status, currentStatus = status;
     while (true) {
       // Read and send a batch of mutations
-      ReplicationStats replResult = ReplicationClient.executeServicerWithReturn(peerInstance, peerTserver, new RFileClientExecReturn(target, input, p,
+      ReplicationStats replResult = ReplicationClient.executeServicerWithReturn(peerContext, peerTserver, new RFileClientExecReturn(target, input, p,
           currentStatus, sizeLimit, remoteTableId, tcreds));
 
       // Catch the overflow
@@ -278,9 +277,9 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
     }
   }
 
-  protected Status replicateLogs(final Instance peerInstance, final String peerTserver, final ReplicationTarget target, final Path p, final Status status,
-      final long sizeLimit, final String remoteTableId, final TCredentials tcreds, final ReplicaSystemHelper helper) throws TTransportException,
-      AccumuloException, AccumuloSecurityException {
+  protected Status replicateLogs(ClientContext peerContext, final String peerTserver, final ReplicationTarget target,
+      final Path p, final Status status, final long sizeLimit, final String remoteTableId, final TCredentials tcreds, final ReplicaSystemHelper helper)
+      throws TTransportException, AccumuloException, AccumuloSecurityException {
 
     final Set<Integer> tids;
     final DataInputStream input;
@@ -334,17 +333,17 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
       span = Trace.start("Replicate WAL batch");
       span.data("Batch size (bytes)", Long.toString(sizeLimit));
       span.data("File", p.toString());
-      span.data("Peer instance name", peerInstance.getInstanceName());
+      span.data("Peer instance name", peerContext.getInstance().getInstanceName());
       span.data("Peer tserver", peerTserver);
       span.data("Remote table ID", remoteTableId);
 
       ReplicationStats replResult;
       try {
         // Read and send a batch of mutations
-        replResult = ReplicationClient.executeServicerWithReturn(peerInstance, peerTserver, new WalClientExecReturn(target, input, p, currentStatus, sizeLimit,
+        replResult = ReplicationClient.executeServicerWithReturn(peerContext, peerTserver, new WalClientExecReturn(target, input, p, currentStatus, sizeLimit,
             remoteTableId, tcreds, tids));
       } catch (Exception e) {
-        log.error("Caught exception replicating data to {} at {}", peerInstance.getInstanceName(), peerTserver, e);
+        log.error("Caught exception replicating data to {} at {}", peerContext.getInstance().getInstanceName(), peerTserver, e);
         throw e;
       } finally {
         span.stop();
@@ -479,14 +478,14 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
     }
   }
 
-  protected Credentials getCredentialsForPeer(AccumuloConfiguration conf, ReplicationTarget target) {
-    Preconditions.checkNotNull(conf);
+  private ClientContext getContextForPeer(AccumuloConfiguration localConf, ReplicationTarget target) {
+    Preconditions.checkNotNull(localConf);
     Preconditions.checkNotNull(target);
 
     String peerName = target.getPeerName();
     String userKey = Property.REPLICATION_PEER_USER.getKey() + peerName, passwordKey = Property.REPLICATION_PEER_PASSWORD.getKey() + peerName;
-    Map<String,String> peerUsers = conf.getAllPropertiesWithPrefix(Property.REPLICATION_PEER_USER);
-    Map<String,String> peerPasswords = conf.getAllPropertiesWithPrefix(Property.REPLICATION_PEER_PASSWORD);
+    Map<String,String> peerUsers = localConf.getAllPropertiesWithPrefix(Property.REPLICATION_PEER_USER);
+    Map<String,String> peerPasswords = localConf.getAllPropertiesWithPrefix(Property.REPLICATION_PEER_PASSWORD);
 
     String user = peerUsers.get(userKey);
     String password = peerPasswords.get(passwordKey);
@@ -494,10 +493,10 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
       throw new IllegalArgumentException(userKey + " and " + passwordKey + " not configured, cannot replicate");
     }
 
-    return new Credentials(user, new PasswordToken(password));
+    return new ClientContext(getPeerInstance(target), new Credentials(user, new PasswordToken(password)), localConf);
   }
 
-  protected Instance getPeerInstance(ReplicationTarget target) {
+  private Instance getPeerInstance(ReplicationTarget target) {
     return new ZooKeeperInstance(instanceName, zookeepers);
   }
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/BatchWriterReplicationReplayer.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/BatchWriterReplicationReplayer.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/BatchWriterReplicationReplayer.java
index 358857d..8a80ea3 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/BatchWriterReplicationReplayer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/BatchWriterReplicationReplayer.java
@@ -24,12 +24,13 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.Mutation;
@@ -53,10 +54,11 @@ public class BatchWriterReplicationReplayer implements AccumuloReplicationReplay
   private static final Logger log = LoggerFactory.getLogger(BatchWriterReplicationReplayer.class);
 
   @Override
-  public long replicateLog(Connector conn, AccumuloConfiguration conf, String tableName, WalEdits data) throws RemoteReplicationException {
+  public long replicateLog(ClientContext context, String tableName, WalEdits data) throws RemoteReplicationException, AccumuloException,
+      AccumuloSecurityException {
     final LogFileKey key = new LogFileKey();
     final LogFileValue value = new LogFileValue();
-    final long memoryInBytes = conf.getMemoryInBytes(Property.TSERV_REPLICATION_BW_REPLAYER_MEMORY);
+    final long memoryInBytes = context.getConfiguration().getMemoryInBytes(Property.TSERV_REPLICATION_BW_REPLAYER_MEMORY);
 
     BatchWriter bw = null;
     long mutationsApplied = 0l;
@@ -78,7 +80,7 @@ public class BatchWriterReplicationReplayer implements AccumuloReplicationReplay
           BatchWriterConfig bwConfig = new BatchWriterConfig();
           bwConfig.setMaxMemory(memoryInBytes);
           try {
-            bw = conn.createBatchWriter(tableName, bwConfig);
+            bw = context.getConnector().createBatchWriter(tableName, bwConfig);
           } catch (TableNotFoundException e) {
             throw new RemoteReplicationException(RemoteReplicationErrorCode.TABLE_DOES_NOT_EXIST, "Table " + tableName + " does not exist");
           }
@@ -159,7 +161,7 @@ public class BatchWriterReplicationReplayer implements AccumuloReplicationReplay
   }
 
   @Override
-  public long replicateKeyValues(Connector conn, String tableName, KeyValues kvs) {
+  public long replicateKeyValues(ClientContext context, String tableName, KeyValues kvs) {
     // TODO Implement me
     throw new UnsupportedOperationException();
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java
index 30361b1..e2af4df 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java
@@ -20,8 +20,6 @@ import java.util.Map;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
@@ -32,10 +30,8 @@ import org.apache.accumulo.core.replication.thrift.RemoteReplicationErrorCode;
 import org.apache.accumulo.core.replication.thrift.RemoteReplicationException;
 import org.apache.accumulo.core.replication.thrift.ReplicationServicer.Iface;
 import org.apache.accumulo.core.replication.thrift.WalEdits;
-import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.tserver.TabletServer;
 import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -46,35 +42,27 @@ import org.slf4j.LoggerFactory;
 public class ReplicationServicerHandler implements Iface {
   private static final Logger log = LoggerFactory.getLogger(ReplicationServicerHandler.class);
 
-  private Instance inst;
+  private TabletServer tabletServer;
 
-  public ReplicationServicerHandler(Instance inst) {
-    this.inst = inst;
+  public ReplicationServicerHandler(TabletServer tabletServer) {
+    this.tabletServer = tabletServer;
   }
 
   @Override
   public long replicateLog(String tableId, WalEdits data, TCredentials tcreds) throws RemoteReplicationException, TException {
     log.debug("Got replication request to tableID {} with {} edits", tableId, data.getEditsSize());
+    tabletServer.getSecurityOperation().authenticateUser(tabletServer.rpcCreds(), tcreds);
 
-    Credentials creds = Credentials.fromThrift(tcreds);
-    Connector conn;
     String tableName;
 
     try {
-      conn = inst.getConnector(creds.getPrincipal(), creds.getToken());
-    } catch (AccumuloException | AccumuloSecurityException e) {
-      log.error("Could not get connection", e);
-      throw new RemoteReplicationException(RemoteReplicationErrorCode.CANNOT_AUTHENTICATE, "Cannot get connector as " + creds.getPrincipal());
-    }
-
-    try {
-      tableName = Tables.getTableName(inst, tableId);
+      tableName = Tables.getTableName(tabletServer.getInstance(), tableId);
     } catch (TableNotFoundException e) {
       log.error("Could not find table with id {}", tableId);
       throw new RemoteReplicationException(RemoteReplicationErrorCode.TABLE_DOES_NOT_EXIST, "Table with id " + tableId + " does not exist");
     }
 
-    AccumuloConfiguration conf = new ServerConfigurationFactory(inst).getConfiguration();
+    AccumuloConfiguration conf = tabletServer.getConfiguration();
 
     Map<String,String> replicationHandlers = conf.getAllPropertiesWithPrefix(Property.TSERV_REPLICATION_REPLAYERS);
     String propertyForHandlerTable = Property.TSERV_REPLICATION_REPLAYERS.getKey() + tableId;
@@ -110,7 +98,14 @@ public class ReplicationServicerHandler implements Iface {
           + clz.getName());
     }
 
-    long entriesReplicated = replayer.replicateLog(conn, new ServerConfigurationFactory(HdfsZooInstance.getInstance()).getConfiguration(), tableName, data);
+    long entriesReplicated;
+    try {
+      entriesReplicated = replayer.replicateLog(tabletServer, tableName, data);
+    } catch (AccumuloException | AccumuloSecurityException e) {
+      log.error("Could not get connection", e);
+      throw new RemoteReplicationException(RemoteReplicationErrorCode.CANNOT_AUTHENTICATE, "Cannot get connector as "
+          + tabletServer.getCredentials().getPrincipal());
+    }
 
     log.debug("Replicated {} mutations to {}", entriesReplicated, tableName);
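
Read end to end, the handler's new flow is roughly the sketch below. It uses only calls that appear in the hunks above (tabletServer.rpcCreds(), getSecurityOperation(), getInstance(), getConfiguration(), replayer.replicateLog); the per-table replayer lookup and the TableNotFoundException handling are elided.

    // Sketch of the refactored flow: authenticate the caller against the tablet server's
    // own RPC credentials, then let the replayer build its Connector from the same context.
    tabletServer.getSecurityOperation().authenticateUser(tabletServer.rpcCreds(), tcreds);
    String tableName = Tables.getTableName(tabletServer.getInstance(), tableId); // TableNotFoundException handling elided
    AccumuloConfiguration conf = tabletServer.getConfiguration();
    // ... resolve the AccumuloReplicationReplayer configured for this table id (elided) ...
    long entriesReplicated;
    try {
      entriesReplicated = replayer.replicateLog(tabletServer, tableName, data);
    } catch (AccumuloException | AccumuloSecurityException e) {
      throw new RemoteReplicationException(RemoteReplicationErrorCode.CANNOT_AUTHENTICATE, "Cannot get connector as "
          + tabletServer.getCredentials().getPrincipal());
    }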
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationWorker.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationWorker.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationWorker.java
index 20da0d6..1d20e2b 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationWorker.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationWorker.java
@@ -69,7 +69,7 @@ public class ReplicationWorker implements Runnable {
         workQueue = new DistributedWorkQueue(ZooUtil.getRoot(inst) + ReplicationConstants.ZOO_WORK_QUEUE, conf);
       }
 
-      workQueue.startProcessing(new ReplicationProcessor(inst, conf, fs, SystemCredentials.get()), executor);
+      workQueue.startProcessing(new ReplicationProcessor(inst, conf, fs, SystemCredentials.get(inst)), executor);
     } catch (KeeperException | InterruptedException e) {
       throw new RuntimeException(e);
     }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
index 4f19ff9..869cc33 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
@@ -52,6 +52,7 @@ import org.apache.accumulo.core.trace.Span;
 import org.apache.accumulo.core.trace.Trace;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
 import org.apache.accumulo.core.util.LocalityGroupUtil.LocalityGroupConfigurationError;
+import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.problems.ProblemReport;
@@ -103,8 +104,11 @@ public class Compactor implements Callable<CompactionStats> {
   // a unique id to identify a compactor
   private final long compactorID = nextCompactorID.getAndIncrement();
   protected volatile Thread thread;
+  private final AccumuloServerContext context;
 
-  public long getCompactorID() { return compactorID; }
+  public long getCompactorID() {
+    return compactorID;
+  }
 
   private synchronized void setLocalityGroup(String name) {
     this.currentLocalityGroup = name;
@@ -133,8 +137,9 @@ public class Compactor implements Callable<CompactionStats> {
     return compactions;
   }
 
-  public Compactor(Tablet tablet, Map<FileRef,DataFileValue> files, InMemoryMap imm, FileRef outputFile, boolean propogateDeletes,
-      CompactionEnv env, List<IteratorSetting> iterators, int reason, AccumuloConfiguration tableConfiguation) {
+  public Compactor(AccumuloServerContext context, Tablet tablet, Map<FileRef,DataFileValue> files, InMemoryMap imm, FileRef outputFile,
+      boolean propogateDeletes, CompactionEnv env, List<IteratorSetting> iterators, int reason, AccumuloConfiguration tableConfiguation) {
+    this.context = context;
     this.extent = tablet.getExtent();
     this.fs = tablet.getTabletServer().getFileSystem();
     this.acuTableConf = tableConfiguation;
@@ -161,7 +166,9 @@ public class Compactor implements Callable<CompactionStats> {
     return outputFile.toString();
   }
 
-  MajorCompactionReason getMajorCompactionReason() { return MajorCompactionReason.values()[reason]; }
+  MajorCompactionReason getMajorCompactionReason() {
+    return MajorCompactionReason.values()[reason];
+  }
 
   @Override
   public CompactionStats call() throws IOException, CompactionCanceledException {
@@ -272,7 +279,7 @@ public class Compactor implements Callable<CompactionStats> {
 
         readers.add(reader);
 
-        SortedKeyValueIterator<Key,Value> iter = new ProblemReportingIterator(extent.getTableId().toString(), mapFile.path().toString(), false, reader);
+        SortedKeyValueIterator<Key,Value> iter = new ProblemReportingIterator(context, extent.getTableId().toString(), mapFile.path().toString(), false, reader);
 
         if (filesToCompact.get(mapFile).isTimeSet()) {
           iter = new TimeSettingIterator(iter, filesToCompact.get(mapFile).getTime());
@@ -282,7 +289,7 @@ public class Compactor implements Callable<CompactionStats> {
 
       } catch (Throwable e) {
 
-        ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_READ, mapFile.path().toString(), e));
+        ProblemReports.getInstance(context).report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_READ, mapFile.path().toString(), e));
 
         log.warn("Some problem opening map file " + mapFile + " " + e.getMessage(), e);
         // failed to open some map file... close the ones that were opened
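
The compactor shows the same threading: the AccumuloServerContext handed to its constructor is what the problem-reporting machinery now consumes. A consolidated sketch of the read path above, assuming reader, mapFile and extent are in scope as in the surrounding method:

    try {
      // The context flows from the constructor into both the iterator wrapper and the
      // problem-report lookup, which previously was a no-arg static singleton.
      SortedKeyValueIterator<Key,Value> iter =
          new ProblemReportingIterator(context, extent.getTableId().toString(), mapFile.path().toString(), false, reader);
      // ... time-setting wrapper and bookkeeping as above ...
    } catch (Throwable e) {
      ProblemReports.getInstance(context).report(
          new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_READ, mapFile.path().toString(), e));
    }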

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
index 3eb7229..8ba8128 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
@@ -35,7 +35,6 @@ import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.apache.accumulo.core.replication.ReplicationConfigurationUtil;
 import org.apache.accumulo.core.replication.StatusUtil;
-import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.core.trace.Span;
 import org.apache.accumulo.core.trace.Trace;
 import org.apache.accumulo.core.util.MapCounter;
@@ -43,11 +42,9 @@ import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.MasterMetadataUtil;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.util.ReplicationTableUtil;
@@ -142,7 +139,7 @@ class DatafileManager {
 
     if (filesToDelete.size() > 0) {
       log.debug("Removing scan refs from metadata " + tablet.getExtent() + " " + filesToDelete);
-      MetadataTableUtil.removeScanFiles(tablet.getExtent(), filesToDelete, SystemCredentials.get(), tablet.getTabletServer().getLock());
+      MetadataTableUtil.removeScanFiles(tablet.getExtent(), filesToDelete, tablet.getTabletServer(), tablet.getTabletServer().getLock());
     }
   }
 
@@ -163,7 +160,7 @@ class DatafileManager {
 
     if (filesToDelete.size() > 0) {
       log.debug("Removing scan refs from metadata " + tablet.getExtent() + " " + filesToDelete);
-      MetadataTableUtil.removeScanFiles(tablet.getExtent(), filesToDelete, SystemCredentials.get(), tablet.getTabletServer().getLock());
+      MetadataTableUtil.removeScanFiles(tablet.getExtent(), filesToDelete, tablet.getTabletServer(), tablet.getTabletServer().getLock());
     }
   }
 
@@ -243,10 +240,9 @@ class DatafileManager {
     }
 
     synchronized (bulkFileImportLock) {
-      Credentials creds = SystemCredentials.get();
       Connector conn;
       try {
-        conn = HdfsZooInstance.getInstance().getConnector(creds.getPrincipal(), creds.getToken());
+        conn = tablet.getTabletServer().getConnector();
       } catch (Exception ex) {
         throw new IOException(ex);
       }
@@ -394,7 +390,7 @@ class DatafileManager {
     // very important to write delete entries outside of log lock, because
     // this metadata write does not go up... it goes sideways or to itself
     if (absMergeFile != null)
-      MetadataTableUtil.addDeleteEntries(tablet.getExtent(), Collections.singleton(absMergeFile), SystemCredentials.get());
+      MetadataTableUtil.addDeleteEntries(tablet.getExtent(), Collections.singleton(absMergeFile), tablet.getTabletServer());
 
     Set<String> unusedWalLogs = tablet.beginClearingUnusedLogs();
     boolean replicate = ReplicationConfigurationUtil.isEnabled(tablet.getExtent(), tablet.getTableConfiguration());
@@ -428,7 +424,7 @@ class DatafileManager {
         if (log.isDebugEnabled()) {
           log.debug("Recording that data has been ingested into " + tablet.getExtent() + " using " + logFileOnly);
         }
-        ReplicationTableUtil.updateFiles(SystemCredentials.get(), tablet.getExtent(), logFileOnly, StatusUtil.openWithUnknownLength());
+        ReplicationTableUtil.updateFiles(tablet.getTabletServer(), tablet.getExtent(), logFileOnly, StatusUtil.openWithUnknownLength());
       }
     } finally {
       tablet.finishClearingUnusedLogs();
@@ -497,8 +493,7 @@ class DatafileManager {
     majorCompactingFiles.clear();
   }
 
-  void bringMajorCompactionOnline(Set<FileRef> oldDatafiles, FileRef tmpDatafile, FileRef newDatafile, Long compactionId, DataFileValue dfv)
-      throws IOException {
+  void bringMajorCompactionOnline(Set<FileRef> oldDatafiles, FileRef tmpDatafile, FileRef newDatafile, Long compactionId, DataFileValue dfv) throws IOException {
     final KeyExtent extent = tablet.getExtent();
     long t1, t2;
 
@@ -544,7 +539,8 @@ class DatafileManager {
         // rename the compacted map file, in case
         // the system goes down
 
-        RootFiles.replaceFiles(tablet.getTableConfiguration(), tablet.getTabletServer().getFileSystem(), tablet.getLocation(), oldDatafiles, tmpDatafile, newDatafile);
+        RootFiles.replaceFiles(tablet.getTableConfiguration(), tablet.getTabletServer().getFileSystem(), tablet.getLocation(), oldDatafiles, tmpDatafile,
+            newDatafile);
       }
 
       // atomically remove old files and add new file
@@ -579,8 +575,8 @@ class DatafileManager {
       Set<FileRef> filesInUseByScans = waitForScansToFinish(oldDatafiles, false, 10000);
       if (filesInUseByScans.size() > 0)
         log.debug("Adding scan refs to metadata " + extent + " " + filesInUseByScans);
-      MasterMetadataUtil.replaceDatafiles(extent, oldDatafiles, filesInUseByScans, newDatafile, compactionId, dfv, SystemCredentials.get(),
-          tablet.getTabletServer().getClientAddressString(), lastLocation, tablet.getTabletServer().getLock());
+      MasterMetadataUtil.replaceDatafiles(tablet.getTabletServer(), extent, oldDatafiles, filesInUseByScans, newDatafile, compactionId, dfv, tablet
+          .getTabletServer().getClientAddressString(), lastLocation, tablet.getTabletServer().getLock());
       removeFilesAfterScan(filesInUseByScans);
     }
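
DatafileManager illustrates the other half of the pattern: where it used to mint its own credentials or instance, it now leans on the tablet's TabletServer, both for metadata writes and for obtaining a Connector during bulk imports. Consolidated from the hunks above (exception handling as shown there):

    // Metadata updates identify themselves via the tablet server (the server-side context)
    // instead of SystemCredentials.get(), and connectors come from the same place.
    MetadataTableUtil.removeScanFiles(tablet.getExtent(), filesToDelete, tablet.getTabletServer(), tablet.getTabletServer().getLock());

    Connector conn;
    try {
      conn = tablet.getTabletServer().getConnector();
    } catch (Exception ex) {
      throw new IOException(ex);
    }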
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java
index 115aed7..b513167 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java
@@ -27,7 +27,6 @@ import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.problems.ProblemReport;
@@ -35,6 +34,7 @@ import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.problems.ProblemType;
 import org.apache.accumulo.tserver.InMemoryMap;
 import org.apache.accumulo.tserver.MinorCompactionReason;
+import org.apache.accumulo.tserver.TabletServer;
 import org.apache.hadoop.fs.Path;
 import org.apache.log4j.Logger;
 
@@ -51,8 +51,11 @@ public class MinorCompactor extends Compactor {
     return Collections.singletonMap(mergeFile, dfv);
   }
   
-  public MinorCompactor(Tablet tablet, InMemoryMap imm, FileRef mergeFile, DataFileValue dfv, FileRef outputFile, MinorCompactionReason mincReason, TableConfiguration tableConfig) {
-    super(tablet, toFileMap(mergeFile, dfv), imm, outputFile, true, new CompactionEnv() {
+  private final TabletServer tabletServer;
+
+  public MinorCompactor(TabletServer tabletServer, Tablet tablet, InMemoryMap imm, FileRef mergeFile, DataFileValue dfv, FileRef outputFile,
+      MinorCompactionReason mincReason, TableConfiguration tableConfig) {
+    super(tabletServer, tablet, toFileMap(mergeFile, dfv), imm, outputFile, true, new CompactionEnv() {
       
       @Override
       public boolean isCompactionEnabled() {
@@ -64,11 +67,12 @@ public class MinorCompactor extends Compactor {
         return IteratorScope.minc;
       }
     }, Collections.<IteratorSetting>emptyList(), mincReason.ordinal(), tableConfig);
+    this.tabletServer = tabletServer;
   }
   
   private boolean isTableDeleting() {
     try {
-      return Tables.getTableState(HdfsZooInstance.getInstance(), extent.getTableId().toString()) == TableState.DELETING;
+      return Tables.getTableState(tabletServer.getInstance(), extent.getTableId().toString()) == TableState.DELETING;
     } catch (Exception e) {
       log.warn("Failed to determine if table " + extent.getTableId() + " was deleting ", e);
       return false; // can not get positive confirmation that its deleting.
@@ -95,19 +99,19 @@ public class MinorCompactor extends Compactor {
           // (int)(map.size()/((t2 - t1)/1000.0)), (t2 - t1)/1000.0, estimatedSizeInBytes()));
           
           if (reportedProblem) {
-            ProblemReports.getInstance().deleteProblemReport(getExtent().getTableId().toString(), ProblemType.FILE_WRITE, getOutputFile());
+            ProblemReports.getInstance(tabletServer).deleteProblemReport(getExtent().getTableId().toString(), ProblemType.FILE_WRITE, getOutputFile());
           }
           
           return ret;
         } catch (IOException e) {
           log.warn("MinC failed (" + e.getMessage() + ") to create " + getOutputFile() + " retrying ...");
-          ProblemReports.getInstance().report(new ProblemReport(getExtent().getTableId().toString(), ProblemType.FILE_WRITE, getOutputFile(), e));
+          ProblemReports.getInstance(tabletServer).report(new ProblemReport(getExtent().getTableId().toString(), ProblemType.FILE_WRITE, getOutputFile(), e));
           reportedProblem = true;
         } catch (RuntimeException e) {
           // if this is coming from a user iterator, it is possible that the user could change the iterator config and that the
           // minor compaction would succeed
           log.warn("MinC failed (" + e.getMessage() + ") to create " + getOutputFile() + " retrying ...", e);
-          ProblemReports.getInstance().report(new ProblemReport(getExtent().getTableId().toString(), ProblemType.FILE_WRITE, getOutputFile(), e));
+          ProblemReports.getInstance(tabletServer).report(new ProblemReport(getExtent().getTableId().toString(), ProblemType.FILE_WRITE, getOutputFile(), e));
           reportedProblem = true;
         } catch (CompactionCanceledException e) {
           throw new IllegalStateException(e);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
index 1f3306e..bc55c4f 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
@@ -81,7 +81,6 @@ import org.apache.accumulo.core.replication.StatusUtil;
 import org.apache.accumulo.core.replication.proto.Replication.Status;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
 import org.apache.accumulo.core.trace.Span;
@@ -89,7 +88,7 @@ import org.apache.accumulo.core.trace.Trace;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
@@ -101,7 +100,6 @@ import org.apache.accumulo.server.master.tableOps.CompactionIterators;
 import org.apache.accumulo.server.problems.ProblemReport;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.problems.ProblemType;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tablets.TabletTime;
 import org.apache.accumulo.server.tablets.UniqueNameAllocator;
 import org.apache.accumulo.server.util.FileUtil;
@@ -331,7 +329,7 @@ public class Tablet implements TabletCommitter {
     return null;
   }
 
-  private static SortedMap<FileRef,DataFileValue> lookupDatafiles(AccumuloConfiguration conf, VolumeManager fs, KeyExtent extent,
+  private static SortedMap<FileRef,DataFileValue> lookupDatafiles(AccumuloServerContext context, VolumeManager fs, KeyExtent extent,
       SortedMap<Key,Value> tabletsKeyValues) throws IOException {
 
     TreeMap<FileRef,DataFileValue> datafiles = new TreeMap<FileRef,DataFileValue>();
@@ -370,12 +368,12 @@ public class Tablet implements TabletCommitter {
     return datafiles;
   }
 
-  private static List<LogEntry> lookupLogEntries(KeyExtent ke, SortedMap<Key,Value> tabletsKeyValues) {
+  private static List<LogEntry> lookupLogEntries(AccumuloServerContext context, KeyExtent ke, SortedMap<Key,Value> tabletsKeyValues) {
     List<LogEntry> logEntries = new ArrayList<LogEntry>();
 
     if (ke.isMeta()) {
       try {
-        logEntries = MetadataTableUtil.getLogEntries(SystemCredentials.get(), ke);
+        logEntries = MetadataTableUtil.getLogEntries(context, ke);
       } catch (Exception ex) {
         throw new RuntimeException("Unable to read tablet log entries", ex);
       }
@@ -443,7 +441,7 @@ public class Tablet implements TabletCommitter {
 
   public Tablet(TabletServer tabletServer, KeyExtent extent, Text location, TabletResourceManager trm, SortedMap<Key,Value> tabletsKeyValues)
       throws IOException {
-    this(tabletServer, extent, location, trm, lookupLogEntries(extent, tabletsKeyValues), lookupDatafiles(tabletServer.getConfiguration(),
+    this(tabletServer, extent, location, trm, lookupLogEntries(tabletServer, extent, tabletsKeyValues), lookupDatafiles(tabletServer,
         tabletServer.getFileSystem(), extent, tabletsKeyValues), lookupTime(tabletServer.getConfiguration(), extent, tabletsKeyValues), lookupLastServer(
         extent, tabletsKeyValues), lookupScanFiles(extent, tabletsKeyValues, tabletServer.getFileSystem()), lookupFlushID(extent, tabletsKeyValues),
         lookupCompactID(extent, tabletsKeyValues));
@@ -468,8 +466,8 @@ public class Tablet implements TabletCommitter {
 
     this.tableConfiguration = tblConf;
 
-    TabletFiles tabletPaths = VolumeUtil.updateTabletVolumes(tabletServer.getLock(), tabletServer.getFileSystem(), extent, new TabletFiles(location.toString(),
-        rawLogEntries, rawDatafiles), ReplicationConfigurationUtil.isEnabled(extent, this.tableConfiguration));
+    TabletFiles tabletPaths = VolumeUtil.updateTabletVolumes(tabletServer, tabletServer.getLock(), tabletServer.getFileSystem(), extent, new TabletFiles(
+        location.toString(), rawLogEntries, rawDatafiles), ReplicationConfigurationUtil.isEnabled(extent, this.tableConfiguration));
 
     Path locationPath;
 
@@ -610,7 +608,7 @@ public class Tablet implements TabletCommitter {
 
         if (count[0] == 0) {
           log.debug("No replayed mutations applied, removing unused entries for " + extent);
-          MetadataTableUtil.removeUnusedWALEntries(extent, logEntries, tabletServer.getLock());
+          MetadataTableUtil.removeUnusedWALEntries(getTabletServer(), extent, logEntries, tabletServer.getLock());
 
           // No replication update to be made because the fact that this tablet didn't use any mutations
           // from the WAL implies nothing about use of this WAL by other tablets. Do nothing.
@@ -628,7 +626,7 @@ public class Tablet implements TabletCommitter {
           Status status = StatusUtil.openWithUnknownLength();
           for (LogEntry logEntry : logEntries) {
             log.debug("Writing updated status to metadata table for " + logEntry.logSet + " " + ProtobufUtil.toString(status));
-            ReplicationTableUtil.updateFiles(SystemCredentials.get(), extent, logEntry.logSet, status);
+            ReplicationTableUtil.updateFiles(tabletServer, extent, logEntry.logSet, status);
           }
         }
 
@@ -943,7 +941,7 @@ public class Tablet implements TabletCommitter {
         if (mergeFile != null)
           dfv = getDatafileManager().getDatafileSizes().get(mergeFile);
 
-        MinorCompactor compactor = new MinorCompactor(this, memTable, mergeFile, dfv, tmpDatafile, mincReason, tableConfiguration);
+        MinorCompactor compactor = new MinorCompactor(tabletServer, this, memTable, mergeFile, dfv, tmpDatafile, mincReason, tableConfiguration);
         stats = compactor.call();
       } finally {
         span.stop();
@@ -1026,10 +1024,9 @@ public class Tablet implements TabletCommitter {
       }
 
       if (updateMetadata) {
-        Credentials creds = SystemCredentials.get();
         // if multiple threads were allowed to update this outside of a sync block, then it would be
         // a race condition
-        MetadataTableUtil.updateTabletFlushID(extent, tableFlushID, creds, getTabletServer().getLock());
+        MetadataTableUtil.updateTabletFlushID(extent, tableFlushID, tabletServer, getTabletServer().getLock());
       } else if (initiateMinor)
         initiateMinorCompaction(tableFlushID, MinorCompactionReason.USER);
 
@@ -1126,7 +1123,7 @@ public class Tablet implements TabletCommitter {
 
   public long getFlushID() throws NoNodeException {
     try {
-      String zTablePath = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZTABLES + "/" + extent.getTableId()
+      String zTablePath = Constants.ZROOT + "/" + tabletServer.getInstance().getInstanceID() + Constants.ZTABLES + "/" + extent.getTableId()
           + Constants.ZTABLE_FLUSH_ID;
       return Long.parseLong(new String(ZooReaderWriter.getInstance().getData(zTablePath, null), UTF_8));
     } catch (InterruptedException e) {
@@ -1143,7 +1140,7 @@ public class Tablet implements TabletCommitter {
   }
 
   long getCompactionCancelID() {
-    String zTablePath = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZTABLES + "/" + extent.getTableId()
+    String zTablePath = Constants.ZROOT + "/" + tabletServer.getInstance().getInstanceID() + Constants.ZTABLES + "/" + extent.getTableId()
         + Constants.ZTABLE_COMPACT_CANCEL_ID;
 
     try {
@@ -1157,7 +1154,7 @@ public class Tablet implements TabletCommitter {
 
   public Pair<Long,List<IteratorSetting>> getCompactionID() throws NoNodeException {
     try {
-      String zTablePath = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZTABLES + "/" + extent.getTableId()
+      String zTablePath = Constants.ZROOT + "/" + tabletServer.getInstance().getInstanceID() + Constants.ZTABLES + "/" + extent.getTableId()
           + Constants.ZTABLE_COMPACT_ID;
 
       String[] tokens = new String(ZooReaderWriter.getInstance().getData(zTablePath, null), UTF_8).split(",");
@@ -1461,7 +1458,8 @@ public class Tablet implements TabletCommitter {
         }
       }
       if (err != null) {
-        ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.TABLET_LOAD, this.extent.toString(), err));
+        ProblemReports.getInstance(tabletServer)
+            .report(new ProblemReport(extent.getTableId().toString(), ProblemType.TABLET_LOAD, this.extent.toString(), err));
         log.error("Tablet closed consistency check has failed for " + this.extent + " giving up and closing");
       }
     }
@@ -1501,7 +1499,7 @@ public class Tablet implements TabletCommitter {
     }
 
     try {
-      Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> fileLog = MetadataTableUtil.getFileAndLogEntries(SystemCredentials.get(), extent);
+      Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> fileLog = MetadataTableUtil.getFileAndLogEntries(tabletServer, extent);
 
       if (fileLog.getFirst().size() != 0) {
         String msg = "Closed tablet " + extent + " has walog entries in " + MetadataTable.NAME + " " + fileLog.getFirst();
@@ -1937,7 +1935,7 @@ public class Tablet implements TabletCommitter {
 
           // always propagate deletes, unless last batch
           boolean lastBatch = filesToCompact.isEmpty();
-          Compactor compactor = new Compactor(this, copy, null, compactTmpName, lastBatch ? propogateDeletes : true, cenv, compactionIterators,
+          Compactor compactor = new Compactor(tabletServer, this, copy, null, compactTmpName, lastBatch ? propogateDeletes : true, cenv, compactionIterators,
               reason.ordinal(), tableConf);
 
           CompactionStats mcs = compactor.call();
@@ -2051,7 +2049,7 @@ public class Tablet implements TabletCommitter {
       try {
         majCStats = _majorCompact(reason);
         if (reason == MajorCompactionReason.CHOP) {
-          MetadataTableUtil.chopped(getExtent(), this.getTabletServer().getLock());
+          MetadataTableUtil.chopped(getTabletServer(), getExtent(), this.getTabletServer().getLock());
           getTabletServer().enqueueMasterMessage(new TabletStatusMessage(TabletLoadState.CHOPPED, extent));
         }
         success = true;
@@ -2227,12 +2225,12 @@ public class Tablet implements TabletCommitter {
       // it is possible that some of the bulk loading flags will be deleted after being read below because the bulk load
       // finishes.... therefore split could propagate load flags for a finished bulk load... there is a special iterator
       // on the metadata table to clean up this type of garbage
-      Map<FileRef,Long> bulkLoadedFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(), extent);
+      Map<FileRef,Long> bulkLoadedFiles = MetadataTableUtil.getBulkFilesLoaded(getTabletServer(), extent);
 
-      MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SystemCredentials.get(), getTabletServer().getLock());
-      MasterMetadataUtil.addNewTablet(low, lowDirectory, getTabletServer().getTabletSession(), lowDatafileSizes, bulkLoadedFiles, SystemCredentials.get(),
-          time, lastFlushID, lastCompactID, getTabletServer().getLock());
-      MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SystemCredentials.get(), getTabletServer().getLock());
+      MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, getTabletServer(), getTabletServer().getLock());
+      MasterMetadataUtil.addNewTablet(getTabletServer(), low, lowDirectory, getTabletServer().getTabletSession(), lowDatafileSizes, bulkLoadedFiles, time,
+          lastFlushID, lastCompactID, getTabletServer().getLock());
+      MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, getTabletServer(), getTabletServer().getLock());
 
       log.log(TLevel.TABLET_HIST, extent + " split " + low + " " + high);
 
@@ -2532,7 +2530,7 @@ public class Tablet implements TabletCommitter {
       try {
         // if multiple threads were allowed to update this outside of a sync block, then it would be
         // a race condition
-        MetadataTableUtil.updateTabletCompactID(extent, compactionId, SystemCredentials.get(), getTabletServer().getLock());
+        MetadataTableUtil.updateTabletCompactID(extent, compactionId, getTabletServer(), getTabletServer().getLock());
       } finally {
         synchronized (this) {
           majorCompactionState = null;
@@ -2575,8 +2573,7 @@ public class Tablet implements TabletCommitter {
       if (bulkTime > persistedTime)
         persistedTime = bulkTime;
 
-      MetadataTableUtil.updateTabletDataFile(tid, extent, paths, tabletTime.getMetadataValue(persistedTime), SystemCredentials.get(), getTabletServer()
-          .getLock());
+      MetadataTableUtil.updateTabletDataFile(tid, extent, paths, tabletTime.getMetadataValue(persistedTime), getTabletServer(), getTabletServer().getLock());
     }
 
   }
@@ -2588,7 +2585,7 @@ public class Tablet implements TabletCommitter {
         persistedTime = maxCommittedTime;
 
       String time = tabletTime.getMetadataValue(persistedTime);
-      MasterMetadataUtil.updateTabletDataFile(extent, newDatafile, absMergeFile, dfv, time, SystemCredentials.get(), filesInUseByScans,
+      MasterMetadataUtil.updateTabletDataFile(getTabletServer(), extent, newDatafile, absMergeFile, dfv, time, filesInUseByScans,
           tabletServer.getClientAddressString(), tabletServer.getLock(), unusedWalLogs, lastLocation, flushId);
     }
 

