accumulo-commits mailing list archives

From ctubb...@apache.org
Subject [05/10] accumulo git commit: ACCUMULO-3199 Internal refactor to add ClientContext
Date Tue, 25 Nov 2014 22:36:49 GMT
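
A note on the pattern in this commit: the (Instance, Credentials, ClientConfiguration) triple
that the old signatures passed around piecemeal is folded into a single ClientContext, which
then travels as one argument. A minimal sketch of the construction, assembled from call sites
later in this diff (the connect() helper is illustrative, not part of the commit):

    import org.apache.accumulo.core.client.ClientConfiguration;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Instance;
    import org.apache.accumulo.core.client.impl.ClientContext;
    import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
    import org.apache.accumulo.core.security.Credentials;

    public class ClientContextSketch {
      // Bundle the state the old signatures took as separate parameters,
      // then derive the Connector from the context.
      static Connector connect(Instance instance, String principal, AuthenticationToken token) throws Exception {
        Credentials creds = new Credentials(principal, token);
        ClientContext context = new ClientContext(instance, creds, ClientConfiguration.loadDefault());
        return context.getConnector(); // replaces instance.getConnector(principal, token)
      }
    }

Server-side processes use AccumuloServerContext, the server subclass of ClientContext, as the
RandomizeVolumes and TServerUtils hunks below show.
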
http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
index 01eb477..88316c6 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
@@ -30,6 +30,7 @@ import java.util.TreeMap;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.ScannerImpl;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
@@ -44,12 +45,10 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.Da
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.core.util.ColumnFQ;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
@@ -67,8 +66,8 @@ public class MasterMetadataUtil {
   
   private static final Logger log = Logger.getLogger(MasterMetadataUtil.class);
   
-  public static void addNewTablet(KeyExtent extent, String path, TServerInstance location, Map<FileRef,DataFileValue> datafileSizes,
-      Map<FileRef,Long> bulkLoadedFiles, Credentials credentials, String time, long lastFlushID, long lastCompactID, ZooLock zooLock) {
+  public static void addNewTablet(ClientContext context, KeyExtent extent, String path, TServerInstance location,
+      Map<FileRef,DataFileValue> datafileSizes, Map<FileRef,Long> bulkLoadedFiles, String time, long lastFlushID, long lastCompactID, ZooLock zooLock) {
     Mutation m = extent.getPrevRowUpdateMutation();
     
     TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(path.getBytes(UTF_8)));
@@ -92,10 +91,10 @@ public class MasterMetadataUtil {
       m.put(TabletsSection.BulkFileColumnFamily.NAME, entry.getKey().meta(), new Value(tidBytes));
     }
     
-    MetadataTableUtil.update(credentials, zooLock, m, extent);
+    MetadataTableUtil.update(context, zooLock, m, extent);
   }
   
-  public static KeyExtent fixSplit(Text metadataEntry, SortedMap<ColumnFQ,Value> columns, TServerInstance tserver, Credentials credentials, ZooLock lock)
+  public static KeyExtent fixSplit(ClientContext context, Text metadataEntry, SortedMap<ColumnFQ,Value> columns, TServerInstance tserver, ZooLock lock)
       throws AccumuloException, IOException {
     log.info("Incomplete split " + metadataEntry + " attempting to fix");
     
@@ -133,11 +132,11 @@ public class MasterMetadataUtil {
     
     Text table = (new KeyExtent(metadataEntry, (Text) null)).getTableId();
     
-    return fixSplit(table, metadataEntry, metadataPrevEndRow, oper, splitRatio, tserver, credentials, time.toString(), initFlushID, initCompactID, lock);
+    return fixSplit(context, table, metadataEntry, metadataPrevEndRow, oper, splitRatio, tserver, time.toString(), initFlushID, initCompactID, lock);
   }
   
-  private static KeyExtent fixSplit(Text table, Text metadataEntry, Text metadataPrevEndRow, Value oper, double splitRatio, TServerInstance tserver,
-      Credentials credentials, String time, long initFlushID, long initCompactID, ZooLock lock) throws AccumuloException, IOException {
+  private static KeyExtent fixSplit(ClientContext context, Text table, Text metadataEntry, Text metadataPrevEndRow, Value oper, double splitRatio,
+      TServerInstance tserver, String time, long initFlushID, long initCompactID, ZooLock lock) throws AccumuloException, IOException {
     if (metadataPrevEndRow == null)
       // something is wrong, this should not happen... if a tablet is split, it will always have a
       // prev end row....
@@ -146,20 +145,20 @@ public class MasterMetadataUtil {
     // check to see if prev tablet exist in metadata tablet
     Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(table, metadataPrevEndRow)));
     
-    ScannerImpl scanner2 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
+    ScannerImpl scanner2 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
     scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));
     
     VolumeManager fs = VolumeManagerImpl.get();
     if (!scanner2.iterator().hasNext()) {
       log.info("Rolling back incomplete split " + metadataEntry + " " + metadataPrevEndRow);
-      MetadataTableUtil.rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), credentials, lock);
+      MetadataTableUtil.rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), context, lock);
       return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper));
     } else {
       log.info("Finishing incomplete split " + metadataEntry + " " + metadataPrevEndRow);
       
       List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
       
-      Scanner scanner3 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
+      Scanner scanner3 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
       Key rowKey = new Key(metadataEntry);
       
       SortedMap<FileRef,DataFileValue> origDatafileSizes = new TreeMap<FileRef,DataFileValue>();
@@ -177,7 +176,7 @@ public class MasterMetadataUtil {
       MetadataTableUtil.splitDatafiles(table, metadataPrevEndRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), origDatafileSizes, lowDatafileSizes,
           highDatafileSizes, highDatafilesToRemove);
       
-      MetadataTableUtil.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, credentials, lock);
+      MetadataTableUtil.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, context, lock);
       
       return new KeyExtent(metadataEntry, KeyExtent.encodePrevEndRow(metadataPrevEndRow));
     }
@@ -197,17 +196,17 @@ public class MasterMetadataUtil {
     }
   }
   
-  public static void replaceDatafiles(KeyExtent extent, Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path, Long compactionId,
-      DataFileValue size, Credentials credentials, String address, TServerInstance lastLocation, ZooLock zooLock) throws IOException {
-    replaceDatafiles(extent, datafilesToDelete, scanFiles, path, compactionId, size, credentials, address, lastLocation, zooLock, true);
+  public static void replaceDatafiles(ClientContext context, KeyExtent extent, Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path,
+      Long compactionId, DataFileValue size, String address, TServerInstance lastLocation, ZooLock zooLock) throws IOException {
+    replaceDatafiles(context, extent, datafilesToDelete, scanFiles, path, compactionId, size, address, lastLocation, zooLock, true);
   }
   
-  public static void replaceDatafiles(KeyExtent extent, Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path, Long compactionId,
-      DataFileValue size, Credentials credentials, String address, TServerInstance lastLocation, ZooLock zooLock, boolean insertDeleteFlags) throws IOException {
+  public static void replaceDatafiles(ClientContext context, KeyExtent extent, Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path,
+      Long compactionId, DataFileValue size, String address, TServerInstance lastLocation, ZooLock zooLock, boolean insertDeleteFlags) throws IOException {
     
     if (insertDeleteFlags) {
       // add delete flags for those paths before the data file reference is removed
-      MetadataTableUtil.addDeleteEntries(extent, datafilesToDelete, credentials);
+      MetadataTableUtil.addDeleteEntries(extent, datafilesToDelete, context);
     }
     
     // replace data file references to old mapfiles with the new mapfiles
@@ -232,7 +231,7 @@ public class MasterMetadataUtil {
     if (lastLocation != null && !lastLocation.equals(self))
       lastLocation.clearLastLocation(m);
     
-    MetadataTableUtil.update(credentials, zooLock, m, extent);
+    MetadataTableUtil.update(context, zooLock, m, extent);
   }
   
   /**
@@ -242,7 +241,7 @@ public class MasterMetadataUtil {
    *          should be relative to the table directory
    * 
    */
-  public static void updateTabletDataFile(KeyExtent extent, FileRef path, FileRef mergeFile, DataFileValue dfv, String time, Credentials credentials,
+  public static void updateTabletDataFile(ClientContext context, KeyExtent extent, FileRef path, FileRef mergeFile, DataFileValue dfv, String time,
       Set<FileRef> filesInUseByScans, String address, ZooLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId) {
     if (extent.isRootTablet()) {
       if (unusedWalLogs != null) {
@@ -254,7 +253,7 @@ public class MasterMetadataUtil {
 
     Mutation m = getUpdateForTabletDataFile(extent, path, mergeFile, dfv, time, filesInUseByScans, address, zooLock, unusedWalLogs, lastLocation, flushId);
 
-    MetadataTableUtil.update(credentials, zooLock, m, extent);
+    MetadataTableUtil.update(context, zooLock, m, extent);
 
   }
 

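The MasterMetadataUtil changes above follow one mechanical rule: ClientContext becomes the
leading parameter and the Credentials parameter drops out. A hypothetical call-site update for
addNewTablet, sketched under the assumption of the 1.6 package layout for TServerInstance and
DataFileValue (the argument values here are placeholders, not from the commit):

    import java.util.Map;
    import org.apache.accumulo.core.client.impl.ClientContext;
    import org.apache.accumulo.core.data.KeyExtent;
    import org.apache.accumulo.core.metadata.schema.DataFileValue;
    import org.apache.accumulo.server.fs.FileRef;
    import org.apache.accumulo.server.master.state.TServerInstance;
    import org.apache.accumulo.server.util.MasterMetadataUtil;
    import org.apache.accumulo.server.zookeeper.ZooLock;

    public class AddNewTabletCaller {
      static void example(ClientContext context, KeyExtent extent, String dir, TServerInstance location,
          Map<FileRef,DataFileValue> sizes, Map<FileRef,Long> bulkFiles, ZooLock zooLock) {
        // before: MasterMetadataUtil.addNewTablet(extent, dir, location, sizes, bulkFiles,
        //     credentials, "M0", -1, -1, zooLock);
        MasterMetadataUtil.addNewTablet(context, extent, dir, location, sizes, bulkFiles, "M0", -1, -1, zooLock);
      }
    }
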
http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
index 97b8cff..dd3355a 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -42,12 +42,12 @@ import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.IsolatedScanner;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.BatchWriterImpl;
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.ScannerImpl;
 import org.apache.accumulo.core.client.impl.Writer;
 import org.apache.accumulo.core.data.Key;
@@ -80,12 +80,12 @@ import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
+import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tablets.TabletTime;
 import org.apache.accumulo.server.zookeeper.ZooLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
@@ -108,19 +108,21 @@ public class MetadataTableUtil {
 
   private MetadataTableUtil() {}
 
-  public synchronized static Writer getMetadataTable(Credentials credentials) {
+  public synchronized static Writer getMetadataTable(ClientContext context) {
+    Credentials credentials = context.getCredentials();
     Writer metadataTable = metadata_tables.get(credentials);
     if (metadataTable == null) {
-      metadataTable = new Writer(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID);
+      metadataTable = new Writer(context, MetadataTable.ID);
       metadata_tables.put(credentials, metadataTable);
     }
     return metadataTable;
   }
 
-  private synchronized static Writer getRootTable(Credentials credentials) {
+  private synchronized static Writer getRootTable(ClientContext context) {
+    Credentials credentials = context.getCredentials();
     Writer rootTable = root_tables.get(credentials);
     if (rootTable == null) {
-      rootTable = new Writer(HdfsZooInstance.getInstance(), credentials, RootTable.ID);
+      rootTable = new Writer(context, RootTable.ID);
       root_tables.put(credentials, rootTable);
     }
     return rootTable;
@@ -131,12 +133,12 @@ public class MetadataTableUtil {
         .getBytes(UTF_8)));
   }
 
-  private static void update(Credentials credentials, Mutation m, KeyExtent extent) {
-    update(credentials, null, m, extent);
+  private static void update(ClientContext context, Mutation m, KeyExtent extent) {
+    update(context, null, m, extent);
   }
 
-  public static void update(Credentials credentials, ZooLock zooLock, Mutation m, KeyExtent extent) {
-    Writer t = extent.isMeta() ? getRootTable(credentials) : getMetadataTable(credentials);
+  public static void update(ClientContext context, ZooLock zooLock, Mutation m, KeyExtent extent) {
+    Writer t = extent.isMeta() ? getRootTable(context) : getMetadataTable(context);
     update(t, zooLock, m);
   }
 
@@ -162,23 +164,24 @@ public class MetadataTableUtil {
     }
   }
 
-  public static void updateTabletFlushID(KeyExtent extent, long flushID, Credentials credentials, ZooLock zooLock) {
+  public static void updateTabletFlushID(KeyExtent extent, long flushID, ClientContext context, ZooLock zooLock) {
     if (!extent.isRootTablet()) {
       Mutation m = new Mutation(extent.getMetadataEntry());
       TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value((flushID + "").getBytes(UTF_8)));
-      update(credentials, zooLock, m, extent);
+      update(context, zooLock, m, extent);
     }
   }
 
-  public static void updateTabletCompactID(KeyExtent extent, long compactID, Credentials credentials, ZooLock zooLock) {
+  public static void updateTabletCompactID(KeyExtent extent, long compactID, ClientContext context, ZooLock zooLock) {
     if (!extent.isRootTablet()) {
       Mutation m = new Mutation(extent.getMetadataEntry());
       TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value((compactID + "").getBytes(UTF_8)));
-      update(credentials, zooLock, m, extent);
+      update(context, zooLock, m, extent);
     }
   }
 
-  public static void updateTabletDataFile(long tid, KeyExtent extent, Map<FileRef,DataFileValue> estSizes, String time, Credentials credentials, ZooLock zooLock) {
+  public static void updateTabletDataFile(long tid, KeyExtent extent, Map<FileRef,DataFileValue> estSizes, String time, ClientContext context,
+      ZooLock zooLock) {
     Mutation m = new Mutation(extent.getMetadataEntry());
     byte[] tidBytes = Long.toString(tid).getBytes(UTF_8);
 
@@ -188,31 +191,31 @@ public class MetadataTableUtil {
       m.put(TabletsSection.BulkFileColumnFamily.NAME, file, new Value(tidBytes));
     }
     TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes(UTF_8)));
-    update(credentials, zooLock, m, extent);
+    update(context, zooLock, m, extent);
   }
 
-  public static void updateTabletDir(KeyExtent extent, String newDir, Credentials creds, ZooLock lock) {
+  public static void updateTabletDir(KeyExtent extent, String newDir, ClientContext context, ZooLock lock) {
     Mutation m = new Mutation(extent.getMetadataEntry());
     TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(newDir.getBytes(UTF_8)));
-    update(creds, lock, m, extent);
+    update(context, lock, m, extent);
   }
 
-  public static void addTablet(KeyExtent extent, String path, Credentials credentials, char timeType, ZooLock lock) {
+  public static void addTablet(KeyExtent extent, String path, ClientContext context, char timeType, ZooLock lock) {
     Mutation m = extent.getPrevRowUpdateMutation();
 
     TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(path.getBytes(UTF_8)));
     TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value((timeType + "0").getBytes(UTF_8)));
 
-    update(credentials, lock, m, extent);
+    update(context, lock, m, extent);
   }
 
-  public static void updateTabletPrevEndRow(KeyExtent extent, Credentials credentials) {
+  public static void updateTabletPrevEndRow(KeyExtent extent, ClientContext context) {
     Mutation m = extent.getPrevRowUpdateMutation(); //
-    update(credentials, m, extent);
+    update(context, m, extent);
   }
 
   public static void updateTabletVolumes(KeyExtent extent, List<LogEntry> logsToRemove, List<LogEntry> logsToAdd, List<FileRef> filesToRemove,
-      SortedMap<FileRef,DataFileValue> filesToAdd, String newDir, ZooLock zooLock, Credentials credentials) {
+      SortedMap<FileRef,DataFileValue> filesToAdd, String newDir, ZooLock zooLock, AccumuloServerContext context) {
 
     if (extent.isRootTablet()) {
       if (newDir != null)
@@ -223,9 +226,9 @@ public class MetadataTableUtil {
 
       // add before removing in case of process death
       for (LogEntry logEntry : logsToAdd)
-        addLogEntry(credentials, logEntry, zooLock);
+        addLogEntry(context, logEntry, zooLock);
 
-      removeUnusedWALEntries(extent, logsToRemove, zooLock);
+      removeUnusedWALEntries(context, extent, logsToRemove, zooLock);
     } else {
       Mutation m = new Mutation(extent.getMetadataEntry());
 
@@ -244,14 +247,14 @@ public class MetadataTableUtil {
       if (newDir != null)
         ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(newDir.getBytes(UTF_8)));
 
-      update(credentials, m, extent);
+      update(context, m, extent);
     }
   }
 
-  public static SortedMap<FileRef,DataFileValue> getDataFileSizes(KeyExtent extent, Credentials credentials) throws IOException {
+  public static SortedMap<FileRef,DataFileValue> getDataFileSizes(KeyExtent extent, ClientContext context) throws IOException {
     TreeMap<FileRef,DataFileValue> sizes = new TreeMap<FileRef,DataFileValue>();
 
-    Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
+    Scanner mdScanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
     mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
     Text row = extent.getMetadataEntry();
     VolumeManager fs = VolumeManagerImpl.get();
@@ -271,26 +274,26 @@ public class MetadataTableUtil {
     return sizes;
   }
 
-  public static void rollBackSplit(Text metadataEntry, Text oldPrevEndRow, Credentials credentials, ZooLock zooLock) {
+  public static void rollBackSplit(Text metadataEntry, Text oldPrevEndRow, ClientContext context, ZooLock zooLock) {
     KeyExtent ke = new KeyExtent(metadataEntry, oldPrevEndRow);
     Mutation m = ke.getPrevRowUpdateMutation();
     TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.putDelete(m);
     TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.putDelete(m);
-    update(credentials, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
+    update(context, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
   }
 
-  public static void splitTablet(KeyExtent extent, Text oldPrevEndRow, double splitRatio, Credentials credentials, ZooLock zooLock) {
+  public static void splitTablet(KeyExtent extent, Text oldPrevEndRow, double splitRatio, ClientContext context, ZooLock zooLock) {
     Mutation m = extent.getPrevRowUpdateMutation(); //
 
     TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(splitRatio).getBytes(UTF_8)));
 
     TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(oldPrevEndRow));
     ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
-    update(credentials, zooLock, m, extent);
+    update(context, zooLock, m, extent);
   }
 
-  public static void finishSplit(Text metadataEntry, Map<FileRef,DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, Credentials credentials,
-      ZooLock zooLock) {
+  public static void finishSplit(Text metadataEntry, Map<FileRef,DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove,
+      final ClientContext context, ZooLock zooLock) {
     Mutation m = new Mutation(metadataEntry);
     TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.putDelete(m);
     TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.putDelete(m);
@@ -304,26 +307,26 @@ public class MetadataTableUtil {
       m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
     }
 
-    update(credentials, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
+    update(context, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
   }
 
-  public static void finishSplit(KeyExtent extent, Map<FileRef,DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, Credentials credentials,
+  public static void finishSplit(KeyExtent extent, Map<FileRef,DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, ClientContext context,
       ZooLock zooLock) {
-    finishSplit(extent.getMetadataEntry(), datafileSizes, highDatafilesToRemove, credentials, zooLock);
+    finishSplit(extent.getMetadataEntry(), datafileSizes, highDatafilesToRemove, context, zooLock);
   }
 
-  public static void addDeleteEntries(KeyExtent extent, Set<FileRef> datafilesToDelete, Credentials credentials) throws IOException {
+  public static void addDeleteEntries(KeyExtent extent, Set<FileRef> datafilesToDelete, ClientContext context) throws IOException {
 
     String tableId = extent.getTableId().toString();
 
     // TODO could use batch writer,would need to handle failure and retry like update does - ACCUMULO-1294
     for (FileRef pathToRemove : datafilesToDelete) {
-      update(credentials, createDeleteMutation(tableId, pathToRemove.path().toString()), extent);
+      update(context, createDeleteMutation(tableId, pathToRemove.path().toString()), extent);
     }
   }
 
-  public static void addDeleteEntry(String tableId, String path) throws IOException {
-    update(SystemCredentials.get(), createDeleteMutation(tableId, path), new KeyExtent(new Text(tableId), null, null));
+  public static void addDeleteEntry(AccumuloServerContext context, String tableId, String path) throws IOException {
+    update(context, createDeleteMutation(tableId, path), new KeyExtent(new Text(tableId), null, null));
   }
 
   public static Mutation createDeleteMutation(String tableId, String pathToRemove) throws IOException {
@@ -333,13 +336,13 @@ public class MetadataTableUtil {
     return delFlag;
   }
 
-  public static void removeScanFiles(KeyExtent extent, Set<FileRef> scanFiles, Credentials credentials, ZooLock zooLock) {
+  public static void removeScanFiles(KeyExtent extent, Set<FileRef> scanFiles, ClientContext context, ZooLock zooLock) {
     Mutation m = new Mutation(extent.getMetadataEntry());
 
     for (FileRef pathToRemove : scanFiles)
       m.putDelete(ScanFileColumnFamily.NAME, pathToRemove.meta());
 
-    update(credentials, zooLock, m, extent);
+    update(context, zooLock, m, extent);
   }
 
   public static void splitDatafiles(Text table, Text midRow, double splitRatio, Map<FileRef,FileUtil.FileInfo> firstAndLastRows,
@@ -385,11 +388,11 @@ public class MetadataTableUtil {
     }
   }
 
-  public static void deleteTable(String tableId, boolean insertDeletes, Credentials credentials, ZooLock lock) throws AccumuloException, IOException {
-    Scanner ms = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
+  public static void deleteTable(String tableId, boolean insertDeletes, ClientContext context, ZooLock lock) throws AccumuloException, IOException {
+    Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
     Text tableIdText = new Text(tableId);
-    BatchWriter bw = new BatchWriterImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, new BatchWriterConfig().setMaxMemory(1000000)
-        .setMaxLatency(120000l, TimeUnit.MILLISECONDS).setMaxWriteThreads(2));
+    BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID, new BatchWriterConfig().setMaxMemory(1000000).setMaxLatency(120000l, TimeUnit.MILLISECONDS)
+        .setMaxWriteThreads(2));
 
     // scan metadata for our table and delete everything we find
     Mutation m = null;
@@ -447,7 +450,7 @@ public class MetadataTableUtil {
     return ZooUtil.getRoot(HdfsZooInstance.getInstance()) + RootTable.ZROOT_TABLET_WALOGS;
   }
 
-  public static void addLogEntry(Credentials credentials, LogEntry entry, ZooLock zooLock) {
+  public static void addLogEntry(ClientContext context, LogEntry entry, ZooLock zooLock) {
     if (entry.extent.isRootTablet()) {
       String root = getZookeeperLogLocation();
       while (true) {
@@ -471,7 +474,7 @@ public class MetadataTableUtil {
     } else {
       Mutation m = new Mutation(entry.getRow());
       m.put(entry.getColumnFamily(), entry.getColumnQualifier(), entry.getValue());
-      update(credentials, zooLock, m, entry.extent);
+      update(context, zooLock, m, entry.extent);
     }
   }
 
@@ -501,8 +504,8 @@ public class MetadataTableUtil {
     }
   }
 
-  public static Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> getFileAndLogEntries(Credentials credentials, KeyExtent extent) throws KeeperException,
-      InterruptedException, IOException {
+  public static Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> getFileAndLogEntries(ClientContext context, KeyExtent extent)
+      throws KeeperException, InterruptedException, IOException {
     ArrayList<LogEntry> result = new ArrayList<LogEntry>();
     TreeMap<FileRef,DataFileValue> sizes = new TreeMap<FileRef,DataFileValue>();
 
@@ -521,7 +524,7 @@ public class MetadataTableUtil {
 
     } else {
       String systemTableToCheck = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
-      Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, systemTableToCheck, Authorizations.EMPTY);
+      Scanner scanner = new ScannerImpl(context, systemTableToCheck, Authorizations.EMPTY);
       scanner.fetchColumnFamily(LogColumnFamily.NAME);
       scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
       scanner.setRange(extent.toMetadataRange());
@@ -545,7 +548,7 @@ public class MetadataTableUtil {
     return new Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>>(result, sizes);
   }
 
-  public static List<LogEntry> getLogEntries(Credentials credentials, KeyExtent extent) throws IOException, KeeperException, InterruptedException {
+  public static List<LogEntry> getLogEntries(ClientContext context, KeyExtent extent) throws IOException, KeeperException, InterruptedException {
     log.info("Scanning logging entries for " + extent);
     ArrayList<LogEntry> result = new ArrayList<LogEntry>();
     if (extent.equals(RootTable.EXTENT)) {
@@ -553,7 +556,7 @@ public class MetadataTableUtil {
       getRootLogEntries(result);
     } else {
       log.info("Scanning metadata for logs used for tablet " + extent);
-      Scanner scanner = getTabletLogScanner(credentials, extent);
+      Scanner scanner = getTabletLogScanner(context, extent);
       Text pattern = extent.getMetadataEntry();
       for (Entry<Key,Value> entry : scanner) {
         Text row = entry.getKey().getRow();
@@ -602,11 +605,11 @@ public class MetadataTableUtil {
     }
   }
 
-  private static Scanner getTabletLogScanner(Credentials credentials, KeyExtent extent) {
+  private static Scanner getTabletLogScanner(ClientContext context, KeyExtent extent) {
     String tableId = MetadataTable.ID;
     if (extent.isMeta())
       tableId = RootTable.ID;
-    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, tableId, Authorizations.EMPTY);
+    Scanner scanner = new ScannerImpl(context, tableId, Authorizations.EMPTY);
     scanner.fetchColumnFamily(LogColumnFamily.NAME);
     Text start = extent.getMetadataEntry();
     Key endKey = new Key(start, LogColumnFamily.NAME);
@@ -621,12 +624,11 @@ public class MetadataTableUtil {
     Iterator<LogEntry> rootTableEntries = null;
     Iterator<Entry<Key,Value>> metadataEntries = null;
 
-    LogEntryIterator(Credentials creds) throws IOException, KeeperException, InterruptedException {
-      zookeeperEntries = getLogEntries(creds, RootTable.EXTENT).iterator();
-      rootTableEntries = getLogEntries(creds, new KeyExtent(new Text(MetadataTable.ID), null, null)).iterator();
+    LogEntryIterator(ClientContext context) throws IOException, KeeperException, InterruptedException {
+      zookeeperEntries = getLogEntries(context, RootTable.EXTENT).iterator();
+      rootTableEntries = getLogEntries(context, new KeyExtent(new Text(MetadataTable.ID), null, null)).iterator();
       try {
-        Scanner scanner = HdfsZooInstance.getInstance().getConnector(creds.getPrincipal(), creds.getToken())
-            .createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+        Scanner scanner = context.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
         log.info("Setting range to " + MetadataSchema.TabletsSection.getRange());
         scanner.setRange(MetadataSchema.TabletsSection.getRange());
         scanner.fetchColumnFamily(LogColumnFamily.NAME);
@@ -659,11 +661,11 @@ public class MetadataTableUtil {
     }
   }
 
-  public static Iterator<LogEntry> getLogEntries(Credentials creds) throws IOException, KeeperException, InterruptedException {
-    return new LogEntryIterator(creds);
+  public static Iterator<LogEntry> getLogEntries(ClientContext context) throws IOException, KeeperException, InterruptedException {
+    return new LogEntryIterator(context);
   }
 
-  public static void removeUnusedWALEntries(KeyExtent extent, List<LogEntry> logEntries, ZooLock zooLock) {
+  public static void removeUnusedWALEntries(AccumuloServerContext context, KeyExtent extent, List<LogEntry> logEntries, ZooLock zooLock) {
     if (extent.isRootTablet()) {
       for (LogEntry entry : logEntries) {
         String root = getZookeeperLogLocation();
@@ -686,7 +688,7 @@ public class MetadataTableUtil {
       for (LogEntry entry : logEntries) {
         m.putDelete(LogColumnFamily.NAME, new Text(entry.getName()));
       }
-      update(SystemCredentials.get(), zooLock, m, extent);
+      update(context, zooLock, m, extent);
     }
   }
 
@@ -841,9 +843,9 @@ public class MetadataTableUtil {
     return rewrites;
   }
 
-  public static void cloneTable(Instance instance, String srcTableId, String tableId, VolumeManager volumeManager) throws Exception {
+  public static void cloneTable(ClientContext context, String srcTableId, String tableId, VolumeManager volumeManager) throws Exception {
 
-    Connector conn = instance.getConnector(SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
+    Connector conn = context.getConnector();
     BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
 
     while (true) {
@@ -868,7 +870,7 @@ public class MetadataTableUtil {
         bw.flush();
 
         // delete what we have cloned and try again
-        deleteTable(tableId, false, SystemCredentials.get(), null);
+        deleteTable(tableId, false, context, null);
 
         log.debug("Tablets merged in table " + srcTableId + " while attempting to clone, trying again");
 
@@ -898,10 +900,10 @@ public class MetadataTableUtil {
 
   }
 
-  public static void chopped(KeyExtent extent, ZooLock zooLock) {
+  public static void chopped(AccumuloServerContext context, KeyExtent extent, ZooLock zooLock) {
     Mutation m = new Mutation(extent.getMetadataEntry());
     ChoppedColumnFamily.CHOPPED_COLUMN.put(m, new Value("chopped".getBytes(UTF_8)));
-    update(SystemCredentials.get(), zooLock, m, extent);
+    update(context, zooLock, m, extent);
   }
 
   public static void removeBulkLoadEntries(Connector conn, String tableId, long tid) throws Exception {
@@ -940,12 +942,12 @@ public class MetadataTableUtil {
     }
   }
 
-  public static Map<FileRef,Long> getBulkFilesLoaded(Credentials credentials, KeyExtent extent) throws IOException {
+  public static Map<FileRef,Long> getBulkFilesLoaded(ClientContext context, KeyExtent extent) throws IOException {
     Text metadataRow = extent.getMetadataEntry();
     Map<FileRef,Long> ret = new HashMap<FileRef,Long>();
 
     VolumeManager fs = VolumeManagerImpl.get();
-    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, extent.isMeta() ? RootTable.ID : MetadataTable.ID, Authorizations.EMPTY);
+    Scanner scanner = new ScannerImpl(context, extent.isMeta() ? RootTable.ID : MetadataTable.ID, Authorizations.EMPTY);
     scanner.setRange(new Range(metadataRow));
     scanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
     for (Entry<Key,Value> entry : scanner) {
@@ -955,30 +957,30 @@ public class MetadataTableUtil {
     return ret;
   }
 
-  public static void addBulkLoadInProgressFlag(String path) {
+  public static void addBulkLoadInProgressFlag(AccumuloServerContext context, String path) {
 
     Mutation m = new Mutation(MetadataSchema.BlipSection.getRowPrefix() + path);
     m.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
 
     // new KeyExtent is only added to force update to write to the metadata table, not the root table
     // because bulk loads aren't supported to the metadata table
-    update(SystemCredentials.get(), m, new KeyExtent(new Text("anythingNotMetadata"), null, null));
+    update(context, m, new KeyExtent(new Text("anythingNotMetadata"), null, null));
   }
 
-  public static void removeBulkLoadInProgressFlag(String path) {
+  public static void removeBulkLoadInProgressFlag(AccumuloServerContext context, String path) {
 
     Mutation m = new Mutation(MetadataSchema.BlipSection.getRowPrefix() + path);
     m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
 
     // new KeyExtent is only added to force update to write to the metadata table, not the root table
     // because bulk loads aren't supported to the metadata table
-    update(SystemCredentials.get(), m, new KeyExtent(new Text("anythingNotMetadata"), null, null));
+    update(context, m, new KeyExtent(new Text("anythingNotMetadata"), null, null));
   }
 
   /**
    * During an upgrade from 1.6 to 1.7, we need to add the replication table
    */
-  public static void createReplicationTable(Instance instance, SystemCredentials systemCredentials) throws IOException {
+  public static void createReplicationTable(ClientContext context) throws IOException {
     String dir = VolumeManagerImpl.get().choose(ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + ReplicationTable.ID
         + Constants.DEFAULT_TABLET_LOCATION;
 
@@ -986,59 +988,59 @@ public class MetadataTableUtil {
     m.put(DIRECTORY_COLUMN.getColumnFamily(), DIRECTORY_COLUMN.getColumnQualifier(), 0, new Value(dir.getBytes(UTF_8)));
     m.put(TIME_COLUMN.getColumnFamily(), TIME_COLUMN.getColumnQualifier(), 0, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(UTF_8)));
     m.put(PREV_ROW_COLUMN.getColumnFamily(), PREV_ROW_COLUMN.getColumnQualifier(), 0, KeyExtent.encodePrevEndRow(null));
-    update(getMetadataTable(systemCredentials), null, m);
+    update(getMetadataTable(context), null, m);
   }
 
   /**
    * During an upgrade we need to move deletion requests for files under the !METADATA table to the root tablet.
    */
-  public static void moveMetaDeleteMarkers(Instance instance, Credentials creds) {
+  public static void moveMetaDeleteMarkers(ClientContext context) {
     String oldDeletesPrefix = "!!~del";
     Range oldDeletesRange = new Range(oldDeletesPrefix, true, "!!~dem", false);
 
     // move old delete markers to new location, to standardize table schema between all metadata tables
-    Scanner scanner = new ScannerImpl(instance, creds, RootTable.ID, Authorizations.EMPTY);
+    Scanner scanner = new ScannerImpl(context, RootTable.ID, Authorizations.EMPTY);
     scanner.setRange(oldDeletesRange);
     for (Entry<Key,Value> entry : scanner) {
       String row = entry.getKey().getRow().toString();
       if (row.startsWith(oldDeletesPrefix)) {
-        moveDeleteEntry(creds, RootTable.OLD_EXTENT, entry, row, oldDeletesPrefix);
+        moveDeleteEntry(context, RootTable.OLD_EXTENT, entry, row, oldDeletesPrefix);
       } else {
         break;
       }
     }
   }
 
-  public static void moveMetaDeleteMarkersFrom14(Instance instance, Credentials creds) {
+  public static void moveMetaDeleteMarkersFrom14(ClientContext context) {
     // new KeyExtent is only added to force update to write to the metadata table, not the root table
     KeyExtent notMetadata = new KeyExtent(new Text("anythingNotMetadata"), null, null);
 
     // move delete markers from the normal delete keyspace to the root tablet delete keyspace if the files are for the !METADATA table
-    Scanner scanner = new ScannerImpl(instance, creds, MetadataTable.ID, Authorizations.EMPTY);
+    Scanner scanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
     scanner.setRange(MetadataSchema.DeletesSection.getRange());
     for (Entry<Key,Value> entry : scanner) {
       String row = entry.getKey().getRow().toString();
       if (row.startsWith(MetadataSchema.DeletesSection.getRowPrefix() + "/" + MetadataTable.ID)) {
-        moveDeleteEntry(creds, notMetadata, entry, row, MetadataSchema.DeletesSection.getRowPrefix());
+        moveDeleteEntry(context, notMetadata, entry, row, MetadataSchema.DeletesSection.getRowPrefix());
       } else {
         break;
       }
     }
   }
 
-  private static void moveDeleteEntry(Credentials creds, KeyExtent oldExtent, Entry<Key,Value> entry, String rowID, String prefix) {
+  private static void moveDeleteEntry(ClientContext context, KeyExtent oldExtent, Entry<Key,Value> entry, String rowID, String prefix) {
     String filename = rowID.substring(prefix.length());
 
     // add the new entry first
     log.info("Moving " + filename + " marker in " + RootTable.NAME);
     Mutation m = new Mutation(MetadataSchema.DeletesSection.getRowPrefix() + filename);
     m.put(EMPTY_BYTES, EMPTY_BYTES, EMPTY_BYTES);
-    update(creds, m, RootTable.EXTENT);
+    update(context, m, RootTable.EXTENT);
 
     // then remove the old entry
     m = new Mutation(entry.getKey().getRow());
     m.putDelete(EMPTY_BYTES, EMPTY_BYTES);
-    update(creds, m, oldExtent);
+    update(context, m, oldExtent);
   }
 
   public static SortedMap<Text,SortedMap<ColumnFQ,Value>> getTabletEntries(SortedMap<Key,Value> tabletKeyValues, List<ColumnFQ> columns) {

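Note that the Writer caches in MetadataTableUtil still key on Credentials; the context simply
becomes the source of them. A condensed sketch of that pattern, assuming a HashMap cache
analogous to the existing metadata_tables field:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.accumulo.core.client.impl.ClientContext;
    import org.apache.accumulo.core.client.impl.Writer;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.security.Credentials;

    public class WriterCacheSketch {
      private static final Map<Credentials,Writer> cache = new HashMap<Credentials,Writer>();

      // One Writer per distinct credentials, as in getMetadataTable above; the
      // context supplies both the cache key and the connection state.
      static synchronized Writer get(ClientContext context) {
        Credentials credentials = context.getCredentials();
        Writer w = cache.get(credentials);
        if (w == null) {
          w = new Writer(context, MetadataTable.ID);
          cache.put(credentials, w);
        }
        return w;
      }
    }
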
http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
index 002659d..82cc855 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
@@ -38,10 +38,11 @@ import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.SimpleThreadPool;
+import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tables.TableManager;
 import org.apache.hadoop.fs.Path;
 import org.apache.log4j.Logger;
@@ -54,8 +55,8 @@ public class RandomizeVolumes {
     opts.parseArgs(RandomizeVolumes.class.getName(), args);
     Connector c;
     if (opts.getToken() == null) {
-      SystemCredentials creds = SystemCredentials.get();
-      c = opts.getInstance().getConnector(creds.getPrincipal(), creds.getToken());
+      AccumuloServerContext context = new AccumuloServerContext(new ServerConfigurationFactory(opts.getInstance()));
+      c = context.getConnector();
     } else {
       c = opts.getConnector();
     }

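The RandomizeVolumes hunk shows the server-side idiom: rather than pulling SystemCredentials
directly, a process builds an AccumuloServerContext from its configuration and asks it for a
Connector. The same two lines, isolated as a sketch:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Instance;
    import org.apache.accumulo.server.AccumuloServerContext;
    import org.apache.accumulo.server.conf.ServerConfigurationFactory;

    public class ServerContextSketch {
      // The server context carries the system credentials internally.
      static Connector systemConnector(Instance instance) throws Exception {
        AccumuloServerContext context = new AccumuloServerContext(new ServerConfigurationFactory(instance));
        return context.getConnector();
      }
    }
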
http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/base/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java b/server/base/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
index 0b4f896..d5b586c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
@@ -30,11 +30,11 @@ import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.ClientConfiguration;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
@@ -45,6 +45,7 @@ import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
@@ -120,7 +121,7 @@ public class RemoveEntriesForMissingFiles {
     }
   }
 
-  private static int checkTable(Instance instance, String principal, AuthenticationToken token, String table, Range range, boolean fix) throws Exception {
+  private static int checkTable(ClientContext context, String table, Range range, boolean fix) throws Exception {
 
     @SuppressWarnings({"rawtypes"})
     Map cache = new LRUMap(100000);
@@ -130,7 +131,7 @@ public class RemoveEntriesForMissingFiles {
     System.out.printf("Scanning : %s %s\n", table, range);
 
     VolumeManager fs = VolumeManagerImpl.get();
-    Connector connector = instance.getConnector(principal, token);
+    Connector connector = context.getConnector();
     Scanner metadata = connector.createScanner(table, Authorizations.EMPTY);
     metadata.setRange(range);
     metadata.fetchColumnFamily(DataFileColumnFamily.NAME);
@@ -182,24 +183,24 @@ public class RemoveEntriesForMissingFiles {
     return missing.get();
   }
 
-  static int checkAllTables(Instance instance, String principal, AuthenticationToken token, boolean fix) throws Exception {
-    int missing = checkTable(instance, principal, token, RootTable.NAME, MetadataSchema.TabletsSection.getRange(), fix);
+  static int checkAllTables(ClientContext context, boolean fix) throws Exception {
+    int missing = checkTable(context, RootTable.NAME, MetadataSchema.TabletsSection.getRange(), fix);
 
     if (missing == 0)
-      return checkTable(instance, principal, token, MetadataTable.NAME, MetadataSchema.TabletsSection.getRange(), fix);
+      return checkTable(context, MetadataTable.NAME, MetadataSchema.TabletsSection.getRange(), fix);
     else
       return missing;
   }
 
-  static int checkTable(Instance instance, String principal, AuthenticationToken token, String tableName, boolean fix) throws Exception {
+  static int checkTable(ClientContext context, String tableName, boolean fix) throws Exception {
     if (tableName.equals(RootTable.NAME)) {
       throw new IllegalArgumentException("Can not check root table");
     } else if (tableName.equals(MetadataTable.NAME)) {
-      return checkTable(instance, principal, token, RootTable.NAME, MetadataSchema.TabletsSection.getRange(), fix);
+      return checkTable(context, RootTable.NAME, MetadataSchema.TabletsSection.getRange(), fix);
     } else {
-      String tableId = Tables.getTableId(instance, tableName);
+      String tableId = Tables.getTableId(context.getInstance(), tableName);
       Range range = new KeyExtent(new Text(tableId), null, null).toMetadataRange();
-      return checkTable(instance, principal, token, MetadataTable.NAME, range, fix);
+      return checkTable(context, MetadataTable.NAME, range, fix);
     }
   }
 
@@ -209,6 +210,6 @@ public class RemoveEntriesForMissingFiles {
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(RemoveEntriesForMissingFiles.class.getName(), args, scanOpts, bwOpts);
 
-    checkAllTables(opts.getInstance(), opts.principal, opts.getToken(), opts.fix);
+    checkAllTables(new ClientContext(opts.getInstance(), new Credentials(opts.principal, opts.getToken()), ClientConfiguration.loadDefault()), opts.fix);
   }
 }

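In RemoveEntriesForMissingFiles the six-argument checkTable shrinks to four because the context
carries the connection state, while still exposing the Instance for lookups that want it. The
table-id resolution used above, isolated as a sketch:

    import org.apache.accumulo.core.client.impl.ClientContext;
    import org.apache.accumulo.core.client.impl.Tables;
    import org.apache.accumulo.core.data.KeyExtent;
    import org.apache.accumulo.core.data.Range;
    import org.apache.hadoop.io.Text;

    public class TableRangeSketch {
      // Same lookup as checkTable above: name -> id via the context's Instance,
      // then the metadata range covering that table's tablets.
      static Range metadataRange(ClientContext context, String tableName) throws Exception {
        String tableId = Tables.getTableId(context.getInstance(), tableName);
        return new KeyExtent(new Text(tableId), null, null).toMetadataRange();
      }
    }
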
http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
index 8f0656c..344e245 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
@@ -26,11 +26,11 @@ import java.util.Map.Entry;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.IteratorSetting.Column;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.Writer;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.KeyExtent;
@@ -47,7 +47,6 @@ import org.apache.accumulo.core.replication.proto.Replication.Status;
 import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.core.tabletserver.thrift.ConstraintViolationException;
 import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.replication.StatusCombiner;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
@@ -81,21 +80,20 @@ public class ReplicationTableUtil {
     writers.put(creds, writer);
   }
 
-  synchronized static Writer getWriter(Credentials credentials) {
-    Writer replicationTable = writers.get(credentials);
+  synchronized static Writer getWriter(ClientContext context) {
+    Writer replicationTable = writers.get(context.getCredentials());
     if (replicationTable == null) {
-      Instance inst = HdfsZooInstance.getInstance();
       Connector conn;
       try {
-        conn = inst.getConnector(credentials.getPrincipal(), credentials.getToken());
+        conn = context.getConnector();
       } catch (AccumuloException | AccumuloSecurityException e) {
         throw new RuntimeException(e);
       }
 
       configureMetadataTable(conn, MetadataTable.NAME);
 
-      replicationTable = new Writer(inst, credentials, MetadataTable.ID);
-      writers.put(credentials, replicationTable);
+      replicationTable = new Writer(context, MetadataTable.ID);
+      writers.put(context.getCredentials(), replicationTable);
     }
     return replicationTable;
   }
@@ -156,8 +154,8 @@ public class ReplicationTableUtil {
   /**
    * Write the given Mutation to the replication table.
    */
-  static void update(Credentials credentials, Mutation m, KeyExtent extent) {
-    Writer t = getWriter(credentials);
+  static void update(ClientContext context, Mutation m, KeyExtent extent) {
+    Writer t = getWriter(context);
     while (true) {
       try {
         t.update(m);
@@ -178,7 +176,7 @@ public class ReplicationTableUtil {
   /**
    * Write replication ingest entries for each provided file with the given {@link Status}.
    */
-  public static void updateFiles(Credentials creds, KeyExtent extent, Collection<String> files, Status stat) {
+  public static void updateFiles(ClientContext context, KeyExtent extent, Collection<String> files, Status stat) {
     if (log.isDebugEnabled()) {
       log.debug("Updating replication status for " + extent + " with " + files + " using " + ProtobufUtil.toString(stat));
     }
@@ -190,7 +188,7 @@ public class ReplicationTableUtil {
     Value v = ProtobufUtil.toValue(stat);
     for (String file : files) {
       // TODO Can preclude this addition if the extent is for a table we don't need to replicate
-      update(creds, createUpdateMutation(new Path(file), v, extent), extent);
+      update(context, createUpdateMutation(new Path(file), v, extent), extent);
     }
   }
 

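ReplicationTableUtil keeps its existing error handling around the connector; only the source of
the Connector changes. The wrapping from getWriter, isolated as a sketch:

    import org.apache.accumulo.core.client.AccumuloException;
    import org.apache.accumulo.core.client.AccumuloSecurityException;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.impl.ClientContext;

    public class ConnectorFromContext {
      // Internal boundary: checked connector exceptions become a RuntimeException,
      // exactly as getWriter does above.
      static Connector connector(ClientContext context) {
        try {
          return context.getConnector();
        } catch (AccumuloException | AccumuloSecurityException e) {
          throw new RuntimeException(e);
        }
      }
    }
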
http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/base/src/main/java/org/apache/accumulo/server/util/TServerUtils.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TServerUtils.java b/server/base/src/main/java/org/apache/accumulo/server/util/TServerUtils.java
index 470394d..d30f101 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/TServerUtils.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/TServerUtils.java
@@ -28,7 +28,6 @@ import java.util.Random;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.ThreadPoolExecutor;
 
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.core.util.LoggingRunnable;
@@ -37,6 +36,7 @@ import org.apache.accumulo.core.util.SslConnectionParams;
 import org.apache.accumulo.core.util.TBufferedSocket;
 import org.apache.accumulo.core.util.ThriftUtil;
 import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.metrics.ThriftMetrics;
 import org.apache.accumulo.server.util.time.SimpleTimer;
 import org.apache.log4j.Logger;
@@ -83,22 +83,22 @@ public class TServerUtils {
    * @throws UnknownHostException
    *           when we don't know our own address
    */
-  public static ServerAddress startServer(AccumuloConfiguration conf, String address, Property portHintProperty, TProcessor processor, String serverName,
+  public static ServerAddress startServer(AccumuloServerContext service, String address, Property portHintProperty, TProcessor processor, String serverName,
       String threadName, Property portSearchProperty, Property minThreadProperty, Property timeBetweenThreadChecksProperty, Property maxMessageSizeProperty)
       throws UnknownHostException {
-    int portHint = conf.getPort(portHintProperty);
+    int portHint = service.getConfiguration().getPort(portHintProperty);
     int minThreads = 2;
     if (minThreadProperty != null)
-      minThreads = conf.getCount(minThreadProperty);
+      minThreads = service.getConfiguration().getCount(minThreadProperty);
     long timeBetweenThreadChecks = 1000;
     if (timeBetweenThreadChecksProperty != null)
-      timeBetweenThreadChecks = conf.getTimeInMillis(timeBetweenThreadChecksProperty);
+      timeBetweenThreadChecks = service.getConfiguration().getTimeInMillis(timeBetweenThreadChecksProperty);
     long maxMessageSize = 10 * 1000 * 1000;
     if (maxMessageSizeProperty != null)
-      maxMessageSize = conf.getMemoryInBytes(maxMessageSizeProperty);
+      maxMessageSize = service.getConfiguration().getMemoryInBytes(maxMessageSizeProperty);
     boolean portSearch = false;
     if (portSearchProperty != null)
-      portSearch = conf.getBoolean(portSearchProperty);
+      portSearch = service.getConfiguration().getBoolean(portSearchProperty);
     // create the TimedProcessor outside the port search loop so we don't try to register the same metrics mbean more than once
     TServerUtils.TimedProcessor timedProcessor = new TServerUtils.TimedProcessor(processor, serverName, threadName);
     Random random = new Random();
@@ -118,8 +118,8 @@ public class TServerUtils {
         try {
           HostAndPort addr = HostAndPort.fromParts(address, port);
           return TServerUtils.startTServer(addr, timedProcessor, serverName, threadName, minThreads,
-              conf.getCount(Property.GENERAL_SIMPLETIMER_THREADPOOL_SIZE), timeBetweenThreadChecks, maxMessageSize,
-              SslConnectionParams.forServer(conf), conf.getTimeInMillis(Property.GENERAL_RPC_TIMEOUT));
+              service.getConfiguration().getCount(Property.GENERAL_SIMPLETIMER_THREADPOOL_SIZE), timeBetweenThreadChecks, maxMessageSize,
+              service.getServerSslParams(), service.getClientTimeoutInMillis());
         } catch (TTransportException ex) {
           log.error("Unable to start TServer", ex);
           if (ex.getCause() == null || ex.getCause().getClass() == BindException.class) {

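In TServerUtils the AccumuloConfiguration parameter disappears; startServer now reads each
property through the AccumuloServerContext on demand. One representative read, as a sketch:

    import org.apache.accumulo.core.conf.Property;
    import org.apache.accumulo.server.AccumuloServerContext;

    public class PortHintSketch {
      // The context owns the configuration, so callers stop threading it separately.
      static int portHint(AccumuloServerContext service, Property portHintProperty) {
        return service.getConfiguration().getPort(portHintProperty);
      }
    }
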
http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java b/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
index 50d7014..9646991 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
@@ -30,11 +30,10 @@ import java.util.concurrent.TimeUnit;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.thrift.InitialMultiScan;
@@ -52,7 +51,6 @@ import org.apache.accumulo.core.trace.Tracer;
 import org.apache.accumulo.core.trace.thrift.TInfo;
 import org.apache.accumulo.core.util.ThriftUtil;
 import org.apache.accumulo.server.cli.ClientOpts;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 import org.apache.thrift.TException;
@@ -72,14 +70,15 @@ public class VerifyTabletAssignments {
     Opts opts = new Opts();
     opts.parseArgs(VerifyTabletAssignments.class.getName(), args);
     
+    ClientContext context = new ClientContext(opts.getInstance(), new Credentials(opts.principal, opts.getToken()), opts.getClientConfiguration());
     Connector conn = opts.getConnector();
     for (String table : conn.tableOperations().list())
-      checkTable(opts, table, null);
+      checkTable(context, opts, table, null);
     
   }
   
-  private static void checkTable(final Opts opts, String tableName, HashSet<KeyExtent> check) throws AccumuloException, AccumuloSecurityException,
-      TableNotFoundException, InterruptedException {
+  private static void checkTable(final ClientContext context, final Opts opts, String tableName, HashSet<KeyExtent> check) throws AccumuloException,
+      AccumuloSecurityException, TableNotFoundException, InterruptedException {
     
     if (check == null)
       System.out.println("Checking table " + tableName);
@@ -88,11 +87,8 @@ public class VerifyTabletAssignments {
     
     TreeMap<KeyExtent,String> tabletLocations = new TreeMap<KeyExtent,String>();
     
-    Connector conn = opts.getConnector();
-    final Instance inst = conn.getInstance();
-    String tableId = Tables.getNameToIdMap(inst).get(tableName);
-    Credentials credentials = new Credentials(opts.principal, opts.getToken());
-    MetadataServicer.forTableId(inst, credentials, tableId).getTabletLocations(tabletLocations);
+    String tableId = Tables.getNameToIdMap(context.getInstance()).get(tableName);
+    MetadataServicer.forTableId(context, tableId).getTabletLocations(tabletLocations);
     
     final HashSet<KeyExtent> failures = new HashSet<KeyExtent>();
     
@@ -119,14 +115,13 @@ public class VerifyTabletAssignments {
     }
     
     ExecutorService tp = Executors.newFixedThreadPool(20);
-    final ServerConfigurationFactory conf = new ServerConfigurationFactory(inst);
     for (final Entry<String,List<KeyExtent>> entry : extentsPerServer.entrySet()) {
       Runnable r = new Runnable() {
         
         @Override
         public void run() {
           try {
-            checkTabletServer(inst, conf.getConfiguration(), new Credentials(opts.principal, opts.getToken()), entry, failures);
+            checkTabletServer(context, entry, failures);
           } catch (Exception e) {
             log.error("Failure on tablet server '"+entry.getKey()+".", e);
             failures.addAll(entry.getValue());
@@ -143,7 +138,7 @@ public class VerifyTabletAssignments {
     while (!tp.awaitTermination(1, TimeUnit.HOURS)) {}
     
     if (failures.size() > 0)
-      checkTable(opts, tableName, failures);
+      checkTable(context, opts, tableName, failures);
   }
   
   private static void checkFailures(String server, HashSet<KeyExtent> failures, MultiScanResult scanResult) {
@@ -154,9 +149,9 @@ public class VerifyTabletAssignments {
     }
   }
   
-  private static void checkTabletServer(Instance inst, AccumuloConfiguration conf, Credentials creds, Entry<String,List<KeyExtent>> entry,
-      HashSet<KeyExtent> failures) throws ThriftSecurityException, TException, NoSuchScanIDException {
-    TabletClientService.Iface client = ThriftUtil.getTServerClient(entry.getKey(), conf);
+  private static void checkTabletServer(ClientContext context, Entry<String,List<KeyExtent>> entry, HashSet<KeyExtent> failures)
+      throws ThriftSecurityException, TException, NoSuchScanIDException {
+    TabletClientService.Iface client = ThriftUtil.getTServerClient(entry.getKey(), context);
     
     Map<TKeyExtent,List<TRange>> batch = new TreeMap<TKeyExtent,List<TRange>>();
     
@@ -190,7 +185,7 @@ public class VerifyTabletAssignments {
     Map<String,Map<String,String>> emptyMapSMapSS = Collections.emptyMap();
     List<IterInfo> emptyListIterInfo = Collections.emptyList();
     List<TColumn> emptyListColumn = Collections.emptyList();
-    InitialMultiScan is = client.startMultiScan(tinfo, creds.toThrift(inst), batch, emptyListColumn, emptyListIterInfo, emptyMapSMapSS,
+    InitialMultiScan is = client.startMultiScan(tinfo, context.rpcCreds(), batch, emptyListColumn, emptyListIterInfo, emptyMapSMapSS,
         Authorizations.EMPTY.getAuthorizationsBB(), false);
     if (is.result.more) {
       MultiScanResult result = client.continueMultiScan(tinfo, is.scanID);
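
The refactor in this file collapses the old (Instance, AccumuloConfiguration, Credentials) argument triple into one ClientContext, which is then threaded through every helper. A sketch of the construction and hand-off, assembled from the hunks above:

    // Build one context up front from the parsed command-line options...
    ClientContext context = new ClientContext(opts.getInstance(),
        new Credentials(opts.principal, opts.getToken()), opts.getClientConfiguration());

    // ...and pass it wherever the pieces used to travel separately:
    String tableId = Tables.getNameToIdMap(context.getInstance()).get(tableName);
    MetadataServicer.forTableId(context, tableId).getTabletLocations(tabletLocations);

    // Thrift clients and RPC credentials also come from the context:
    TabletClientService.Iface client = ThriftUtil.getTServerClient(entry.getKey(), context);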

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/base/src/test/java/org/apache/accumulo/server/client/BulkImporterTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/client/BulkImporterTest.java b/server/base/src/test/java/org/apache/accumulo/server/client/BulkImporterTest.java
index 3680341..d12483c 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/client/BulkImporterTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/client/BulkImporterTest.java
@@ -25,10 +25,14 @@ import java.util.TreeSet;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.TabletLocator;
 import org.apache.accumulo.core.client.impl.TabletLocator.TabletLocation;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.client.mock.MockInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
@@ -63,19 +67,19 @@ public class BulkImporterTest {
     int invalidated = 0;
     
     @Override
-    public TabletLocation locateTablet(Credentials credentials, Text row, boolean skipRow, boolean retry) throws AccumuloException, AccumuloSecurityException,
+    public TabletLocation locateTablet(ClientContext context, Text row, boolean skipRow, boolean retry) throws AccumuloException, AccumuloSecurityException,
         TableNotFoundException {
       return new TabletLocation(fakeMetaData.tailSet(new KeyExtent(tableId, row, null)).first(), "localhost", "1");
     }
     
     @Override
-    public <T extends Mutation> void binMutations(Credentials credentials, List<T> mutations, Map<String,TabletServerMutations<T>> binnedMutations, List<T> failures)
-        throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    public <T extends Mutation> void binMutations(ClientContext context, List<T> mutations, Map<String,TabletServerMutations<T>> binnedMutations,
+        List<T> failures) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
       throw new NotImplementedException();
     }
     
     @Override
-    public List<Range> binRanges(Credentials credentials, List<Range> ranges, Map<String,Map<KeyExtent,List<Range>>> binnedRanges) throws AccumuloException,
+    public List<Range> binRanges(ClientContext context, List<Range> ranges, Map<String,Map<KeyExtent,List<Range>>> binnedRanges) throws AccumuloException,
         AccumuloSecurityException, TableNotFoundException {
       throw new NotImplementedException();
     }
@@ -96,20 +100,19 @@ public class BulkImporterTest {
     }
     
     @Override
-    public void invalidateCache(String server) {
+    public void invalidateCache(Instance instance, String server) {
       throw new NotImplementedException();
     }
   }
   
   @Test
   public void testFindOverlappingTablets() throws Exception {
-    Credentials credentials = null;
     MockTabletLocator locator = new MockTabletLocator();
     FileSystem fs = FileSystem.getLocal(CachedConfiguration.getInstance());
-    AccumuloConfiguration acuConf = AccumuloConfiguration.getDefaultConfiguration();
+    ClientContext context = new ClientContext(new MockInstance(), new Credentials("root", new PasswordToken("")), new ClientConfiguration());
     String file = "target/testFile.rf";
     fs.delete(new Path(file), true);
-    FileSKVWriter writer = FileOperations.getInstance().openWriter(file, fs, fs.getConf(), acuConf);
+    FileSKVWriter writer = FileOperations.getInstance().openWriter(file, fs, fs.getConf(), context.getConfiguration());
     writer.startDefaultLocalityGroup();
     Value empty = new Value(new byte[] {});
     writer.append(new Key("a", "cf", "cq"), empty);
@@ -133,8 +136,8 @@ public class BulkImporterTest {
     writer.append(new Key("iterator", "cf", "cq5"), empty);
     writer.append(new Key("xyzzy", "cf", "cq"), empty);
     writer.close();
-    VolumeManager vm = VolumeManagerImpl.get(acuConf);
-    List<TabletLocation> overlaps = BulkImporter.findOverlappingTablets(acuConf, vm, locator, new Path(file), credentials);
+    VolumeManager vm = VolumeManagerImpl.get(context.getConfiguration());
+    List<TabletLocation> overlaps = BulkImporter.findOverlappingTablets(context, vm, locator, new Path(file));
     Assert.assertEquals(5, overlaps.size());
     Collections.sort(overlaps);
     Assert.assertEquals(new KeyExtent(tableId, new Text("a"), null), overlaps.get(0).tablet_extent);
@@ -143,8 +146,8 @@ public class BulkImporterTest {
     Assert.assertEquals(new KeyExtent(tableId, new Text("j"), new Text("i")), overlaps.get(3).tablet_extent);
     Assert.assertEquals(new KeyExtent(tableId, null, new Text("l")), overlaps.get(4).tablet_extent);
     
-    List<TabletLocation> overlaps2 = BulkImporter.findOverlappingTablets(acuConf, vm, locator, new Path(file), new KeyExtent(tableId, new Text("h"), new Text(
-        "b")), credentials);
+    List<TabletLocation> overlaps2 = BulkImporter.findOverlappingTablets(context, vm, locator, new Path(file), new KeyExtent(tableId, new Text("h"), new Text(
+        "b")));
     Assert.assertEquals(3, overlaps2.size());
     Assert.assertEquals(new KeyExtent(tableId, new Text("d"), new Text("cm")), overlaps2.get(0).tablet_extent);
     Assert.assertEquals(new KeyExtent(tableId, new Text("dm"), new Text("d")), overlaps2.get(1).tablet_extent);
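
Tests get the same treatment, with a MockInstance standing in for a real cluster; the default AccumuloConfiguration the old code fetched directly is now reached through the context. A sketch, assuming the imports the hunk adds:

    // A throwaway client context for a unit test: mock instance, root user,
    // empty password, default client configuration.
    ClientContext context = new ClientContext(new MockInstance(),
        new Credentials("root", new PasswordToken("")), new ClientConfiguration());

    // Formerly AccumuloConfiguration.getDefaultConfiguration():
    VolumeManager vm = VolumeManagerImpl.get(context.getConfiguration());
    List<TabletLocation> overlaps =
        BulkImporter.findOverlappingTablets(context, vm, locator, new Path(file));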

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/base/src/test/java/org/apache/accumulo/server/conf/TableConfigurationTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/conf/TableConfigurationTest.java b/server/base/src/test/java/org/apache/accumulo/server/conf/TableConfigurationTest.java
index e66ec98..0ede571 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/conf/TableConfigurationTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/conf/TableConfigurationTest.java
@@ -21,7 +21,6 @@ import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.createMock;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
 import static org.junit.Assert.assertEquals;
@@ -60,16 +59,16 @@ public class TableConfigurationTest {
   public void setUp() {
     iid = UUID.randomUUID().toString();
     instance = createMock(Instance.class);
+    expect(instance.getInstanceID()).andReturn(iid).anyTimes();
+    expect(instance.getZooKeepers()).andReturn(ZOOKEEPERS);
+    expect(instance.getZooKeepersSessionTimeOut()).andReturn(ZK_SESSION_TIMEOUT);
+    replay(instance);
+
     parent = createMock(NamespaceConfiguration.class);
-    c = new TableConfiguration(iid, instance, TID, parent);
+    c = new TableConfiguration(instance, TID, parent);
     zcf = createMock(ZooCacheFactory.class);
     c.setZooCacheFactory(zcf);
 
-    expect(instance.getInstanceID()).andReturn(iid);
-    expectLastCall().anyTimes();
-    expect(instance.getZooKeepers()).andReturn(ZOOKEEPERS);
-    expect(instance.getZooKeepersSessionTimeOut()).andReturn(ZK_SESSION_TIMEOUT);
-    replay(instance);
     zc = createMock(ZooCache.class);
     expect(zcf.getZooCache(eq(ZOOKEEPERS), eq(ZK_SESSION_TIMEOUT), anyObject(TableConfWatcher.class))).andReturn(zc);
     replay(zcf);
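
The reordering above is not cosmetic: TableConfiguration no longer takes the instance id as a separate argument, so it presumably derives it from the Instance during construction, and an EasyMock mock only answers calls after replay(). The mock must therefore be fully primed before the constructor runs:

    // Prime and replay the mock first...
    Instance instance = createMock(Instance.class);
    expect(instance.getInstanceID()).andReturn(iid).anyTimes();
    expect(instance.getZooKeepers()).andReturn(ZOOKEEPERS);
    expect(instance.getZooKeepersSessionTimeOut()).andReturn(ZK_SESSION_TIMEOUT);
    replay(instance);

    // ...only then construct, since the constructor may query the mock.
    TableConfiguration c = new TableConfiguration(instance, TID, parent);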

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/base/src/test/java/org/apache/accumulo/server/master/balancer/TableLoadBalancerTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/master/balancer/TableLoadBalancerTest.java b/server/base/src/test/java/org/apache/accumulo/server/master/balancer/TableLoadBalancerTest.java
index 5c134a5..f107759 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/master/balancer/TableLoadBalancerTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/master/balancer/TableLoadBalancerTest.java
@@ -30,10 +30,14 @@ import org.apache.accumulo.core.client.admin.TableOperations;
 import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 import org.apache.accumulo.core.client.mock.MockInstance;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.DefaultConfiguration;
+import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
+import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.state.TabletMigration;
 import org.apache.hadoop.io.Text;
@@ -103,16 +107,6 @@ public class TableLoadBalancerTest {
       super();
     }
     
-    // need to use our mock instance
-    @Override
-    protected TableOperations getTableOperations() {
-      try {
-        return instance.getConnector("user", new PasswordToken("pass")).tableOperations();
-      } catch (Exception e) {
-        throw new RuntimeException(e);
-      }
-    }
-    
     // use our new classname to test class loading
     @Override
     protected String getLoadBalancerClassNameForTable(String table) {
@@ -129,6 +123,18 @@ public class TableLoadBalancerTest {
   @Test
   public void test() throws Exception {
     Connector c = instance.getConnector("user", new PasswordToken("pass"));
+    ServerConfigurationFactory confFactory = new ServerConfigurationFactory(instance) {
+      @Override
+      public TableConfiguration getTableConfiguration(String tableId) {
+        return new TableConfiguration(instance, tableId, null) {
+          @Override
+          public String get(Property property) {
+            // fake the table configuration lookup so the test doesn't consult zookeeper for per-table classpath properties
+            return DefaultConfiguration.getInstance().get(property);
+          }
+        };
+      }
+    };
     TableOperations tops = c.tableOperations();
     tops.create("t1");
     tops.create("t2");
@@ -141,11 +147,13 @@ public class TableLoadBalancerTest {
     Set<KeyExtent> migrations = Collections.emptySet();
     List<TabletMigration> migrationsOut = new ArrayList<TabletMigration>();
     TableLoadBalancer tls = new TableLoadBalancer();
+    tls.init(confFactory);
     tls.balance(state, migrations, migrationsOut);
     Assert.assertEquals(0, migrationsOut.size());
     
     state.put(mkts("10.0.0.2", "0x02030405"), status());
     tls = new TableLoadBalancer();
+    tls.init(confFactory);
     tls.balance(state, migrations, migrationsOut);
     int count = 0;
     Map<String,Integer> movedByTable = new HashMap<String,Integer>();
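
Instead of overriding getTableOperations() to reach the mock instance, the test now injects configuration through init(). The anonymous ServerConfigurationFactory above pins every per-table property to the defaults so the balancer never consults ZooKeeper; consolidated, the essential shape is:

    // Stubbed factory: per-table config answered from DefaultConfiguration.
    ServerConfigurationFactory confFactory = new ServerConfigurationFactory(instance) {
      @Override
      public TableConfiguration getTableConfiguration(String tableId) {
        return new TableConfiguration(instance, tableId, null) {
          @Override
          public String get(Property property) {
            return DefaultConfiguration.getInstance().get(property);
          }
        };
      }
    };

    TableLoadBalancer tls = new TableLoadBalancer();
    tls.init(confFactory);                       // must precede balance()
    tls.balance(state, migrations, migrationsOut);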

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/base/src/test/java/org/apache/accumulo/server/problems/ProblemReportingIteratorTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/problems/ProblemReportingIteratorTest.java b/server/base/src/test/java/org/apache/accumulo/server/problems/ProblemReportingIteratorTest.java
index 6c61469..2e0ad0c 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/problems/ProblemReportingIteratorTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/problems/ProblemReportingIteratorTest.java
@@ -28,11 +28,14 @@ import static org.junit.Assert.assertTrue;
 import java.util.Collection;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import org.apache.accumulo.core.client.mock.MockInstance;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.system.InterruptibleIterator;
+import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -46,7 +49,8 @@ public class ProblemReportingIteratorTest {
   @Before
   public void setUp() throws Exception {
     ii = createMock(InterruptibleIterator.class);
-    pri = new ProblemReportingIterator(TABLE, RESOURCE, false, ii);
+    AccumuloServerContext context = new AccumuloServerContext(new ServerConfigurationFactory(new MockInstance()));
+    pri = new ProblemReportingIterator(context, TABLE, RESOURCE, false, ii);
   }
 
   @Test
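
Server-side code gets the analogous wrapper: an AccumuloServerContext built from a ServerConfigurationFactory, which a MockInstance makes cheap to stand up in tests:

    // Minimal server context for a unit test.
    AccumuloServerContext context =
        new AccumuloServerContext(new ServerConfigurationFactory(new MockInstance()));
    ProblemReportingIterator pri =
        new ProblemReportingIterator(context, TABLE, RESOURCE, false, ii);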

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java b/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
index c8610d5..4202a7e 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
@@ -23,8 +23,8 @@ import java.io.File;
 import java.io.IOException;
 import java.util.UUID;
 
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.ConnectorImpl;
+import org.apache.accumulo.core.client.mock.MockInstance;
 import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.security.SystemCredentials.SystemToken;
@@ -36,8 +36,11 @@ import org.junit.Test;
  */
 public class SystemCredentialsTest {
   
+  private static MockInstance inst;
+
   @BeforeClass
   public static void setUp() throws IOException {
+    inst = new MockInstance();
     File testInstanceId = new File(new File(new File(new File("target"), "instanceTest"), ServerConstants.INSTANCE_ID_DIR), UUID.fromString(
         "00000000-0000-0000-0000-000000000000").toString());
     if (!testInstanceId.exists()) {
@@ -53,20 +56,20 @@ public class SystemCredentialsTest {
   }
   
   /**
-   * This is a test to ensure the string literal in {@link ConnectorImpl#ConnectorImpl(Instance, Credentials)} is kept up-to-date if we move the
-   * {@link SystemToken}<br/>
+   * This is a test to ensure the string literal in {@link ConnectorImpl#ConnectorImpl(org.apache.accumulo.core.client.impl.ClientContext)} is kept up-to-date
+   * if we move the {@link SystemToken}<br/>
    * This check will not be needed after ACCUMULO-1578
    */
   @Test
   public void testSystemToken() {
     assertEquals("org.apache.accumulo.server.security.SystemCredentials$SystemToken", SystemToken.class.getName());
-    assertEquals(SystemCredentials.get().getToken().getClass(), SystemToken.class);
+    assertEquals(SystemCredentials.get(inst).getToken().getClass(), SystemToken.class);
   }
   
   @Test
   public void testSystemCredentials() {
-    Credentials a = SystemCredentials.get();
-    Credentials b = SystemCredentials.get();
-    assertTrue(a == b);
+    Credentials a = SystemCredentials.get(inst);
+    Credentials b = SystemCredentials.get(inst);
+    assertTrue(a.equals(b));
   }
 }
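
The assertion change above is the telling detail: SystemCredentials.get() is now parameterized by an Instance, and two calls are apparently no longer guaranteed to return the same cached object, so the test checks value equality rather than reference identity:

    Credentials a = SystemCredentials.get(inst);
    Credentials b = SystemCredentials.get(inst);
    assertTrue(a.equals(b)); // value equality still holds
    // assertTrue(a == b);   // identity no longer guaranteed after the change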

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/base/src/test/java/org/apache/accumulo/server/util/ReplicationTableUtilTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/ReplicationTableUtilTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/ReplicationTableUtilTest.java
index 88f13c9..4db171e 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/ReplicationTableUtilTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/ReplicationTableUtilTest.java
@@ -31,11 +31,14 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.UUID;
 
+import org.apache.accumulo.core.client.ClientConfiguration;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.IteratorSetting.Column;
 import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.Writer;
+import org.apache.accumulo.core.client.mock.MockInstance;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.ColumnUpdate;
@@ -71,6 +74,7 @@ public class ReplicationTableUtilTest {
 
     // Mock a Writer to just add the mutation to a list
     EasyMock.expectLastCall().andAnswer(new IAnswer<Object>() {
+      @Override
       public Object answer() {
         mutations.add(((Mutation) EasyMock.getCurrentArguments()[0]));
         return null;
@@ -80,6 +84,7 @@ public class ReplicationTableUtilTest {
     EasyMock.replay(writer);
 
     Credentials creds = new Credentials("root", new PasswordToken(""));
+    ClientContext context = new ClientContext(new MockInstance(), creds, new ClientConfiguration());
 
     // Magic hook to create a Writer
     ReplicationTableUtil.addWriter(creds, writer);
@@ -89,7 +94,7 @@ public class ReplicationTableUtilTest {
     String myFile = "file:////home/user/accumulo/wal/server+port/" + uuid;
 
     long createdTime = System.currentTimeMillis();
-    ReplicationTableUtil.updateFiles(creds, new KeyExtent(new Text("1"), null, null), Collections.singleton(myFile), StatusUtil.fileCreated(createdTime));
+    ReplicationTableUtil.updateFiles(context, new KeyExtent(new Text("1"), null, null), Collections.singleton(myFile), StatusUtil.fileCreated(createdTime));
 
     verify(writer);
 
@@ -144,7 +149,7 @@ public class ReplicationTableUtilTest {
     tops.attachIterator(myMetadataTable, combiner);
     expectLastCall().once();
 
-    expect(tops.getProperties(myMetadataTable)).andReturn((Iterable<Entry<String,String>>) Collections.<Entry<String,String>> emptyList());
+    expect(tops.getProperties(myMetadataTable)).andReturn(Collections.<Entry<String,String>> emptyList());
     tops.setProperty(myMetadataTable, Property.TABLE_FORMATTER_CLASS.getKey(), ReplicationTableUtil.STATUS_FORMATTER_CLASS_NAME);
     expectLastCall().once();
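
This test shows the refactor's seam mid-migration: the test-only writer hook still keys off the raw Credentials, while the public entry point now takes the ClientContext that wraps those same credentials:

    Credentials creds = new Credentials("root", new PasswordToken(""));
    ClientContext context = new ClientContext(new MockInstance(), creds, new ClientConfiguration());

    ReplicationTableUtil.addWriter(creds, writer);   // legacy hook: raw credentials
    ReplicationTableUtil.updateFiles(context,        // new API: full context
        new KeyExtent(new Text("1"), null, null),
        Collections.singleton(myFile), StatusUtil.fileCreated(createdTime));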
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/42c25faa/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
----------------------------------------------------------------------
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
index 4a9dc3e..774548e 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
@@ -35,7 +35,6 @@ import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
@@ -50,7 +49,6 @@ import org.apache.accumulo.core.replication.ReplicationTableOfflineException;
 import org.apache.accumulo.core.replication.StatusUtil;
 import org.apache.accumulo.core.replication.proto.Replication.Status;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client;
@@ -60,10 +58,9 @@ import org.apache.accumulo.core.trace.Tracer;
 import org.apache.accumulo.core.util.AddressUtil;
 import org.apache.accumulo.core.util.ThriftUtil;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.server.AccumuloServerContext;
 import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
 import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.hadoop.fs.FileStatus;
@@ -80,7 +77,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
 public class GarbageCollectWriteAheadLogs {
   private static final Logger log = LoggerFactory.getLogger(GarbageCollectWriteAheadLogs.class);
 
-  private final Instance instance;
+  private final AccumuloServerContext context;
   private final VolumeManager fs;
 
   private boolean useTrash;
@@ -88,15 +85,15 @@ public class GarbageCollectWriteAheadLogs {
   /**
    * Creates a new GC WAL object.
    *
-   * @param instance
-   *          instance to use
+   * @param context
+   *          the garbage collection server's context
    * @param fs
    *          volume manager to use
    * @param useTrash
    *          true to move files to trash rather than delete them
    */
-  GarbageCollectWriteAheadLogs(Instance instance, VolumeManager fs, boolean useTrash) throws IOException {
-    this.instance = instance;
+  GarbageCollectWriteAheadLogs(AccumuloServerContext context, VolumeManager fs, boolean useTrash) throws IOException {
+    this.context = context;
     this.fs = fs;
     this.useTrash = useTrash;
   }
@@ -107,7 +104,7 @@ public class GarbageCollectWriteAheadLogs {
    * @return instance
    */
   Instance getInstance() {
-    return instance;
+    return context.getInstance();
   }
 
   /**
@@ -148,7 +145,7 @@ public class GarbageCollectWriteAheadLogs {
 
       span = Trace.start("removeMetadataEntries");
       try {
-        count = removeMetadataEntries(nameToFileMap, sortedWALogs, status, SystemCredentials.get());
+        count = removeMetadataEntries(nameToFileMap, sortedWALogs, status);
       } catch (Exception ex) {
         log.error("Unable to scan metadata table", ex);
         return;
@@ -161,7 +158,7 @@ public class GarbageCollectWriteAheadLogs {
 
       span = Trace.start("removeReplicationEntries");
       try {
-        count = removeReplicationEntries(nameToFileMap, sortedWALogs, status, SystemCredentials.get());
+        count = removeReplicationEntries(nameToFileMap, sortedWALogs, status);
       } catch (Exception ex) {
         log.error("Unable to scan replication table", ex);
         return;
@@ -193,7 +190,7 @@ public class GarbageCollectWriteAheadLogs {
 
   boolean holdsLock(HostAndPort addr) {
     try {
-      String zpath = ZooUtil.getRoot(instance) + Constants.ZTSERVERS + "/" + addr.toString();
+      String zpath = ZooUtil.getRoot(context.getInstance()) + Constants.ZTSERVERS + "/" + addr.toString();
       List<String> children = ZooReaderWriter.getInstance().getChildren(zpath);
       return !(children == null || children.isEmpty());
     } catch (KeeperException.NoNodeException ex) {
@@ -205,7 +202,6 @@ public class GarbageCollectWriteAheadLogs {
   }
 
   private int removeFiles(Map<String,Path> nameToFileMap, Map<String,ArrayList<Path>> serverToFileMap, Map<String,Path> sortedWALogs, final GCStatus status) {
-    AccumuloConfiguration conf = new ServerConfigurationFactory(instance).getConfiguration();
     for (Entry<String,ArrayList<Path>> entry : serverToFileMap.entrySet()) {
       if (entry.getKey().isEmpty()) {
         // old-style log entry, just remove it
@@ -240,8 +236,8 @@ public class GarbageCollectWriteAheadLogs {
         } else {
           Client tserver = null;
           try {
-            tserver = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, conf);
-            tserver.removeLogs(Tracer.traceInfo(), SystemCredentials.get().toThrift(instance), paths2strings(entry.getValue()));
+            tserver = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, context);
+            tserver.removeLogs(Tracer.traceInfo(), context.rpcCreds(), paths2strings(entry.getValue()));
             log.debug("deleted " + entry.getValue() + " from " + entry.getKey());
             status.currentLog.deleted += entry.getValue().size();
           } catch (TException e) {
@@ -315,10 +311,10 @@ public class GarbageCollectWriteAheadLogs {
     return result;
   }
 
-  protected int removeMetadataEntries(Map<String,Path> nameToFileMap, Map<String,Path> sortedWALogs, GCStatus status, Credentials creds) throws IOException,
-      KeeperException, InterruptedException {
+  protected int removeMetadataEntries(Map<String,Path> nameToFileMap, Map<String,Path> sortedWALogs, GCStatus status) throws IOException, KeeperException,
+      InterruptedException {
     int count = 0;
-    Iterator<LogEntry> iterator = MetadataTableUtil.getLogEntries(creds);
+    Iterator<LogEntry> iterator = MetadataTableUtil.getLogEntries(context);
 
     // For each WAL reference in the metadata table
     while (iterator.hasNext()) {
@@ -345,11 +341,11 @@ public class GarbageCollectWriteAheadLogs {
     return count;
   }
 
-  protected int removeReplicationEntries(Map<String,Path> nameToFileMap, Map<String,Path> sortedWALogs, GCStatus status, Credentials creds) throws IOException,
-      KeeperException, InterruptedException {
+  protected int removeReplicationEntries(Map<String,Path> nameToFileMap, Map<String,Path> sortedWALogs, GCStatus status) throws IOException, KeeperException,
+      InterruptedException {
     Connector conn;
     try {
-      conn = instance.getConnector(creds.getPrincipal(), creds.getToken());
+      conn = context.getConnector();
     } catch (AccumuloException | AccumuloSecurityException e) {
       log.error("Failed to get connector", e);
       throw new IllegalArgumentException(e);
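
The garbage collector's changes round out the pattern on the server side: the AccumuloServerContext supplies the Connector, the Thrift transport settings, and the RPC credentials that were previously assembled by hand from Instance, ServerConfigurationFactory, and SystemCredentials. The resulting call shape, drawn from the hunks above:

    // Connector straight from the context (was instance.getConnector(principal, token)):
    Connector conn = context.getConnector();

    // Thrift client and credentials from the same context (was a conf object
    // plus SystemCredentials.get().toThrift(instance)):
    Client tserver = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, context);
    tserver.removeLogs(Tracer.traceInfo(), context.rpcCreds(), paths2strings(entry.getValue()));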

