accumulo-notifications mailing list archives

From: GitBox <...@apache.org>
Subject: [GitHub] mikewalch closed pull request #28: Updated examples to work with Accumulo 2.0.0-alpha-1
Date: Fri, 26 Oct 2018 20:10:59 GMT
mikewalch closed pull request #28: Updated examples to work with Accumulo 2.0.0-alpha-1
URL: https://github.com/apache/accumulo-examples/pull/28

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:
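
A quick orientation before the diff: the changes consistently replace the 1.x Connector API with the 2.0 AccumuloClient builder (Accumulo.newClient().usingProperties(...).build()). The snippet below is a minimal sketch of that new pattern, not part of this pull request; the table name "sketch_test" is assumed for illustration, while the calls themselves mirror those used throughout the diff.

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;

public class ClientSketch {
  public static void main(String[] args) throws Exception {
    // Build a client from a properties file (the 2.0 replacement for Connector/ZooKeeperInstance)
    AccumuloClient client = Accumulo.newClient()
        .usingProperties("conf/accumulo-client.properties").build();
    try {
      client.tableOperations().create("sketch_test"); // hypothetical table name
    } catch (TableExistsException e) {
      // ignore if the table already exists, as the examples do
    }
    // BatchWriter is AutoCloseable, so try-with-resources flushes and closes it
    try (BatchWriter bw = client.createBatchWriter("sketch_test")) {
      Mutation m = new Mutation("row1");
      m.put("cf", "cq", new Value("value1".getBytes()));
      bw.addMutation(m);
    }
  }
}

The original diff from the pull request follows.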


diff --git a/.travis.yml b/.travis.yml
index 8b4786b..42dc0da 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -13,19 +13,10 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 language: java
-notifications:
-  irc:
-    channels:
-      - "chat.freenode.net#accumulo"
-    use_notice: true
-    on_success: change
-    on_failure: always
-    template:
-      - "%{result} %{repository_slug} %{branch} (%{build_url}): %{message}"
 cache:
   directories:
     - $HOME/.m2
 jdk:
-  - oraclejdk8
+  - openjdk8
 install: true
 script: mvn clean verify -DskipITs
diff --git a/README.md b/README.md
index dbea091..d3a8c74 100644
--- a/README.md
+++ b/README.md
@@ -16,6 +16,8 @@ limitations under the License.
 -->
 # Apache Accumulo Examples
 
+[![Build Status][ti]][tl]
+
 ## Setup instructions
 
 Before running any of the examples, the following steps must be performed.
@@ -124,3 +126,5 @@ This repository can be used to test Accumulo release candidates.  See
 [tabletofile]: docs/tabletofile.md
 [terasort]: docs/terasort.md
 [visibility]: docs/visibility.md
+[ti]: https://travis-ci.org/apache/accumulo-examples.svg?branch=master
+[tl]: https://travis-ci.org/apache/accumulo-examples
diff --git a/pom.xml b/pom.xml
index 9f5f986..1e3fe68 100644
--- a/pom.xml
+++ b/pom.xml
@@ -33,8 +33,8 @@
   <description>Example code and corresponding documentation for using Apache Accumulo</description>
 
   <properties>
-    <accumulo.version>2.0.0-SNAPSHOT</accumulo.version>
-    <hadoop.version>2.6.4</hadoop.version>
+    <accumulo.version>2.0.0-alpha-1</accumulo.version>
+    <hadoop.version>3.1.1</hadoop.version>
     <slf4j.version>1.7.21</slf4j.version>
     <maven.compiler.source>1.8</maven.compiler.source>
     <maven.compiler.target>1.8</maven.compiler.target>
@@ -45,7 +45,7 @@
       <dependency>
         <groupId>com.google.guava</groupId>
         <artifactId>guava</artifactId>
-        <version>14.0.1</version>
+        <version>26.0-jre</version>
       </dependency>
     </dependencies>
   </dependencyManagement>
@@ -131,17 +131,17 @@
     <dependency>
       <groupId>com.beust</groupId>
       <artifactId>jcommander</artifactId>
-      <version>1.48</version>
+      <version>1.72</version>
     </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
-      <version>14.0.1</version>
+      <version>26.0-jre</version>
     </dependency>
     <dependency>
       <groupId>commons-configuration</groupId>
       <artifactId>commons-configuration</artifactId>
-      <version>1.6</version>
+      <version>1.10</version>
     </dependency>
     <dependency>
       <groupId>jline</groupId>
diff --git a/src/main/java/org/apache/accumulo/examples/bloom/BloomBatchScanner.java b/src/main/java/org/apache/accumulo/examples/bloom/BloomBatchScanner.java
index 8aeaf12..feb690a 100644
--- a/src/main/java/org/apache/accumulo/examples/bloom/BloomBatchScanner.java
+++ b/src/main/java/org/apache/accumulo/examples/bloom/BloomBatchScanner.java
@@ -23,10 +23,11 @@
 import java.util.Map.Entry;
 import java.util.Random;
 
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
@@ -39,13 +40,13 @@
 public class BloomBatchScanner {
 
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
 
-    scan(connector, "bloom_test1", 7);
-    scan(connector, "bloom_test2", 7);
+    scan(client, "bloom_test1", 7);
+    scan(client, "bloom_test2", 7);
   }
 
-  static void scan(Connector connector, String tableName, int seed) throws TableNotFoundException {
+  static void scan(AccumuloClient client, String tableName, int seed) throws TableNotFoundException {
     Random r = new Random(seed);
     HashSet<Range> ranges = new HashSet<>();
     HashMap<String,Boolean> expectedRows = new HashMap<>();
@@ -61,7 +62,7 @@ static void scan(Connector connector, String tableName, int seed) throws TableNo
     long lookups = ranges.size();
 
     System.out.println("Scanning " + tableName + " with seed " + seed);
-    try (BatchScanner scan = connector.createBatchScanner(tableName, Authorizations.EMPTY, 20)) {
+    try (BatchScanner scan = client.createBatchScanner(tableName, Authorizations.EMPTY, 20)) {
       scan.setRanges(ranges);
       for (Entry<Key, Value> entry : scan) {
         Key key = entry.getKey();
diff --git a/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java b/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java
index 4dd2ed8..f46f3c0 100644
--- a/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java
+++ b/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java
@@ -18,10 +18,11 @@
 
 import java.util.Random;
 
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
@@ -32,41 +33,41 @@
 public class BloomFilters {
 
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
     try {
       System.out.println("Creating bloom_test1 and bloom_test2");
-      connector.tableOperations().create("bloom_test1");
-      connector.tableOperations().setProperty("bloom_test1", "table.compaction.major.ratio", "7");
-      connector.tableOperations().create("bloom_test2");
-      connector.tableOperations().setProperty("bloom_test2", "table.bloom.enabled", "true");
-      connector.tableOperations().setProperty("bloom_test2", "table.compaction.major.ratio", "7");
+      client.tableOperations().create("bloom_test1");
+      client.tableOperations().setProperty("bloom_test1", "table.compaction.major.ratio", "7");
+      client.tableOperations().create("bloom_test2");
+      client.tableOperations().setProperty("bloom_test2", "table.bloom.enabled", "true");
+      client.tableOperations().setProperty("bloom_test2", "table.compaction.major.ratio", "7");
     } catch (TableExistsException e) {
       // ignore
     }
 
     // Write a million rows 3 times flushing files to disk separately
     System.out.println("Writing data to bloom_test1");
-    writeData(connector, "bloom_test1", 7);
-    connector.tableOperations().flush("bloom_test1", null, null, true);
-    writeData(connector, "bloom_test1", 8);
-    connector.tableOperations().flush("bloom_test1", null, null, true);
-    writeData(connector, "bloom_test1", 9);
-    connector.tableOperations().flush("bloom_test1", null, null, true);
+    writeData(client, "bloom_test1", 7);
+    client.tableOperations().flush("bloom_test1", null, null, true);
+    writeData(client, "bloom_test1", 8);
+    client.tableOperations().flush("bloom_test1", null, null, true);
+    writeData(client, "bloom_test1", 9);
+    client.tableOperations().flush("bloom_test1", null, null, true);
 
     System.out.println("Writing data to bloom_test2");
-    writeData(connector, "bloom_test2", 7);
-    connector.tableOperations().flush("bloom_test2", null, null, true);
-    writeData(connector, "bloom_test2", 8);
-    connector.tableOperations().flush("bloom_test2", null, null, true);
-    writeData(connector, "bloom_test2", 9);
-    connector.tableOperations().flush("bloom_test2", null, null, true);
+    writeData(client, "bloom_test2", 7);
+    client.tableOperations().flush("bloom_test2", null, null, true);
+    writeData(client, "bloom_test2", 8);
+    client.tableOperations().flush("bloom_test2", null, null, true);
+    writeData(client, "bloom_test2", 9);
+    client.tableOperations().flush("bloom_test2", null, null, true);
   }
 
   // write a million random rows
-  static void writeData(Connector connector, String tableName, int seed) throws TableNotFoundException,
+  static void writeData(AccumuloClient client, String tableName, int seed) throws TableNotFoundException,
         MutationsRejectedException{
     Random r = new Random(seed);
-    try (BatchWriter bw = connector.createBatchWriter(tableName)) {
+    try (BatchWriter bw = client.createBatchWriter(tableName)) {
       for (int x = 0; x < 1_000_000; x++) {
         Long rowId = RandomBatchWriter.abs(r.nextLong()) % 1_000_000_000;
         Mutation m = RandomBatchWriter.createMutation(rowId, 50, new ColumnVisibility());
diff --git a/src/main/java/org/apache/accumulo/examples/bloom/BloomFiltersNotFound.java b/src/main/java/org/apache/accumulo/examples/bloom/BloomFiltersNotFound.java
index 21a8738..1edc2c4 100644
--- a/src/main/java/org/apache/accumulo/examples/bloom/BloomFiltersNotFound.java
+++ b/src/main/java/org/apache/accumulo/examples/bloom/BloomFiltersNotFound.java
@@ -18,30 +18,31 @@
 
 import static org.apache.accumulo.examples.bloom.BloomFilters.writeData;
 
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 
 public class BloomFiltersNotFound {
 
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
     try {
-      connector.tableOperations().create("bloom_test3");
-      connector.tableOperations().create("bloom_test4");
-      connector.tableOperations().setProperty("bloom_test4", "table.bloom.enabled", "true");
+      client.tableOperations().create("bloom_test3");
+      client.tableOperations().create("bloom_test4");
+      client.tableOperations().setProperty("bloom_test4", "table.bloom.enabled", "true");
     } catch (TableExistsException e) {
       // ignore
     }
     System.out.println("Writing data to bloom_test3 and bloom_test4 (bloom filters enabled)");
-    writeData(connector, "bloom_test3", 7);
-    connector.tableOperations().flush("bloom_test3", null, null, true);
-    writeData(connector, "bloom_test4", 7);
-    connector.tableOperations().flush("bloom_test4", null, null, true);
+    writeData(client, "bloom_test3", 7);
+    client.tableOperations().flush("bloom_test3", null, null, true);
+    writeData(client, "bloom_test4", 7);
+    client.tableOperations().flush("bloom_test4", null, null, true);
 
-    BloomBatchScanner.scan(connector, "bloom_test3", 8);
-    BloomBatchScanner.scan(connector, "bloom_test4", 8);
+    BloomBatchScanner.scan(client, "bloom_test3", 8);
+    BloomBatchScanner.scan(client, "bloom_test4", 8);
   }
 }
diff --git a/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java b/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java
index 4df1eae..40b5cfa 100644
--- a/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java
+++ b/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java
@@ -19,18 +19,14 @@
 import java.io.File;
 import java.time.Duration;
 
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.commons.configuration.Configuration;
-import org.apache.commons.configuration.ConfigurationException;
-import org.apache.commons.configuration.PropertiesConfiguration;
 
 import com.beust.jcommander.IStringConverter;
 import com.beust.jcommander.Parameter;
@@ -98,40 +94,38 @@ public File convert(String filename) {
   }
 
   @Parameter(names = {"-c", "--conf"}, required = true, converter = PropertiesConverter.class,
-      description = "Config file for connecting to Accumulo.  See README.md for details.")
+      description = "Accumulo client properties file.  See README.md for details.")
   private File config = null;
 
   @Parameter(names = {"-auths", "--auths"}, converter = AuthConverter.class, description = "the authorizations to use when reading or writing")
   public Authorizations auths = Authorizations.EMPTY;
 
-  public Connector getConnector() {
-    try {
-      ZooKeeperInstance zki = new ZooKeeperInstance(getClientConfiguration());
-      return zki.getConnector(getPrincipal(), getToken());
-    } catch (AccumuloException | AccumuloSecurityException e) {
-      throw new RuntimeException(e);
+  private ClientInfo cachedInfo = null;
+  private AccumuloClient cachedAccumuloClient = null;
+
+  public AccumuloClient getAccumuloClient() {
+    if (cachedAccumuloClient == null) {
+      try {
+        cachedAccumuloClient = Accumulo.newClient().usingClientInfo(getClientInfo()).build();
+      } catch (AccumuloException|AccumuloSecurityException e) {
+        throw new IllegalArgumentException(e);
+      }
     }
+    return cachedAccumuloClient;
   }
 
-  public ClientConfiguration getClientConfiguration() {
-    return ClientConfiguration.fromFile(config);
+  public ClientInfo getClientInfo() {
+    if (cachedInfo == null) {
+      cachedInfo = Accumulo.newClient().usingProperties(config.getAbsolutePath()).info();
+    }
+    return cachedInfo;
   }
 
   public String getPrincipal() {
-    String user = getClientConfiguration().getString("accumulo.examples.principal");
-    if(user != null)
-      return user;
-
-    return "root";
+    return getClientInfo().getPrincipal();
   }
 
   public AuthenticationToken getToken() {
-    AuthenticationToken token = new PasswordToken("secret");
-    String password = getClientConfiguration().getString("accumulo.examples.password");
-    if(password != null){
-      token = new PasswordToken(password);
-    }
-
-    return token;
+    return getClientInfo().getAuthenticationToken();
   }
 }
diff --git a/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOpts.java b/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOpts.java
index b73ddd4..87b296f 100644
--- a/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOpts.java
+++ b/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOpts.java
@@ -16,11 +16,10 @@
  */
 package org.apache.accumulo.examples.cli;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
 import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
-import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.KerberosToken;
 import org.apache.accumulo.core.security.SystemPermission;
@@ -36,8 +35,8 @@
   private static final Logger log = LoggerFactory.getLogger(MapReduceClientOpts.class);
 
   public void setAccumuloConfigs(Job job) throws AccumuloSecurityException {
-    AccumuloInputFormat.setZooKeeperInstance(job, this.getClientConfiguration());
-    AccumuloOutputFormat.setZooKeeperInstance(job, this.getClientConfiguration());
+    AccumuloInputFormat.setClientInfo(job, this.getClientInfo());
+    AccumuloInputFormat.setClientInfo(job, this.getClientInfo());
   }
 
   @Override
@@ -57,19 +56,19 @@ public AuthenticationToken getToken() {
         String newPrincipal = user.getUserName();
         log.info("Obtaining delegation token for {}", newPrincipal);
 
-        Connector conn = getConnector();
+        AccumuloClient client = getAccumuloClient();
 
         // Do the explicit check to see if the user has the permission to get a delegation token
-        if (!conn.securityOperations().hasSystemPermission(conn.whoami(), SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
+        if (!client.securityOperations().hasSystemPermission(client.whoami(), SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
           log.error(
               "{} doesn't have the {} SystemPermission neccesary to obtain a delegation token. MapReduce tasks cannot automatically use the client's"
                   + " credentials on remote servers. Delegation tokens provide a means to run MapReduce without distributing the user's credentials.",
               user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
-          throw new IllegalStateException(conn.whoami() + " does not have permission to obtain a delegation token");
+          throw new IllegalStateException(client.whoami() + " does not have permission to obtain a delegation token");
         }
 
         // Get the delegation token from Accumulo
-        return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+        return client.securityOperations().getDelegationToken(new DelegationTokenConfig());
       } catch (Exception e) {
         final String msg = "Failed to acquire DelegationToken for use with MapReduce";
         log.error(msg, e);
diff --git a/src/main/java/org/apache/accumulo/examples/client/CountingVerifyingReceiver.java b/src/main/java/org/apache/accumulo/examples/client/CountingVerifyingReceiver.java
index ac5eb11..092144d 100644
--- a/src/main/java/org/apache/accumulo/examples/client/CountingVerifyingReceiver.java
+++ b/src/main/java/org/apache/accumulo/examples/client/CountingVerifyingReceiver.java
@@ -23,7 +23,6 @@
 
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.io.Text;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
diff --git a/src/main/java/org/apache/accumulo/examples/client/Flush.java b/src/main/java/org/apache/accumulo/examples/client/Flush.java
index ea183d2..9827885 100644
--- a/src/main/java/org/apache/accumulo/examples/client/Flush.java
+++ b/src/main/java/org/apache/accumulo/examples/client/Flush.java
@@ -16,7 +16,7 @@
  */
 package org.apache.accumulo.examples.client;
 
-import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
 
 /**
@@ -28,8 +28,8 @@ public static void main(String[] args) {
     ClientOnRequiredTable opts = new ClientOnRequiredTable();
     opts.parseArgs(Flush.class.getName(), args);
     try {
-      Connector connector = opts.getConnector();
-      connector.tableOperations().flush(opts.getTableName(), null, null, true);
+      AccumuloClient client = opts.getAccumuloClient();
+      client.tableOperations().flush(opts.getTableName(), null, null, true);
     } catch (Exception e) {
       throw new RuntimeException(e);
     }
diff --git a/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java b/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java
index 9a88d39..60af086 100644
--- a/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java
+++ b/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java
@@ -25,16 +25,19 @@
 import java.util.Map.Entry;
 import java.util.Random;
 
+import com.beust.jcommander.Parameter;
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.examples.cli.Help;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -45,10 +48,18 @@
 
   private static final Logger log = LoggerFactory.getLogger(RandomBatchScanner.class);
 
+  static class Opts extends Help {
+    @Parameter(names = "-c")
+    String clientProps = "conf/accumulo-client.properties";
+  }
+
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    Opts opts = new Opts();
+    opts.parseArgs(RandomBatchScanner.class.getName(), args);
+
+    AccumuloClient client = Accumulo.newClient().usingProperties(opts.clientProps).build();
     try {
-      connector.tableOperations().create("batch");
+      client.tableOperations().create("batch");
     } catch (TableExistsException e) {
       // ignore
     }
@@ -70,7 +81,7 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
     long lookups = 0;
 
     log.info("Reading ranges using BatchScanner");
-    try (BatchScanner scan = connector.createBatchScanner("batch", Authorizations.EMPTY, 20)) {
+    try (BatchScanner scan = client.createBatchScanner("batch", Authorizations.EMPTY, 20)) {
       scan.setRanges(ranges);
       for (Entry<Key, Value> entry : scan) {
         Key key = entry.getKey();
diff --git a/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java b/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java
index 20f2f94..c2dc295 100644
--- a/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java
+++ b/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java
@@ -22,10 +22,10 @@
 import java.util.Random;
 import java.util.Set;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.security.SecurityErrorCode;
@@ -136,8 +136,8 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
     else {
       r = new Random(opts.seed);
     }
-    Connector connector = opts.getConnector();
-    BatchWriter bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
+    AccumuloClient client = opts.getAccumuloClient();
+    BatchWriter bw = client.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
 
     // reuse the ColumnVisibility object to improve performance
     ColumnVisibility cv = opts.visiblity;
diff --git a/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java b/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java
index 0fcb4ce..e19c866 100644
--- a/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java
+++ b/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java
@@ -18,8 +18,10 @@
 
 import java.util.Map.Entry;
 
+import com.beust.jcommander.Parameter;
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.NamespaceExistsException;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableExistsException;
@@ -27,6 +29,7 @@
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.examples.cli.Help;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,23 +39,30 @@
   private static final String namespace = "examples";
   private static final String table = namespace + ".readwrite";
 
+  static class Opts extends Help {
+    @Parameter(names = "-c")
+    String clientProps = "conf/accumulo-client.properties";
+  }
+
   public static void main(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(ReadWriteExample.class.getName(), args);
 
-    Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties(opts.clientProps).build();
 
     try {
-      connector.namespaceOperations().create(namespace);
+      client.namespaceOperations().create(namespace);
     } catch (NamespaceExistsException e) {
       // ignore
     }
     try {
-      connector.tableOperations().create(table);
+      client.tableOperations().create(table);
     } catch (TableExistsException e) {
       // ignore
     }
 
     // write data
-    try (BatchWriter writer = connector.createBatchWriter(table)) {
+    try (BatchWriter writer = client.createBatchWriter(table)) {
       for (int i = 0; i < 10; i++) {
         Mutation m = new Mutation("hello" + i);
         m.put("cf", "cq", new Value("world" + i));
@@ -61,13 +71,13 @@ public static void main(String[] args) throws Exception {
     }
 
     // read data
-    try (Scanner scanner = connector.createScanner(table, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
       for (Entry<Key, Value> entry : scanner) {
         log.info(entry.getKey().toString() + " -> " + entry.getValue().toString());
       }
     }
 
     // delete table
-    connector.tableOperations().delete(table);
+    client.tableOperations().delete(table);
   }
 }
diff --git a/src/main/java/org/apache/accumulo/examples/client/RowOperations.java b/src/main/java/org/apache/accumulo/examples/client/RowOperations.java
index 7787c13..8a256a2 100644
--- a/src/main/java/org/apache/accumulo/examples/client/RowOperations.java
+++ b/src/main/java/org/apache/accumulo/examples/client/RowOperations.java
@@ -18,10 +18,12 @@
 
 import java.util.Map.Entry;
 
+import com.beust.jcommander.Parameter;
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.NamespaceExistsException;
 import org.apache.accumulo.core.client.Scanner;
@@ -32,6 +34,7 @@
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.examples.cli.Help;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -44,16 +47,16 @@
   private static final String namespace = "examples";
   private static final String table = namespace + ".rowops";
 
-  private static void printAll(Connector connector) throws TableNotFoundException {
-    try (Scanner scanner = connector.createScanner("rowops", Authorizations.EMPTY)) {
+  private static void printAll(AccumuloClient client) throws TableNotFoundException {
+    try (Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
       for (Entry<Key,Value> entry : scanner) {
         log.info("Key: " + entry.getKey().toString() + " Value: " + entry.getValue().toString());
       }
     }
   }
 
-  private static void printRow(String row, Connector connector) throws TableNotFoundException {
-    try (Scanner scanner = connector.createScanner("rowops", Authorizations.EMPTY)) {
+  private static void printRow(String row, AccumuloClient client) throws TableNotFoundException {
+    try (Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
       scanner.setRange(Range.exact(row));
       for (Entry<Key,Value> entry : scanner) {
         log.info("Key: " + entry.getKey().toString() + " Value: " + entry.getValue().toString());
@@ -61,9 +64,9 @@ private static void printRow(String row, Connector connector) throws TableNotFou
     }
   }
 
-  private static void deleteRow(String row, Connector connector, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException {
+  private static void deleteRow(String row, AccumuloClient client, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException {
     Mutation mut = new Mutation(row);
-    try (Scanner scanner = connector.createScanner("rowops", Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
       scanner.setRange(Range.exact(row));
       for (Entry<Key,Value> entry : scanner) {
         mut.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
@@ -73,15 +76,23 @@ private static void deleteRow(String row, Connector connector, BatchWriter bw) t
     bw.flush();
   }
 
+  static class Opts extends Help {
+    @Parameter(names = "-c")
+    String clientProps = "conf/accumulo-client.properties";
+  }
+
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    Opts opts = new Opts();
+    opts.parseArgs(RowOperations.class.getName(), args);
+
+    AccumuloClient client = Accumulo.newClient().usingProperties(opts.clientProps).build();
     try {
-      connector.namespaceOperations().create(namespace);
+      client.namespaceOperations().create(namespace);
     } catch (NamespaceExistsException e) {
       // ignore
     }
     try {
-      connector.tableOperations().create(table);
+      client.tableOperations().create(table);
     } catch (TableExistsException e) {
       // ignore
     }
@@ -104,7 +115,7 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
     mut3.put("col", "3", "v3");
 
     // Now we'll make a Batch Writer
-    try (BatchWriter bw = connector.createBatchWriter(table)) {
+    try (BatchWriter bw = client.createBatchWriter(table)) {
 
       // And add the mutations
       bw.addMutation(mut1);
@@ -115,22 +126,22 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
       bw.flush();
 
       log.info("This is only row2");
-      printRow("row2", connector);
+      printRow("row2", client);
 
       log.info("This is everything");
-      printAll(connector);
+      printAll(client);
 
-      deleteRow("row2", connector, bw);
+      deleteRow("row2", client, bw);
 
       log.info("This is row1 and row3");
-      printAll(connector);
+      printAll(client);
 
-      deleteRow("row1", connector, bw);
+      deleteRow("row1", client, bw);
    }
 
     log.info("This is just row3");
-    printAll(connector);
+    printAll(client);
 
-    connector.tableOperations().delete(table);
+    client.tableOperations().delete(table);
   }
 }
diff --git a/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java b/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java
index 6f630f9..a7ffc67 100644
--- a/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java
+++ b/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java
@@ -18,15 +18,18 @@
 
 import java.util.Random;
 
+import com.beust.jcommander.Parameter;
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 
+import org.apache.accumulo.examples.cli.Help;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -51,19 +54,27 @@ public static Value createValue(long rowId) {
     return new Value(value);
   }
 
+  static class Opts extends Help {
+    @Parameter(names = "-c")
+    String clientProps = "conf/accumulo-client.properties";
+  }
+
   /**
    * Writes 1000 entries to Accumulo using a {@link BatchWriter}. The rows of the entries will be sequential starting from 0.
    * The column families will be "foo" and column qualifiers will be "1". The values will be random 50 byte arrays.
    */
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    Opts opts = new Opts();
+    opts.parseArgs(SequentialBatchWriter.class.getName(), args);
+
+    AccumuloClient client = Accumulo.newClient().usingProperties(opts.clientProps).build();
     try {
-      connector.tableOperations().create("batch");
+      client.tableOperations().create("batch");
     } catch (TableExistsException e) {
       // ignore
     }
 
-    try (BatchWriter bw = connector.createBatchWriter("batch")) {
+    try (BatchWriter bw = client.createBatchWriter("batch")) {
       for (int i = 0; i < 10000; i++) {
         Mutation m = new Mutation(String.format("row_%010d", i));
         // create a random value that is a function of row id for verification purposes
diff --git a/src/main/java/org/apache/accumulo/examples/client/TraceDumpExample.java b/src/main/java/org/apache/accumulo/examples/client/TraceDumpExample.java
index 08146bb..24fe6d5 100644
--- a/src/main/java/org/apache/accumulo/examples/client/TraceDumpExample.java
+++ b/src/main/java/org/apache/accumulo/examples/client/TraceDumpExample.java
@@ -16,9 +16,9 @@
  */
 package org.apache.accumulo.examples.client;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Range;
@@ -55,18 +55,18 @@ public void dump(Opts opts) throws TableNotFoundException, AccumuloException, Ac
       throw new IllegalArgumentException("--traceid option is required");
     }
 
-    final Connector conn = opts.getConnector();
+    final AccumuloClient client= opts.getAccumuloClient();
     final String principal = opts.getPrincipal();
     final String table = opts.getTableName();
-    if (!conn.securityOperations().hasTablePermission(principal, table, TablePermission.READ)) {
-      conn.securityOperations().grantTablePermission(principal, table, TablePermission.READ);
+    if (!client.securityOperations().hasTablePermission(principal, table, TablePermission.READ)) {
+      client.securityOperations().grantTablePermission(principal, table, TablePermission.READ);
       try {
         Thread.sleep(1000);
       } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
         throw new RuntimeException(e);
       }
-      while (!conn.securityOperations().hasTablePermission(principal, table, TablePermission.READ)) {
+      while (!client.securityOperations().hasTablePermission(principal, table, TablePermission.READ)) {
         log.info("{} didn't propagate read permission on {}", principal, table);
         try {
           Thread.sleep(1000);
@@ -76,7 +76,7 @@ public void dump(Opts opts) throws TableNotFoundException, AccumuloException, Ac
         }
       }
     }
-    Scanner scanner = conn.createScanner(table, opts.auths);
+    Scanner scanner = client.createScanner(table, opts.auths);
     scanner.setRange(new Range(new Text(opts.traceId)));
     TraceDump.printTrace(scanner, new Printer() {
       @Override
diff --git a/src/main/java/org/apache/accumulo/examples/client/TracingExample.java b/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
index 68d4404..78f7011 100644
--- a/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
+++ b/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
@@ -74,7 +74,7 @@ public void enableTracing(Opts opts) throws Exception {
   public void execute(Opts opts) throws TableNotFoundException, InterruptedException, AccumuloException, AccumuloSecurityException, TableExistsException {
 
     if (opts.createtable) {
-      opts.getConnector().tableOperations().create(opts.getTableName());
+      opts.getAccumuloClient().tableOperations().create(opts.getTableName());
     }
 
     if (opts.createEntries) {
@@ -86,7 +86,7 @@ public void execute(Opts opts) throws TableNotFoundException, InterruptedExcepti
     }
 
     if (opts.deletetable) {
-      opts.getConnector().tableOperations().delete(opts.getTableName());
+      opts.getAccumuloClient().tableOperations().delete(opts.getTableName());
     }
   }
 
@@ -98,7 +98,7 @@ private void createEntries(Opts opts) throws TableNotFoundException, AccumuloExc
     TraceScope scope = Trace.startSpan("Client Write", Sampler.ALWAYS);
 
     System.out.println("TraceID: " + Long.toHexString(scope.getSpan().getTraceId()));
-    BatchWriter batchWriter = opts.getConnector().createBatchWriter(opts.getTableName(), new BatchWriterConfig());
+    BatchWriter batchWriter = opts.getAccumuloClient().createBatchWriter(opts.getTableName(), new BatchWriterConfig());
 
     Mutation m = new Mutation("row");
     m.put("cf", "cq", "value");
@@ -114,7 +114,7 @@ private void createEntries(Opts opts) throws TableNotFoundException, AccumuloExc
 
   private void readEntries(Opts opts) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
 
-    Scanner scanner = opts.getConnector().createScanner(opts.getTableName(), opts.auths);
+    Scanner scanner = opts.getAccumuloClient().createScanner(opts.getTableName(), opts.auths);
 
     // Trace the read operation.
     TraceScope readScope = Trace.startSpan("Client Read", Sampler.ALWAYS);
diff --git a/src/main/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraint.java b/src/main/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraint.java
index 0ae0d5f..39ce728 100644
--- a/src/main/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraint.java
+++ b/src/main/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraint.java
@@ -22,10 +22,11 @@
 import java.util.List;
 import java.util.Set;
 
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
@@ -102,9 +103,9 @@ public String getViolationDescription(short violationCode) {
   }
 
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
     try {
-      connector.tableOperations().create("testConstraints");
+      client.tableOperations().create("testConstraints");
     } catch (TableExistsException e) {
       // ignore
     }
@@ -112,10 +113,10 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
     /**
      * Add the {@link AlphaNumKeyConstraint} to the table. Be sure to use the fully qualified class name.
      */
-    int num = connector.tableOperations().addConstraint("testConstraints", "org.apache.accumulo.examples.constraints.AlphaNumKeyConstraint");
+    int num = client.tableOperations().addConstraint("testConstraints", "org.apache.accumulo.examples.constraints.AlphaNumKeyConstraint");
 
     System.out.println("Attempting to write non alpha numeric data to testConstraints");
-    try (BatchWriter bw = connector.createBatchWriter("testConstraints")) {
+    try (BatchWriter bw = client.createBatchWriter("testConstraints")) {
       Mutation m = new Mutation("r1--$$@@%%");
       m.put("cf1", "cq1", new Value(("value1").getBytes()));
       bw.addMutation(m);
@@ -123,6 +124,6 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
       e.getConstraintViolationSummaries().forEach(violationSummary -> System.out.println("Constraint violated: " + violationSummary.constrainClass));
     }
 
-    connector.tableOperations().removeConstraint("testConstraints", num);
+    client.tableOperations().removeConstraint("testConstraints", num);
   }
 }
diff --git a/src/main/java/org/apache/accumulo/examples/constraints/MaxMutationSize.java b/src/main/java/org/apache/accumulo/examples/constraints/MaxMutationSize.java
index 52d1d7c..813a6fd 100644
--- a/src/main/java/org/apache/accumulo/examples/constraints/MaxMutationSize.java
+++ b/src/main/java/org/apache/accumulo/examples/constraints/MaxMutationSize.java
@@ -19,10 +19,11 @@
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
@@ -51,9 +52,9 @@ public String getViolationDescription(short violationCode) {
   }
 
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
     try {
-      connector.tableOperations().create("testConstraints");
+      client.tableOperations().create("testConstraints");
     } catch (TableExistsException e) {
       // ignore
     }
@@ -61,10 +62,10 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
     /**
      * Add the {@link MaxMutationSize} constraint to the table. Be sure to use the fully qualified class name
      */
-    int num = connector.tableOperations().addConstraint("testConstraints", "org.apache.accumulo.examples.constraints.MaxMutationSize");
+    int num = client.tableOperations().addConstraint("testConstraints", "org.apache.accumulo.examples.constraints.MaxMutationSize");
 
     System.out.println("Attempting to write a lot of mutations to testConstraints");
-    try (BatchWriter bw = connector.createBatchWriter("testConstraints")) {
+    try (BatchWriter bw = client.createBatchWriter("testConstraints")) {
       Mutation m = new Mutation("r1");
       for (int i = 0; i < 1_000_000; i++)
         m.put("cf" + i % 5000, "cq" + i, new Value(("value" + i).getBytes()));
@@ -73,7 +74,7 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
       e.getConstraintViolationSummaries().forEach(m -> System.out.println("Constraint violated: " + m.constrainClass));
     }
 
-    connector.tableOperations().removeConstraint("testConstraints", num);
+    client.tableOperations().removeConstraint("testConstraints", num);
   }
 
 }
diff --git a/src/main/java/org/apache/accumulo/examples/constraints/NumericValueConstraint.java b/src/main/java/org/apache/accumulo/examples/constraints/NumericValueConstraint.java
index 998787f..2b97495 100644
--- a/src/main/java/org/apache/accumulo/examples/constraints/NumericValueConstraint.java
+++ b/src/main/java/org/apache/accumulo/examples/constraints/NumericValueConstraint.java
@@ -21,10 +21,11 @@
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
@@ -77,9 +78,9 @@ public String getViolationDescription(short violationCode) {
   }
 
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
     try {
-      connector.tableOperations().create("testConstraints");
+      client.tableOperations().create("testConstraints");
     } catch (TableExistsException e) {
       // ignore
     }
@@ -87,10 +88,10 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
     /**
      * Add the {@link NumericValueConstraint} constraint to the table.  Be sure to use the fully qualified class name
      */
-    int num = connector.tableOperations().addConstraint("testConstraints", "org.apache.accumulo.examples.constraints.NumericValueConstraint");
+    int num = client.tableOperations().addConstraint("testConstraints", "org.apache.accumulo.examples.constraints.NumericValueConstraint");
 
     System.out.println("Attempting to write non numeric data to testConstraints");
-    try (BatchWriter bw = connector.createBatchWriter("testConstraints")) {
+    try (BatchWriter bw = client.createBatchWriter("testConstraints")) {
       Mutation m = new Mutation("r1");
       m.put("cf1", "cq1", new Value(("value1--$$@@%%").getBytes()));
       bw.addMutation(m);
@@ -98,7 +99,7 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
       e.getConstraintViolationSummaries().forEach(m -> System.out.println("Constraint violated: " + m.constrainClass));
     }
 
-    connector.tableOperations().removeConstraint("testConstraints", num);
+    client.tableOperations().removeConstraint("testConstraints", num);
   }
 
 }
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java b/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java
index 4ac1f04..9c9f1a4 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java
@@ -19,8 +19,8 @@
 import java.util.Iterator;
 import java.util.Map.Entry;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
@@ -45,7 +45,7 @@
 
   private ScannerOpts scanOpts;
   private BatchWriterOpts bwOpts;
-  private Connector conn;
+  private AccumuloClient client;
   private String tableName;
   private Authorizations auths;
   private ColumnVisibility visibility;
@@ -237,8 +237,8 @@ private void calculateCounts(Scanner scanner, int depth, BatchWriter batchWriter
     }
   }
 
-  public FileCount(Connector conn, String tableName, Authorizations auths, ColumnVisibility cv, ScannerOpts scanOpts, BatchWriterOpts bwOpts) throws Exception {
-    this.conn = conn;
+  public FileCount(AccumuloClient client, String tableName, Authorizations auths, ColumnVisibility cv, ScannerOpts scanOpts, BatchWriterOpts bwOpts) throws Exception {
+    this.client = client;
     this.tableName = tableName;
     this.auths = auths;
     this.visibility = cv;
@@ -251,9 +251,9 @@ public void run() throws Exception {
     entriesScanned = 0;
     inserts = 0;
 
-    Scanner scanner = conn.createScanner(tableName, auths);
+    Scanner scanner = client.createScanner(tableName, auths);
     scanner.setBatchSize(scanOpts.scanBatchSize);
-    BatchWriter bw = conn.createBatchWriter(tableName, bwOpts.getBatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(tableName, bwOpts.getBatchWriterConfig());
 
     long t1 = System.currentTimeMillis();
 
@@ -290,7 +290,7 @@ public static void main(String[] args) throws Exception {
     String programName = FileCount.class.getName();
     opts.parseArgs(programName, args, scanOpts, bwOpts);
 
-    FileCount fileCount = new FileCount(opts.getConnector(), opts.getTableName(), opts.auths, opts.visibility, scanOpts, bwOpts);
+    FileCount fileCount = new FileCount(opts.getAccumuloClient(), opts.getTableName(), opts.auths, opts.visibility, scanOpts, bwOpts);
     fileCount.run();
   }
 }
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java b/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
index 421ca1e..0fba29a 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
@@ -21,8 +21,8 @@
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
@@ -140,19 +140,19 @@ public static void main(String[] args) throws Exception {
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(Ingest.class.getName(), args, bwOpts);
 
-    Connector conn = opts.getConnector();
-    if (!conn.tableOperations().exists(opts.nameTable))
-      conn.tableOperations().create(opts.nameTable);
-    if (!conn.tableOperations().exists(opts.indexTable))
-      conn.tableOperations().create(opts.indexTable);
-    if (!conn.tableOperations().exists(opts.dataTable)) {
-      conn.tableOperations().create(opts.dataTable);
-      conn.tableOperations().attachIterator(opts.dataTable, new IteratorSetting(1, ChunkCombiner.class));
+    AccumuloClient client= opts.getAccumuloClient();
+    if (!client.tableOperations().exists(opts.nameTable))
+      client.tableOperations().create(opts.nameTable);
+    if (!client.tableOperations().exists(opts.indexTable))
+      client.tableOperations().create(opts.indexTable);
+    if (!client.tableOperations().exists(opts.dataTable)) {
+      client.tableOperations().create(opts.dataTable);
+      client.tableOperations().attachIterator(opts.dataTable, new IteratorSetting(1, ChunkCombiner.class));
     }
 
-    BatchWriter dirBW = conn.createBatchWriter(opts.nameTable, bwOpts.getBatchWriterConfig());
-    BatchWriter indexBW = conn.createBatchWriter(opts.indexTable, bwOpts.getBatchWriterConfig());
-    BatchWriter dataBW = conn.createBatchWriter(opts.dataTable, bwOpts.getBatchWriterConfig());
+    BatchWriter dirBW = client.createBatchWriter(opts.nameTable, bwOpts.getBatchWriterConfig());
+    BatchWriter indexBW = client.createBatchWriter(opts.indexTable, bwOpts.getBatchWriterConfig());
+    BatchWriter dataBW = client.createBatchWriter(opts.dataTable, bwOpts.getBatchWriterConfig());
     FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
     for (String dir : opts.directories) {
       recurse(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java b/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java
index d979106..f7c5f45 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java
@@ -20,9 +20,9 @@
 import java.util.Map.Entry;
 import java.util.TreeMap;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
@@ -41,7 +41,7 @@
  * names.
  */
 public class QueryUtil {
-  private Connector conn = null;
+  private AccumuloClient client= null;
   private String tableName;
   private Authorizations auths;
   public static final Text DIR_COLF = new Text("dir");
@@ -51,7 +51,7 @@
   public static final Text COUNTS_COLQ = new Text("counts");
 
   public QueryUtil(Opts opts) throws AccumuloException, AccumuloSecurityException {
-    conn = opts.getConnector();
+    client = opts.getAccumuloClient();
     this.tableName = opts.getTableName();
     this.auths = opts.auths;
   }
@@ -141,7 +141,7 @@ public static String getType(Text colf) {
   public Map<String,String> getData(String path) throws TableNotFoundException {
     if (path.endsWith("/"))
       path = path.substring(0, path.length() - 1);
-    Scanner scanner = conn.createScanner(tableName, auths);
+    Scanner scanner = client.createScanner(tableName, auths);
     scanner.setRange(new Range(getRow(path)));
     Map<String,String> data = new TreeMap<>();
     for (Entry<Key,Value> e : scanner) {
@@ -162,7 +162,7 @@ public static String getType(Text colf) {
     if (!path.endsWith("/"))
       path = path + "/";
     Map<String,Map<String,String>> fim = new TreeMap<>();
-    Scanner scanner = conn.createScanner(tableName, auths);
+    Scanner scanner = client.createScanner(tableName, auths);
     scanner.setRange(Range.prefix(getRow(path)));
     for (Entry<Key,Value> e : scanner) {
       String name = e.getKey().getRow().toString();
@@ -185,7 +185,7 @@ public static String getType(Text colf) {
    */
   public Iterable<Entry<Key,Value>> exactTermSearch(String term) throws Exception {
     System.out.println("executing exactTermSearch for " + term);
-    Scanner scanner = conn.createScanner(tableName, auths);
+    Scanner scanner = client.createScanner(tableName, auths);
     scanner.setRange(new Range(getForwardIndex(term)));
     return scanner;
   }
@@ -200,7 +200,7 @@ public static String getType(Text colf) {
     if (exp.indexOf("/") >= 0)
       throw new Exception("this method only works with unqualified names");
 
-    Scanner scanner = conn.createScanner(tableName, auths);
+    Scanner scanner = client.createScanner(tableName, auths);
     if (exp.startsWith("*")) {
       System.out.println("executing beginning wildcard search for " + exp);
       exp = exp.substring(1);
@@ -238,7 +238,7 @@ public static String getType(Text colf) {
     String lastPart = exp.substring(starIndex + 1);
     String regexString = ".*/" + exp.replace("*", "[^/]*");
 
-    Scanner scanner = conn.createScanner(tableName, auths);
+    Scanner scanner = client.createScanner(tableName, auths);
     if (firstPart.length() >= lastPart.length()) {
       System.out.println("executing middle wildcard search for " + regexString + " from entries starting with " + firstPart);
       scanner.setRange(Range.prefix(getForwardIndex(firstPart)));
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java b/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java
index b40fe24..858d850 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java
@@ -104,7 +104,7 @@ public Viewer(Opts opts) throws Exception {
     setSize(1000, 800);
     setDefaultCloseOperation(EXIT_ON_CLOSE);
     q = new QueryUtil(opts);
-    fdq = new FileDataQuery(opts.getConnector(), opts.dataTable, opts.auths);
+    fdq = new FileDataQuery(opts.getAccumuloClient(), opts.dataTable, opts.auths);
     this.topPath = opts.path;
   }
 
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java b/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
index 25b78bb..f4c2cbe 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
@@ -24,8 +24,8 @@
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.data.ArrayByteSequence;
@@ -186,12 +186,12 @@ public static void main(String[] args) throws Exception {
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(FileDataIngest.class.getName(), args, bwOpts);
 
-    Connector conn = opts.getConnector();
-    if (!conn.tableOperations().exists(opts.getTableName())) {
-      conn.tableOperations().create(opts.getTableName());
-      conn.tableOperations().attachIterator(opts.getTableName(), new IteratorSetting(1, ChunkCombiner.class));
+    AccumuloClient client = opts.getAccumuloClient();
+    if (!client.tableOperations().exists(opts.getTableName())) {
+      client.tableOperations().create(opts.getTableName());
+      client.tableOperations().attachIterator(opts.getTableName(), new IteratorSetting(1, ChunkCombiner.class));
     }
-    BatchWriter bw = conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
     FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
     for (String filename : opts.files) {
       fdi.insertFileData(filename, bw);
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/FileDataQuery.java b/src/main/java/org/apache/accumulo/examples/filedata/FileDataQuery.java
index 00a3dc2..094828b 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/FileDataQuery.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/FileDataQuery.java
@@ -21,9 +21,9 @@
 import java.util.List;
 import java.util.Map.Entry;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Key;
@@ -41,11 +41,11 @@
   private ChunkInputStream cis;
   Scanner scanner;
 
-  public FileDataQuery(Connector conn, String tableName, Authorizations auths)
+  public FileDataQuery(AccumuloClient client, String tableName, Authorizations auths)
       throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     lastRefs = new ArrayList<>();
     cis = new ChunkInputStream();
-    scanner = conn.createScanner(tableName, auths);
+    scanner = client.createScanner(tableName, auths);
   }
 
   public List<Entry<Key,Value>> getLastRefs() {
diff --git a/src/main/java/org/apache/accumulo/examples/helloworld/InsertWithBatchWriter.java b/src/main/java/org/apache/accumulo/examples/helloworld/InsertWithBatchWriter.java
index dfe464e..2afd2b4 100644
--- a/src/main/java/org/apache/accumulo/examples/helloworld/InsertWithBatchWriter.java
+++ b/src/main/java/org/apache/accumulo/examples/helloworld/InsertWithBatchWriter.java
@@ -16,14 +16,17 @@
  */
 package org.apache.accumulo.examples.helloworld;
 
+import com.beust.jcommander.Parameter;
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.examples.cli.Help;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -34,16 +37,23 @@
 
   private static final Logger log = LoggerFactory.getLogger(InsertWithBatchWriter.class);
 
+  static class Opts extends Help {
+    @Parameter(names = "-c")
+    String clientProps = "conf/accumulo-client.properties";
+  }
+
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    Opts opts = new Opts();
+    opts.parseArgs(InsertWithBatchWriter.class.getName(), args);
 
-    Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties(opts.clientProps).build();
     try {
-      connector.tableOperations().create("hellotable");
+      client.tableOperations().create("hellotable");
     } catch (TableExistsException e) {
       // ignore
     }
 
-    try (BatchWriter bw = connector.createBatchWriter("hellotable")) {
+    try (BatchWriter bw = client.createBatchWriter("hellotable")) {
       log.trace("writing ...");
       for (int i = 0; i < 10000; i++) {
         Mutation m = new Mutation(String.format("row_%d", i));
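
For reference, the client entry point used throughout this change is Accumulo.newClient() in place of the earlier Connector builder. A minimal write sketch in the style of the updated helloworld example, assuming conf/accumulo-client.properties exists relative to the working directory:

    import org.apache.accumulo.core.client.Accumulo;
    import org.apache.accumulo.core.client.AccumuloClient;
    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.TableExistsException;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;

    public class HelloWriteSketch {
      public static void main(String[] args) throws Exception {
        // Build a client from a properties file, as the updated examples do.
        AccumuloClient client = Accumulo.newClient()
            .usingProperties("conf/accumulo-client.properties").build();
        try {
          client.tableOperations().create("hellotable");
        } catch (TableExistsException e) {
          // table already present; keep going
        }
        try (BatchWriter bw = client.createBatchWriter("hellotable")) {
          Mutation m = new Mutation("row_0");
          m.put("colfam", "colqual", new Value("value".getBytes()));
          bw.addMutation(m);
        }
      }
    }
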
diff --git a/src/main/java/org/apache/accumulo/examples/helloworld/ReadData.java b/src/main/java/org/apache/accumulo/examples/helloworld/ReadData.java
index da5baf8..265f799 100644
--- a/src/main/java/org/apache/accumulo/examples/helloworld/ReadData.java
+++ b/src/main/java/org/apache/accumulo/examples/helloworld/ReadData.java
@@ -18,15 +18,18 @@
 
 import java.util.Map.Entry;
 
+import com.beust.jcommander.Parameter;
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.examples.cli.Help;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -37,11 +40,18 @@
 
   private static final Logger log = LoggerFactory.getLogger(ReadData.class);
 
+  static class Opts extends Help {
+    @Parameter(names = "-c")
+    String clientProps = "conf/accumulo-client.properties";
+  }
+
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    Opts opts = new Opts();
+    opts.parseArgs(ReadData.class.getName(), args);
 
-    Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties(opts.clientProps).build();
 
-    try (Scanner scan = connector.createScanner("hellotable", Authorizations.EMPTY)) {
+    try (Scanner scan = client.createScanner("hellotable", Authorizations.EMPTY)) {
       scan.setRange(new Range(new Key("row_0"), new Key("row_1002")));
       for (Entry<Key, Value> e : scan) {
         Key key = e.getKey();
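
The read side is symmetric; a minimal scan sketch matching the updated ReadData example (same assumed properties path, imports as in the ReadData diff above):

    AccumuloClient client = Accumulo.newClient()
        .usingProperties("conf/accumulo-client.properties").build();
    try (Scanner scan = client.createScanner("hellotable", Authorizations.EMPTY)) {
      scan.setRange(new Range(new Key("row_0"), new Key("row_1002")));
      for (Map.Entry<Key, Value> e : scan) {
        // print each key/value pair returned by the scan
        System.out.println(e.getKey().getRow() + " -> " + e.getValue());
      }
    }
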
diff --git a/src/main/java/org/apache/accumulo/examples/isolation/InterferenceTest.java b/src/main/java/org/apache/accumulo/examples/isolation/InterferenceTest.java
index dde5cc4..c4b64b3 100644
--- a/src/main/java/org/apache/accumulo/examples/isolation/InterferenceTest.java
+++ b/src/main/java/org/apache/accumulo/examples/isolation/InterferenceTest.java
@@ -19,8 +19,8 @@
 import java.util.HashSet;
 import java.util.Map.Entry;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IsolatedScanner;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.Scanner;
@@ -159,17 +159,17 @@ public static void main(String[] args) throws Exception {
     if (opts.iterations < 1)
       opts.iterations = Long.MAX_VALUE;
 
-    Connector conn = opts.getConnector();
-    if (!conn.tableOperations().exists(opts.getTableName()))
-      conn.tableOperations().create(opts.getTableName());
+    AccumuloClient client = opts.getAccumuloClient();
+    if (!client.tableOperations().exists(opts.getTableName()))
+      client.tableOperations().create(opts.getTableName());
 
-    Thread writer = new Thread(new Writer(conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig()), opts.iterations));
+    Thread writer = new Thread(new Writer(client.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig()), opts.iterations));
     writer.start();
     Reader r;
     if (opts.isolated)
-      r = new Reader(new IsolatedScanner(conn.createScanner(opts.getTableName(), opts.auths)));
+      r = new Reader(new IsolatedScanner(client.createScanner(opts.getTableName(), opts.auths)));
     else
-      r = new Reader(conn.createScanner(opts.getTableName(), opts.auths));
+      r = new Reader(client.createScanner(opts.getTableName(), opts.auths));
     Thread reader;
     reader = new Thread(r);
     reader.start();
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/NGramIngest.java b/src/main/java/org/apache/accumulo/examples/mapreduce/NGramIngest.java
index fdcc434..984f058 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/NGramIngest.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/NGramIngest.java
@@ -84,9 +84,9 @@ public int run(String[] args) throws Exception {
     job.setNumReduceTasks(0);
     job.setSpeculativeExecution(false);
 
-    if (!opts.getConnector().tableOperations().exists(opts.getTableName())) {
+    if (!opts.getAccumuloClient().tableOperations().exists(opts.getTableName())) {
       log.info("Creating table " + opts.getTableName());
-      opts.getConnector().tableOperations().create(opts.getTableName());
+      opts.getAccumuloClient().tableOperations().create(opts.getTableName());
       SortedSet<Text> splits = new TreeSet<>();
       String numbers[] = "1 2 3 4 5 6 7 8 9".split("\\s");
       String lower[] = "a b c d e f g h i j k l m n o p q r s t u v w x y z".split("\\s");
@@ -96,7 +96,7 @@ public int run(String[] args) throws Exception {
           splits.add(new Text(s));
         }
       }
-      opts.getConnector().tableOperations().addSplits(opts.getTableName(), splits);
+      opts.getAccumuloClient().tableOperations().addSplits(opts.getTableName(), splits);
     }
 
     TextInputFormat.addInputPath(job, new Path(opts.inputDirectory));
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/UniqueColumns.java b/src/main/java/org/apache/accumulo/examples/mapreduce/UniqueColumns.java
index e3742dc..29a4a17 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/UniqueColumns.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/UniqueColumns.java
@@ -20,7 +20,7 @@
 import java.util.HashMap;
 import java.util.HashSet;
 
-import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
@@ -94,7 +94,7 @@ public int run(String[] args) throws Exception {
     job.setJarByClass(this.getClass());
 
     String clone = opts.getTableName();
-    Connector conn = null;
+    AccumuloClient client = null;
 
     opts.setAccumuloConfigs(job);
 
@@ -104,10 +104,10 @@ public int run(String[] args) throws Exception {
        * table, clone it, and then keep using the same clone as input for map reduce.
        */
 
-      conn = opts.getConnector();
+      client = opts.getAccumuloClient();
       clone = opts.getTableName() + "_" + jobName;
-      conn.tableOperations().clone(opts.getTableName(), clone, true, new HashMap<String,String>(), new HashSet<String>());
-      conn.tableOperations().offline(clone);
+      client.tableOperations().clone(opts.getTableName(), clone, true, new HashMap<String,String>(), new HashSet<String>());
+      client.tableOperations().offline(clone);
 
       AccumuloInputFormat.setOfflineTableScan(job, true);
       AccumuloInputFormat.setInputTableName(job, clone);
@@ -130,7 +130,7 @@ public int run(String[] args) throws Exception {
     job.waitForCompletion(true);
 
     if (opts.offline) {
-      conn.tableOperations().delete(clone);
+      client.tableOperations().delete(clone);
     }
 
     return job.isSuccessful() ? 0 : 1;
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java
index a4ffe10..1d4261a 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java
@@ -22,8 +22,9 @@
 import java.util.Base64;
 import java.util.Collection;
 
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.ClientInfo;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.mapreduce.AccumuloFileOutputFormat;
 import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
@@ -32,7 +33,6 @@
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.TextUtil;
-import org.apache.accumulo.examples.cli.MapReduceClientOnRequiredTable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -47,8 +47,6 @@
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
-import com.beust.jcommander.Parameter;
-
 /**
  * Example map reduce job that bulk ingests data into an Accumulo table. The expected input is text files containing tab-separated key-value pairs on each line.
  */
@@ -120,8 +118,8 @@ public int run(String[] args) {
       job.setReducerClass(ReduceClass.class);
       job.setOutputFormatClass(AccumuloFileOutputFormat.class);
 
-      ClientInfo info = Connector.builder().usingProperties("conf/accumulo-client.properties").info();
-      Connector connector = Connector.builder().usingClientInfo(info).build();
+      ClientInfo info = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").info();
+      AccumuloClient client = Accumulo.newClient().usingClientInfo(info).build();
       AccumuloInputFormat.setClientInfo(job, info);
       AccumuloInputFormat.setInputTableName(job, SetupTable.tableName);
       AccumuloInputFormat.setScanAuthorizations(job, Authorizations.EMPTY);
@@ -134,7 +132,7 @@ public int run(String[] args) {
       FileSystem fs = FileSystem.get(conf);
       out = new PrintStream(new BufferedOutputStream(fs.create(new Path(workDir + "/splits.txt"))));
 
-      Collection<Text> splits = connector.tableOperations().listSplits(SetupTable.tableName, 100);
+      Collection<Text> splits = client.tableOperations().listSplits(SetupTable.tableName, 100);
       for (Text split : splits)
         out.println(Base64.getEncoder().encodeToString(TextUtil.getBytes(split)));
 
@@ -151,7 +149,7 @@ public int run(String[] args) {
       // With HDFS permissions on, we need to make sure the Accumulo user can read/move the rfiles
       FsShell fsShell = new FsShell(conf);
       fsShell.run(new String[] {"-chmod", "-R", "777", workDir});
-      connector.tableOperations().importDirectory(SetupTable.tableName, workDir + "/files", workDir + "/failures", false);
+      client.tableOperations().importDirectory(SetupTable.tableName, workDir + "/files", workDir + "/failures", false);
 
     } catch (Exception e) {
       throw new RuntimeException(e);
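
One wiring detail shared by the MapReduce examples: a ClientInfo built from the properties file is handed to AccumuloInputFormat, and the same info is used to build a client for the split and import calls. A condensed excerpt of that pattern as it appears above (job, conf, and SetupTable.tableName as in the diff):

    ClientInfo info = Accumulo.newClient()
        .usingProperties("conf/accumulo-client.properties").info();
    AccumuloClient client = Accumulo.newClient().usingClientInfo(info).build();
    AccumuloInputFormat.setClientInfo(job, info);
    AccumuloInputFormat.setInputTableName(job, SetupTable.tableName);
    AccumuloInputFormat.setScanAuthorizations(job, Authorizations.EMPTY);
    // later: list splits and bulk-import the generated rfiles with the same client
    Collection<Text> splits = client.tableOperations().listSplits(SetupTable.tableName, 100);
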
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/SetupTable.java b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/SetupTable.java
index 225be47..e7f3bd5 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/SetupTable.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/SetupTable.java
@@ -20,7 +20,8 @@
 import java.io.PrintStream;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -36,9 +37,9 @@
   static String outputFile = "bulk/test_1.txt";
 
   public static void main(String[] args) throws Exception {
-    Connector conn = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
     try {
-      conn.tableOperations().create(tableName);
+      client.tableOperations().create(tableName);
     } catch (TableExistsException e) {
       //ignore
     }
@@ -48,7 +49,7 @@ public static void main(String[] args) throws Exception {
     for (String split : splits) {
       intialPartitions.add(new Text(split));
     }
-    conn.tableOperations().addSplits(tableName, intialPartitions);
+    client.tableOperations().addSplits(tableName, intialPartitions);
 
     FileSystem fs = FileSystem.get(new Configuration());
     try (PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(new Path(outputFile))))) {
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/VerifyIngest.java b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/VerifyIngest.java
index 6fd1318..09495db 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/VerifyIngest.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/VerifyIngest.java
@@ -19,16 +19,16 @@
 import java.util.Iterator;
 import java.util.Map.Entry;
 
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.io.Text;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,8 +36,8 @@
   private static final Logger log = LoggerFactory.getLogger(VerifyIngest.class);
 
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
-    Scanner scanner = connector.createScanner(SetupTable.tableName, Authorizations.EMPTY);
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
+    Scanner scanner = client.createScanner(SetupTable.tableName, Authorizations.EMPTY);
 
     scanner.setRange(new Range(String.format("row_%010d", 0), null));
 
diff --git a/src/main/java/org/apache/accumulo/examples/reservations/ARS.java b/src/main/java/org/apache/accumulo/examples/reservations/ARS.java
index 47dfe97..1297efc 100644
--- a/src/main/java/org/apache/accumulo/examples/reservations/ARS.java
+++ b/src/main/java/org/apache/accumulo/examples/reservations/ARS.java
@@ -20,15 +20,13 @@
 import java.util.List;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.ConditionalWriter;
 import org.apache.accumulo.core.client.ConditionalWriter.Status;
 import org.apache.accumulo.core.client.ConditionalWriterConfig;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IsolatedScanner;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.data.Condition;
 import org.apache.accumulo.core.data.ConditionalMutation;
 import org.apache.accumulo.core.data.Key;
@@ -56,15 +54,15 @@
 
   private static final Logger log = LoggerFactory.getLogger(ARS.class);
 
-  private Connector conn;
+  private AccumuloClient client;
   private String rTable;
 
   public enum ReservationResult {
     RESERVED, WAIT_LISTED
   }
 
-  public ARS(Connector conn, String rTable) {
-    this.conn = conn;
+  public ARS(AccumuloClient client, String rTable) {
+    this.client = client;
     this.rTable = rTable;
   }
 
@@ -89,8 +87,8 @@ public ReservationResult reserve(String what, String when, String who) throws Ex
     ReservationResult result = ReservationResult.RESERVED;
 
     // it is important to use an isolated scanner so that only whole mutations are seen
-    try (ConditionalWriter cwriter = conn.createConditionalWriter(rTable, new ConditionalWriterConfig());
-        Scanner scanner = new IsolatedScanner(conn.createScanner(rTable, Authorizations.EMPTY))) {
+    try (ConditionalWriter cwriter = client.createConditionalWriter(rTable, new ConditionalWriterConfig());
+        Scanner scanner = new IsolatedScanner(client.createScanner(rTable, Authorizations.EMPTY))) {
       while (true) {
         Status status = cwriter.write(update).getStatus();
         switch (status) {
@@ -162,8 +160,8 @@ public void cancel(String what, String when, String who) throws Exception {
     // when it actually got the reservation.
 
     // it's important to use an isolated scanner so that only whole mutations are seen
-    try (ConditionalWriter cwriter = conn.createConditionalWriter(rTable, new ConditionalWriterConfig());
-        Scanner scanner = new IsolatedScanner(conn.createScanner(rTable, Authorizations.EMPTY))) {
+    try (ConditionalWriter cwriter = client.createConditionalWriter(rTable, new ConditionalWriterConfig());
+        Scanner scanner = new IsolatedScanner(client.createScanner(rTable, Authorizations.EMPTY))) {
       while (true) {
         scanner.setRange(new Range(row));
 
@@ -216,7 +214,7 @@ public void cancel(String what, String when, String who) throws Exception {
     String row = what + ":" + when;
 
     // it's important to use an isolated scanner so that only whole mutations are seen
-    try (Scanner scanner = new IsolatedScanner(conn.createScanner(rTable, Authorizations.EMPTY))) {
+    try (Scanner scanner = new IsolatedScanner(client.createScanner(rTable, Authorizations.EMPTY))) {
       scanner.setRange(new Range(row));
       scanner.fetchColumnFamily(new Text("res"));
 
@@ -281,10 +279,10 @@ public void run() {
       } else if (tokens[0].equals("quit") && tokens.length == 1) {
         break;
       } else if (tokens[0].equals("connect") && tokens.length == 6 && ars == null) {
-        ZooKeeperInstance zki = new ZooKeeperInstance(ClientConfiguration.create().withInstance(tokens[1]).withZkHosts(tokens[2]));
-        Connector conn = zki.getConnector(tokens[3], new PasswordToken(tokens[4]));
-        if (conn.tableOperations().exists(tokens[5])) {
-          ars = new ARS(conn, tokens[5]);
+        AccumuloClient client = Accumulo.newClient().forInstance(tokens[1], tokens[2])
+            .usingPassword(tokens[3], tokens[4]).build();
+        if (client.tableOperations().exists(tokens[5])) {
+          ars = new ARS(client, tokens[5]);
           reader.println("  connected");
         } else
           reader.println("  No Such Table");
diff --git a/src/main/java/org/apache/accumulo/examples/sample/SampleExample.java b/src/main/java/org/apache/accumulo/examples/sample/SampleExample.java
index 35e689d..907ad9b 100644
--- a/src/main/java/org/apache/accumulo/examples/sample/SampleExample.java
+++ b/src/main/java/org/apache/accumulo/examples/sample/SampleExample.java
@@ -20,8 +20,8 @@
 import java.util.Collections;
 import java.util.Map.Entry;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.SampleNotPresentException;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.admin.CompactionConfig;
@@ -60,17 +60,17 @@ public static void main(String[] args) throws Exception {
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
 
-    Connector conn = opts.getConnector();
+    AccumuloClient client = opts.getAccumuloClient();
 
-    if (!conn.tableOperations().exists(opts.getTableName())) {
-      conn.tableOperations().create(opts.getTableName());
+    if (!client.tableOperations().exists(opts.getTableName())) {
+      client.tableOperations().create(opts.getTableName());
     } else {
       System.out.println("Table exists, not doing anything.");
       return;
     }
 
     // write some data
-    BatchWriter bw = conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
+    BatchWriter bw = client.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
     bw.addMutation(createMutation("9225", "abcde", "file://foo.txt"));
     bw.addMutation(createMutation("8934", "accumulo scales", "file://accumulo_notes.txt"));
     bw.addMutation(createMutation("2317", "milk, eggs, bread, parmigiano-reggiano", "file://groceries/9/txt"));
@@ -80,9 +80,9 @@ public static void main(String[] args) throws Exception {
     SamplerConfiguration sc1 = new SamplerConfiguration(RowSampler.class.getName());
     sc1.setOptions(ImmutableMap.of("hasher", "murmur3_32", "modulus", "3"));
 
-    conn.tableOperations().setSamplerConfiguration(opts.getTableName(), sc1);
+    client.tableOperations().setSamplerConfiguration(opts.getTableName(), sc1);
 
-    Scanner scanner = conn.createScanner(opts.getTableName(), Authorizations.EMPTY);
+    Scanner scanner = client.createScanner(opts.getTableName(), Authorizations.EMPTY);
     System.out.println("Scanning all data :");
     print(scanner);
     System.out.println();
@@ -97,7 +97,7 @@ public static void main(String[] args) throws Exception {
     System.out.println();
 
     // compact table to recreate sample data
-    conn.tableOperations().compact(opts.getTableName(), new CompactionConfig().setCompactionStrategy(NO_SAMPLE_STRATEGY));
+    client.tableOperations().compact(opts.getTableName(), new CompactionConfig().setCompactionStrategy(NO_SAMPLE_STRATEGY));
 
     System.out.println("Scanning after compaction (compaction should have created sample data) : ");
     print(scanner);
@@ -113,9 +113,9 @@ public static void main(String[] args) throws Exception {
     // change tables sampling configuration...
     SamplerConfiguration sc2 = new SamplerConfiguration(RowSampler.class.getName());
     sc2.setOptions(ImmutableMap.of("hasher", "murmur3_32", "modulus", "2"));
-    conn.tableOperations().setSamplerConfiguration(opts.getTableName(), sc2);
+    client.tableOperations().setSamplerConfiguration(opts.getTableName(), sc2);
     // compact table to recreate sample data using new configuration
-    conn.tableOperations().compact(opts.getTableName(), new CompactionConfig().setCompactionStrategy(NO_SAMPLE_STRATEGY));
+    client.tableOperations().compact(opts.getTableName(), new CompactionConfig().setCompactionStrategy(NO_SAMPLE_STRATEGY));
 
     System.out.println("Scanning with old sampler configuration.  Sample data was created using new configuration with a compaction.  Scan should fail.");
     try {
diff --git a/src/main/java/org/apache/accumulo/examples/shard/ContinuousQuery.java b/src/main/java/org/apache/accumulo/examples/shard/ContinuousQuery.java
index 1371964..2c56b23 100644
--- a/src/main/java/org/apache/accumulo/examples/shard/ContinuousQuery.java
+++ b/src/main/java/org/apache/accumulo/examples/shard/ContinuousQuery.java
@@ -22,8 +22,9 @@
 import java.util.Map.Entry;
 import java.util.Random;
 
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
@@ -62,13 +63,13 @@ public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     opts.parseArgs(ContinuousQuery.class.getName(), args);
 
-    Connector conn = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
 
-    ArrayList<Text[]> randTerms = findRandomTerms(conn.createScanner(opts.doc2Term, Authorizations.EMPTY), opts.numTerms);
+    ArrayList<Text[]> randTerms = findRandomTerms(client.createScanner(opts.doc2Term, Authorizations.EMPTY), opts.numTerms);
 
     Random rand = new Random();
 
-    try (BatchScanner bs = conn.createBatchScanner(opts.tableName, Authorizations.EMPTY, 5)) {
+    try (BatchScanner bs = client.createBatchScanner(opts.tableName, Authorizations.EMPTY, 5)) {
       for (long i = 0; i < opts.iterations; i += 1) {
         Text[] columns = randTerms.get(rand.nextInt(randTerms.size()));
 
diff --git a/src/main/java/org/apache/accumulo/examples/shard/Index.java b/src/main/java/org/apache/accumulo/examples/shard/Index.java
index 5b2d67f..cd34816 100644
--- a/src/main/java/org/apache/accumulo/examples/shard/Index.java
+++ b/src/main/java/org/apache/accumulo/examples/shard/Index.java
@@ -22,12 +22,11 @@
 import java.util.HashSet;
 import java.util.List;
 
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.examples.cli.BatchWriterOpts;
-import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
 import org.apache.accumulo.examples.cli.Help;
 import org.apache.hadoop.io.Text;
 
@@ -111,9 +110,9 @@ public static void main(String[] args) throws Exception {
 
     String splitRegex = "\\W+";
 
-    Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
 
-    try (BatchWriter bw = connector.createBatchWriter(opts.tableName)) {
+    try (BatchWriter bw = client.createBatchWriter(opts.tableName)) {
       for (String filename : opts.files) {
         index(opts.partitions, new File(filename), splitRegex, bw);
       }
diff --git a/src/main/java/org/apache/accumulo/examples/shard/Query.java b/src/main/java/org/apache/accumulo/examples/shard/Query.java
index 16f1530..5377b41 100644
--- a/src/main/java/org/apache/accumulo/examples/shard/Query.java
+++ b/src/main/java/org/apache/accumulo/examples/shard/Query.java
@@ -21,8 +21,9 @@
 import java.util.List;
 import java.util.Map.Entry;
 
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.sample.SamplerConfiguration;
 import org.apache.accumulo.core.data.Key;
@@ -87,13 +88,13 @@ public static void main(String[] args) throws Exception {
     QueryOpts opts = new QueryOpts();
     opts.parseArgs(Query.class.getName(), args);
 
-    Connector conn = Connector.builder().usingProperties("conf/accumulo-client.properties")
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties")
         .build();
 
-    try (BatchScanner bs = conn.createBatchScanner(opts.tableName, Authorizations.EMPTY, 10)) {
+    try (BatchScanner bs = client.createBatchScanner(opts.tableName, Authorizations.EMPTY, 10)) {
       if (opts.useSample) {
-        SamplerConfiguration samplerConfig = conn.tableOperations().getSamplerConfiguration(opts.tableName);
-        CutoffIntersectingIterator.validateSamplerConfig(conn.tableOperations().getSamplerConfiguration(opts.tableName));
+        SamplerConfiguration samplerConfig = client.tableOperations().getSamplerConfiguration(opts.tableName);
+        CutoffIntersectingIterator.validateSamplerConfig(client.tableOperations().getSamplerConfiguration(opts.tableName));
         bs.setSamplerConfiguration(samplerConfig);
       }
       for (String entry : query(bs, opts.terms, opts.sampleCutoff)) {
diff --git a/src/main/java/org/apache/accumulo/examples/shard/Reverse.java b/src/main/java/org/apache/accumulo/examples/shard/Reverse.java
index 19ef6a4..d66a0bd 100644
--- a/src/main/java/org/apache/accumulo/examples/shard/Reverse.java
+++ b/src/main/java/org/apache/accumulo/examples/shard/Reverse.java
@@ -18,17 +18,15 @@
 
 import java.util.Map.Entry;
 
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.examples.cli.BatchWriterOpts;
-import org.apache.accumulo.examples.cli.ClientOpts;
 import org.apache.accumulo.examples.cli.Help;
-import org.apache.accumulo.examples.cli.ScannerOpts;
 import org.apache.hadoop.io.Text;
 
 import com.beust.jcommander.Parameter;
@@ -52,10 +50,10 @@ public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     opts.parseArgs(Reverse.class.getName(), args);
 
-    Connector conn = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
 
-    try (Scanner scanner = conn.createScanner(opts.shardTable, Authorizations.EMPTY);
-         BatchWriter bw = conn.createBatchWriter(opts.doc2TermTable)) {
+    try (Scanner scanner = client.createScanner(opts.shardTable, Authorizations.EMPTY);
+         BatchWriter bw = client.createBatchWriter(opts.doc2TermTable)) {
       for (Entry<Key, Value> entry : scanner) {
         Key key = entry.getKey();
         Mutation m = new Mutation(key.getColumnQualifier());
diff --git a/src/test/java/org/apache/accumulo/examples/ExamplesIT.java b/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
index 022ed80..5b89c8d 100644
--- a/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
+++ b/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
@@ -28,7 +28,6 @@
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Paths;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Iterator;
@@ -41,10 +40,10 @@
 import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
 import org.apache.accumulo.cluster.standalone.StandaloneClusterControl;
 import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
@@ -76,9 +75,6 @@
 import org.apache.accumulo.examples.mapreduce.TableToFile;
 import org.apache.accumulo.examples.mapreduce.TeraSortIngest;
 import org.apache.accumulo.examples.mapreduce.WordCount;
-import org.apache.accumulo.examples.mapreduce.bulk.BulkIngestExample;
-import org.apache.accumulo.examples.mapreduce.bulk.SetupTable;
-import org.apache.accumulo.examples.mapreduce.bulk.VerifyIngest;
 import org.apache.accumulo.examples.shard.ContinuousQuery;
 import org.apache.accumulo.examples.shard.Index;
 import org.apache.accumulo.examples.shard.Query;
@@ -112,7 +108,7 @@
   private static final String visibility = "A|B";
   private static final String auths = "A,B";
 
-  Connector c;
+  AccumuloClient c;
   BatchWriter bw;
   IteratorSetting is;
   String dir;
@@ -128,14 +124,14 @@ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoo
 
   @Before
   public void getClusterInfo() throws Exception {
-    c = getConnector();
+    c = getAccumuloClient();
     String user = c.info().getPrincipal();
     String instance = c.info().getInstanceName();
     String keepers = c.info().getZooKeepers();
     AuthenticationToken token = getAdminToken();
     if (token instanceof PasswordToken) {
       String passwd = new String(((PasswordToken) getAdminToken()).getPassword(), UTF_8);
-      writeConnectionFile(getConnectionFile(), instance, keepers, user, passwd);
+      writeClientPropsFile(getClientPropsFile(), instance, keepers, user, passwd);
     } else {
       Assert.fail("Unknown token type: " + token);
     }
@@ -149,21 +145,22 @@ public void getClusterInfo() throws Exception {
   @After
   public void resetAuths() throws Exception {
     if (null != origAuths) {
-      getConnector().securityOperations().changeUserAuthorizations(getAdminPrincipal(), origAuths);
+      getAccumuloClient().securityOperations().changeUserAuthorizations(getAdminPrincipal(), origAuths);
     }
   }
 
-  public static void writeConnectionFile(String file, String instance, String keepers, String user, String password) throws IOException {
+  public static void writeClientPropsFile(String file, String instance, String keepers, String user, String password) throws IOException {
     try (BufferedWriter writer = Files.newBufferedWriter(Paths.get(file))) {
-      writer.write("instance.zookeeper.host=" + keepers + "\n");
       writer.write("instance.name=" + instance + "\n");
-      writer.write("accumulo.examples.principal=" + user + "\n");
-      writer.write("accumulo.examples.password=" + password + "\n");
+      writer.write("instance.zookeepers=" + keepers + "\n");
+      writer.write("auth.type=password\n");
+      writer.write("auth.principal=" + user + "\n");
+      writer.write("auth.token=" + password + "\n");
     }
   }
 
-  private String getConnectionFile() {
-    return System.getProperty("user.dir") + "/target/examples.conf";
+  private String getClientPropsFile() {
+    return System.getProperty("user.dir") + "/target/accumulo-client.properties";
   }
 
   @Override
@@ -180,7 +177,7 @@ public void testTrace() throws Exception {
       while (!c.tableOperations().exists("trace"))
         sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
     }
-    String[] args = new String[] {"-c", getConnectionFile(), "--createtable", "--deletetable", "--create"};
+    String[] args = new String[] {"-c", getClientPropsFile(), "--createtable", "--deletetable", "--create"};
     Entry<Integer,String> pair = cluster.getClusterControl().execWithStdout(TracingExample.class, args);
     Assert.assertEquals("Expected return code of zero. STDOUT=" + pair.getValue(), 0, pair.getKey().intValue());
     String result = pair.getValue();
@@ -188,7 +185,7 @@ public void testTrace() throws Exception {
     Matcher matcher = pattern.matcher(result);
     int count = 0;
     while (matcher.find()) {
-      args = new String[] {"-c", getConnectionFile(), "--traceid", matcher.group(1)};
+      args = new String[] {"-c", getClientPropsFile(), "--traceid", matcher.group(1)};
       pair = cluster.getClusterControl().execWithStdout(TraceDumpExample.class, args);
       count++;
     }
@@ -199,24 +196,6 @@ public void testTrace() throws Exception {
     }
   }
 
-  @Test
-  public void testClasspath() throws Exception {
-    Entry<Integer,String> entry = getCluster().getClusterControl().execWithStdout(Main.class, new String[] {"classpath"});
-    assertEquals(0, entry.getKey().intValue());
-    String result = entry.getValue();
-    int level1 = result.indexOf("Level 1");
-    int level2 = result.indexOf("Level 2");
-    int level3 = result.indexOf("Level 3");
-    int level4 = result.indexOf("Level 4");
-    assertTrue("Level 1 classloader not present.", level1 >= 0);
-    assertTrue("Level 2 classloader not present.", level2 > 0);
-    assertTrue("Level 3 classloader not present.", level3 > 0);
-    assertTrue("Level 4 classloader not present.", level4 > 0);
-    assertTrue(level1 < level2);
-    assertTrue(level2 < level3);
-    assertTrue(level3 < level4);
-  }
-
   @Test
   public void testDirList() throws Exception {
     String[] names = getUniqueNames(3);
@@ -235,7 +214,7 @@ public void testDirList() throws Exception {
     }
     assumeTrue(new File(dirListDirectory).exists());
     // Index a directory listing on /tmp. If this is running against a standalone cluster, we can't guarantee Accumulo source will be there.
-    args = new String[] {"-c", getConnectionFile(), "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable", dataTable, "--vis", visibility,
+    args = new String[] {"-c", getClientPropsFile(), "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable", dataTable, "--vis", visibility,
         "--chunkSize", Integer.toString(10000), dirListDirectory};
 
     Entry<Integer,String> entry = getClusterControl().execWithStdout(Ingest.class, args);
@@ -255,7 +234,7 @@ public void testDirList() throws Exception {
         throw new RuntimeException("Unknown cluster type");
     }
 
-    args = new String[] {"-c", getConnectionFile(), "-t", indexTable, "--auths", auths, "--search", "--path", expectedFile};
+    args = new String[] {"-c", getClientPropsFile(), "-t", indexTable, "--auths", auths, "--search", "--path", expectedFile};
     entry = getClusterControl().execWithStdout(QueryUtil.class, args);
     if (ClusterType.MINI == getClusterType()) {
       MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
@@ -324,36 +303,6 @@ public void testStatsCombiner() throws Exception {
     assertFalse("Iterator had additional results", iter.hasNext());
   }
 
-  @Test
-  public void testBloomFilters() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-    String[] args = new String[] {"--seed", "7", "-c", getConnectionFile(), "--num", "100000", "--min", "0", "--max", "1000000000", "--size", "50",
-        "--batchMemory", "2M", "--batchLatency", "60", "--batchThreads", "3", "-t", tableName};
-
-    goodExec(RandomBatchWriter.class, args);
-    c.tableOperations().flush(tableName, null, null, true);
-    long diff = 0, diff2 = 0;
-    // try the speed test a couple times in case the system is loaded with other tests
-    for (int i = 0; i < 2; i++) {
-      long now = System.currentTimeMillis();
-      args = new String[] {"--seed", "7", "-c", getConnectionFile(), "--num", "10000", "--min", "0", "--max", "1000000000", "--size", "50", "--scanThreads",
-          "4", "-t", tableName};
-      goodExec(RandomBatchScanner.class, args);
-      diff = System.currentTimeMillis() - now;
-      now = System.currentTimeMillis();
-      args = new String[] {"--seed", "8", "-c", getConnectionFile(), "--num", "10000", "--min", "0", "--max", "1000000000", "--size", "50", "--scanThreads",
-          "4", "-t", tableName};
-      int retCode = getClusterControl().exec(RandomBatchScanner.class, args);
-      assertEquals(1, retCode);
-      diff2 = System.currentTimeMillis() - now;
-      if (diff2 < diff)
-        break;
-    }
-    assertTrue(diff2 < diff);
-  }
-
   @Test
   public void testShardedIndex() throws Exception {
     File src = new File(System.getProperty("user.dir") + "/src");
@@ -376,11 +325,11 @@ public void testShardedIndex() throws Exception {
     }
     assertTrue(thisFile);
 
-    String[] args = new String[] {"-c", getConnectionFile(), "--shardTable", shard, "--doc2Term", index};
+    String[] args = new String[] {"-c", getClientPropsFile(), "--shardTable", shard, "--doc2Term", index};
 
     // create a reverse index
     goodExec(Reverse.class, args);
-    args = new String[] {"-c", getConnectionFile(), "--shardTable", shard, "--doc2Term", index, "--terms", "5", "--count", "1000"};
+    args = new String[] {"-c", getClientPropsFile(), "--shardTable", shard, "--doc2Term", index, "--terms", "5", "--count", "1000"};
     // run some queries
     goodExec(ContinuousQuery.class, args);
   }
@@ -407,22 +356,22 @@ public void testTeraSortAndRead() throws Exception {
     // TODO Figure out a way to run M/R with Kerberos
     assumeTrue(getAdminToken() instanceof PasswordToken);
     String tableName = getUniqueNames(1)[0];
-    String[] args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-c", getConnectionFile(),
+    String[] args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-c", getClientPropsFile(),
         "--splits", "4"};
     goodExec(TeraSortIngest.class, args);
     Path output = new Path(dir, "tmp/nines");
     if (fs.exists(output)) {
       fs.delete(output, true);
     }
-    args = new String[] {"-c", getConnectionFile(), "-t", tableName, "--rowRegex", ".*999.*", "--output", output.toString()};
+    args = new String[] {"-c", getClientPropsFile(), "-t", tableName, "--rowRegex", ".*999.*", "--output", output.toString()};
     goodExec(RegexExample.class, args);
-    args = new String[] {"-c", getConnectionFile(), "-t", tableName, "--column", "c:"};
+    args = new String[] {"-c", getClientPropsFile(), "-t", tableName, "--column", "c:"};
     goodExec(RowHash.class, args);
     output = new Path(dir, "tmp/tableFile");
     if (fs.exists(output)) {
       fs.delete(output, true);
     }
-    args = new String[] {"-c", getConnectionFile(), "-t", tableName, "--output", output.toString()};
+    args = new String[] {"-c", getClientPropsFile(), "-t", tableName, "--output", output.toString()};
     goodExec(TableToFile.class, args);
   }
 
@@ -443,15 +392,14 @@ public void testWordCount() throws Exception {
     }
     fs.copyFromLocalFile(readme, new Path(dir + "/tmp/wc/README.md"));
     String[] args;
-    args = new String[] {"-c", getConnectionFile(), "--input", dir + "/tmp/wc", "-t", tableName};
+    args = new String[] {"-c", getClientPropsFile(), "--input", dir + "/tmp/wc", "-t", tableName};
     goodExec(WordCount.class, args);
   }
 
   @Test
   public void testInsertWithBatchWriterAndReadData() throws Exception {
-    String tableName = getUniqueNames(1)[0];
     String[] args;
-    args = new String[] {"-c", getConnectionFile(), "-t", tableName};
+    args = new String[] {"-c", getClientPropsFile()};
     goodExec(InsertWithBatchWriter.class, args);
     goodExec(ReadData.class, args);
   }
@@ -459,60 +407,35 @@ public void testInsertWithBatchWriterAndReadData() throws Exception {
   @Test
   public void testIsolatedScansWithInterference() throws Exception {
     String[] args;
-    args = new String[] {"-c", getConnectionFile(), "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"};
+    args = new String[] {"-c", getClientPropsFile(), "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"};
     goodExec(InterferenceTest.class, args);
   }
 
   @Test
   public void testScansWithInterference() throws Exception {
     String[] args;
-    args = new String[] {"-c", getConnectionFile(), "-t", getUniqueNames(1)[0], "--iterations", "100000"};
+    args = new String[] {"-c", getClientPropsFile(), "-t", getUniqueNames(1)[0], "--iterations", "100000"};
     goodExec(InterferenceTest.class, args);
   }
 
   @Test
   public void testRowOperations() throws Exception {
-    String[] args;
-    args = new String[] {"-c", getConnectionFile()};
-    goodExec(RowOperations.class, args);
+    goodExec(RowOperations.class, "-c", getClientPropsFile());
   }
 
   @Test
-  public void testBatchWriter() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    String[] args;
-    args = new String[] {"-c", getConnectionFile(), "-t", tableName, "--start", "0", "--num", "100000", "--size", "50", "--batchMemory", "10000000",
-        "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
-    goodExec(SequentialBatchWriter.class, args);
-
+  public void testSequentialBatchWriter() throws Exception {
+    goodExec(SequentialBatchWriter.class, "-c", getClientPropsFile());
   }
 
   @Test
   public void testReadWriteAndDelete() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    String[] args;
-    args = new String[] {"-c", getConnectionFile(), "--auths", auths, "--table", tableName, "--createtable", "--create"};
-    goodExec(ReadWriteExample.class, args);
-    args = new String[] {"-c", getConnectionFile(), "--auths", auths, "--table", tableName, "--delete"};
-    goodExec(ReadWriteExample.class, args);
-
+    goodExec(ReadWriteExample.class, "-c", getClientPropsFile());
   }
 
   @Test
-  public void testRandomBatchesAndFlush() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    String[] args = new String[] {"-c", getConnectionFile(), "--table", tableName, "--num", "100000", "--min", "0", "--max", "100000", "--size", "100",
-        "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
-    goodExec(RandomBatchWriter.class, args);
-
-    args = new String[] {"-c", getConnectionFile(), "--table", tableName, "--num", "10000", "--min", "0", "--max", "100000", "--size", "100", "--scanThreads",
-        "4", "--auths", auths};
-    goodExec(RandomBatchScanner.class, args);
-
-    args = new String[] {"-c", getConnectionFile(), "--table", tableName};
-    goodExec(Flush.class, args);
+  public void testRandomBatchScanner() throws Exception {
+    goodExec(RandomBatchScanner.class, "-c", getClientPropsFile());
   }
 
   private void goodExec(Class<?> theClass, String... args) throws InterruptedException, IOException {
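
For anyone wiring the examples up by hand, the keys written by writeClientPropsFile above imply a conf/accumulo-client.properties of roughly this shape (values are placeholders):

    instance.name=myinstance
    instance.zookeepers=localhost:2181
    auth.type=password
    auth.principal=root
    auth.token=secret
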
diff --git a/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java b/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java
index f080728..4533c9b 100644
--- a/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java
+++ b/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java
@@ -22,9 +22,9 @@
 import java.util.ArrayList;
 import java.util.Map.Entry;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
@@ -43,7 +43,7 @@
 
 public class CountIT extends ConfigurableMacBase {
 
-  private Connector conn;
+  private AccumuloClient client;
   private String tableName;
 
   @Override
@@ -54,9 +54,9 @@ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSit
   @Before
   public void setupInstance() throws Exception {
     tableName = getUniqueNames(1)[0];
-    conn = getConnector();
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    client = getClient();
+    client.tableOperations().create(tableName);
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
     ColumnVisibility cv = new ColumnVisibility();
     // / has 1 dir
     // /local has 2 dirs 1 file
@@ -73,13 +73,13 @@ public void setupInstance() throws Exception {
 
   @Test
   public void test() throws Exception {
-    Scanner scanner = conn.createScanner(tableName, new Authorizations());
+    Scanner scanner = client.createScanner(tableName, new Authorizations());
     scanner.fetchColumn(new Text("dir"), new Text("counts"));
     assertFalse(scanner.iterator().hasNext());
 
     ScannerOpts scanOpts = new ScannerOpts();
     BatchWriterOpts bwOpts = new BatchWriterOpts();
-    FileCount fc = new FileCount(conn, tableName, Authorizations.EMPTY, new ColumnVisibility(), scanOpts, bwOpts);
+    FileCount fc = new FileCount(client, tableName, Authorizations.EMPTY, new ColumnVisibility(), scanOpts, bwOpts);
     fc.run();
 
     ArrayList<Pair<String,String>> expected = new ArrayList<>();
diff --git a/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputFormatIT.java b/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputFormatIT.java
index 5ed7d5d..e8e87a9 100644
--- a/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputFormatIT.java
+++ b/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputFormatIT.java
@@ -29,9 +29,9 @@
 import java.util.List;
 import java.util.Map.Entry;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
@@ -69,14 +69,14 @@ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoo
   private static List<Entry<Key,Value>> data;
   private static List<Entry<Key,Value>> baddata;
 
-  private Connector conn;
+  private AccumuloClient client;
   private String tableName;
 
   @Before
   public void setupInstance() throws Exception {
-    conn = getConnector();
+    client = getAccumuloClient();
     tableName = getUniqueNames(1)[0];
-    conn.securityOperations().changeUserAuthorizations(conn.whoami(), AUTHS);
+    client.securityOperations().changeUserAuthorizations(client.whoami(), AUTHS);
   }
 
   @BeforeClass
@@ -273,8 +273,8 @@ public static int main(String... args) throws Exception {
 
   @Test
   public void test() throws Exception {
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    client.tableOperations().create(tableName);
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     for (Entry<Key,Value> e : data) {
       Key k = e.getKey();
@@ -290,8 +290,8 @@ public void test() throws Exception {
 
   @Test
   public void testErrorOnNextWithoutClose() throws Exception {
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    client.tableOperations().create(tableName);
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     for (Entry<Key,Value> e : data) {
       Key k = e.getKey();
@@ -309,8 +309,8 @@ public void testErrorOnNextWithoutClose() throws Exception {
 
   @Test
   public void testInfoWithoutChunks() throws Exception {
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    client.tableOperations().create(tableName);
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
     for (Entry<Key,Value> e : baddata) {
       Key k = e.getKey();
       Mutation m = new Mutation(k.getRow());
diff --git a/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamIT.java b/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamIT.java
index e45762a..5720276 100644
--- a/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamIT.java
+++ b/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamIT.java
@@ -25,11 +25,11 @@
 import java.util.List;
 import java.util.Map.Entry;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
@@ -49,6 +49,7 @@
 import org.junit.Test;
 
 public class ChunkInputStreamIT extends AccumuloClusterHarness {
+
   @Override
   public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
     cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
@@ -56,7 +57,7 @@ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoo
 
   private static final Authorizations AUTHS = new Authorizations("A", "B", "C", "D");
 
-  private Connector conn;
+  private AccumuloClient client;
   private String tableName;
   private List<Entry<Key,Value>> data;
   private List<Entry<Key,Value>> baddata;
@@ -64,9 +65,9 @@ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoo
 
   @Before
   public void setupInstance() throws Exception {
-    conn = getConnector();
+    client = getAccumuloClient();
     tableName = getUniqueNames(1)[0];
-    conn.securityOperations().changeUserAuthorizations(conn.whoami(), AUTHS);
+    client.securityOperations().changeUserAuthorizations(client.whoami(), AUTHS);
   }
 
   @Before
@@ -126,8 +127,8 @@ static void addData(List<Entry<Key,Value>> data, String row, String cf, int chun
 
   @Test
   public void testWithAccumulo() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, IOException {
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    client.tableOperations().create(tableName);
+    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     for (Entry<Key,Value> e : data) {
       Key k = e.getKey();
@@ -137,7 +138,7 @@ public void testWithAccumulo() throws AccumuloException, AccumuloSecurityExcepti
     }
     bw.close();
 
-    Scanner scan = conn.createScanner(tableName, AUTHS);
+    Scanner scan = client.createScanner(tableName, AUTHS);
 
     ChunkInputStream cis = new ChunkInputStream();
     byte[] b = new byte[20];
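The read side changes the same way: conn.createScanner(...) becomes client.createScanner(...) with the same Authorizations argument. A short scan sketch under the same client-builder assumption as above; the table name and authorization labels are placeholders.

// Read-side sketch matching the scanner changes above; placeholders throughout.
import java.util.Map;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;

public class ScanPathSketch {
  public static void main(String[] args) throws Exception {
    try (AccumuloClient client = Accumulo.newClient().from("conf/accumulo-client.properties").build()) {
      Authorizations auths = new Authorizations("A", "B");             // placeholder labels
      Scanner scan = client.createScanner("filedata_sketch", auths);   // placeholder table
      for (Map.Entry<Key, Value> entry : scan) {
        System.out.println(entry.getKey() + " -> " + entry.getValue());
      }
      scan.close();
    }
  }
}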
diff --git a/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java b/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
index 744b002..7bf9c86 100644
--- a/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
+++ b/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
@@ -25,11 +25,11 @@
 import java.util.Collections;
 import java.util.Map.Entry;
 
+import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableExistsException;
@@ -71,13 +71,13 @@ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSit
   @Test
   public void test() throws Exception {
     String confFile = System.getProperty("user.dir")+"/target/examples.conf";
-    String instance = getConnector().getInstance().getInstanceName();
-    String keepers = getConnector().getInstance().getZooKeepers();
-    ExamplesIT.writeConnectionFile(confFile, instance, keepers, "root", ROOT_PASSWORD);
-    runTest(confFile, getConnector(), getCluster());
+    String instance = getClientInfo().getInstanceName();
+    String keepers = getClientInfo().getZooKeepers();
+    ExamplesIT.writeClientPropsFile(confFile, instance, keepers, "root", ROOT_PASSWORD);
+    runTest(confFile, getClient(), getCluster());
   }
 
-  static void runTest(String confFile, Connector c, MiniAccumuloClusterImpl cluster) throws AccumuloException, AccumuloSecurityException, TableExistsException,
+  static void runTest(String confFile, AccumuloClient c, MiniAccumuloClusterImpl cluster) throws AccumuloException, AccumuloSecurityException, TableExistsException,
       TableNotFoundException, MutationsRejectedException, IOException, InterruptedException, NoSuchAlgorithmException {
     c.tableOperations().create(tablename);
     BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
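The MapReduceIT change swaps writeConnectionFile for ExamplesIT.writeClientPropsFile, i.e. the test now materializes an Accumulo 2.0 client properties file before running the job. A sketch of producing such a file with java.util.Properties is below; the property keys (instance.name, instance.zookeepers, auth.*) follow the 2.0 client configuration and, like the literal values, should be treated as assumptions for 2.0.0-alpha-1.

// Sketch: writing a client properties file like the one MapReduceIT passes around.
// Keys assume the Accumulo 2.0 client configuration; values are placeholders.
import java.io.FileWriter;
import java.util.Properties;

public class WriteClientPropsSketch {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.setProperty("instance.name", "miniInstance");          // placeholder
    props.setProperty("instance.zookeepers", "localhost:2181");  // placeholder
    props.setProperty("auth.type", "password");
    props.setProperty("auth.principal", "root");
    props.setProperty("auth.token", "secret");                   // placeholder password
    try (FileWriter writer = new FileWriter("target/examples.conf")) {
      props.store(writer, "client properties for the examples (sketch)");
    }
  }
}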


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services
