accumulo-commits mailing list archives

From: vi...@apache.org
Subject: svn commit: r1433166 [10/20] - in /accumulo/branches/ACCUMULO-259: ./ assemble/ assemble/platform/ assemble/scripts/ assemble/scripts/init.d/ bin/ conf/examples/1GB/native-standalone/ conf/examples/1GB/standalone/ conf/examples/2GB/native-standalone/ c...
Date: Mon, 14 Jan 2013 22:03:34 GMT
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/Flush.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/Flush.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/Flush.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/Flush.java Mon Jan 14 22:03:24 2013
@@ -16,8 +16,8 @@
  */
 package org.apache.accumulo.examples.simple.client;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 
 /**
  * Simple example for using tableOperations() (like create, delete, flush, etc).
@@ -25,21 +25,11 @@ import org.apache.accumulo.core.client.Z
 public class Flush {
   
   public static void main(String[] args) {
-    if (args.length != 5) {
-      System.err.println("Usage: accumulo accumulo.examples.client.Flush <instance name> <zoo keepers> <username> <password> <tableName>");
-      return;
-    }
-    String instanceName = args[0];
-    String zooKeepers = args[1];
-    String user = args[2];
-    String password = args[3];
-    String table = args[4];
-    
-    Connector connector;
+    ClientOnRequiredTable opts = new ClientOnRequiredTable();
+    opts.parseArgs(Flush.class.getName(), args);
     try {
-      ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-      connector = instance.getConnector(user, password.getBytes());
-      connector.tableOperations().flush(table, null, null, true);
+      Connector connector = opts.getConnector();
+      connector.tableOperations().flush(opts.tableName, null, null, true);
     } catch (Exception e) {
       throw new RuntimeException(e);
     }

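The same reshaping repeats in every file below: hand-rolled positional argument parsing is replaced by a JCommander-annotated options object from the new org.apache.accumulo.core.cli package. A minimal standalone sketch of the mechanics that ClientOpts.parseArgs wraps (the class and flag names here are illustrative, not from this commit):

    import com.beust.jcommander.JCommander;
    import com.beust.jcommander.Parameter;

    public class OptsDemo {
      static class Opts {
        // each annotated field is populated from the matching command-line flag
        @Parameter(names = "--table", required = true, description = "table to act on")
        String tableName;
        @Parameter(names = "--wait", description = "block until the operation completes")
        boolean wait = false;
      }

      public static void main(String[] args) {
        Opts opts = new Opts();
        new JCommander(opts).parse(args);  // throws ParameterException if --table is missing
        System.out.println(opts.tableName + " wait=" + opts.wait);
      }
    }

The cli base classes layer the shared connection flags (instance, zookeepers, user, password) on top of this and expose getConnector(), which is why the rewritten mains shrink so much.
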
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java Mon Jan 14 22:03:24 2013
@@ -20,21 +20,24 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
 import java.util.Random;
 
+import org.apache.accumulo.core.cli.BatchScannerOpts;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Internal class used to verify validity of data read.
  */
@@ -165,6 +168,19 @@ public class RandomBatchScanner {
     printRowsNotFound(expectedRows);
   }
   
+  public static class Opts  extends ClientOnRequiredTable {
+    @Parameter(names="--min", description="miniumum row that will be generated")
+    long min = 0;
+    @Parameter(names="--max", description="maximum ow that will be generated")
+    long max = 0;
+    @Parameter(names="--num", required=true, description="number of ranges to generate")
+    int num = 0;
+    @Parameter(names="--size", required=true, description="size of the value to write")
+    int size = 0;
+    @Parameter(names="--seed", description="seed for pseudo-random number generator")
+    Long seed = null;
+  }
+  
   /**
    * Scans over a specified number of entries to Accumulo using a {@link BatchScanner}. Completes scans twice to compare times for a fresh query with those for
    * a repeated query which has cached metadata and connections already established.
@@ -175,65 +191,35 @@ public class RandomBatchScanner {
    * @throws TableNotFoundException
    */
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    String seed = null;
-    
-    int index = 0;
-    String processedArgs[] = new String[11];
-    for (int i = 0; i < args.length; i++) {
-      if (args[i].equals("-s")) {
-        seed = args[++i];
-      } else {
-        processedArgs[index++] = args[i];
-      }
-    }
-    
-    if (index != 11) {
-      System.out
-          .println("Usage : RandomBatchScanner [-s <seed>] <instance name> <zoo keepers> <username> <password> <table> <num> <min> <max> <expected value size> <num threads> <auths>");
-      return;
-    }
-    
-    String instanceName = processedArgs[0];
-    String zooKeepers = processedArgs[1];
-    String user = processedArgs[2];
-    byte[] pass = processedArgs[3].getBytes();
-    String table = processedArgs[4];
-    int num = Integer.parseInt(processedArgs[5]);
-    long min = Long.parseLong(processedArgs[6]);
-    long max = Long.parseLong(processedArgs[7]);
-    int expectedValueSize = Integer.parseInt(processedArgs[8]);
-    int numThreads = Integer.parseInt(processedArgs[9]);
-    String auths = processedArgs[10];
-    
-    // Uncomment the following lines for detailed debugging info
-    // Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
-    // logger.setLevel(Level.TRACE);
-    
-    ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-    Connector connector = instance.getConnector(user, pass);
-    BatchScanner tsbr = connector.createBatchScanner(table, new Authorizations(auths.split(",")), numThreads);
+    Opts opts = new Opts();
+    BatchScannerOpts bsOpts = new BatchScannerOpts();
+    opts.parseArgs(RandomBatchScanner.class.getName(), args, bsOpts);
+    
+    Connector connector = opts.getConnector();
+    BatchScanner batchReader = connector.createBatchScanner(opts.tableName, opts.auths, bsOpts.scanThreads);
+    batchReader.setTimeout(bsOpts.scanTimeout, TimeUnit.MILLISECONDS);
     
     Random r;
-    if (seed == null)
+    if (opts.seed == null)
       r = new Random();
     else
-      r = new Random(Long.parseLong(seed));
+      r = new Random(opts.seed);
     
     // do one cold
-    doRandomQueries(num, min, max, expectedValueSize, r, tsbr);
+    doRandomQueries(opts.num, opts.min, opts.max, opts.size, r, batchReader);
     
     System.gc();
     System.gc();
     System.gc();
     
-    if (seed == null)
+    if (opts.seed == null)
       r = new Random();
     else
-      r = new Random(Long.parseLong(seed));
+      r = new Random(opts.seed);
     
     // do one hot (connections already established, metadata table cached)
-    doRandomQueries(num, min, max, expectedValueSize, r, tsbr);
+    doRandomQueries(opts.num, opts.min, opts.max, opts.size, r, batchReader);
     
-    tsbr.close();
+    batchReader.close();
   }
 }

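BatchScannerOpts now carries the thread count and timeout that previously arrived as positional arguments. A sketch of the resulting scan setup, assuming only what the new code above reads (the scanThreads and scanTimeout fields) and using a full-table range for illustration:

    import java.util.Collections;
    import java.util.Map.Entry;
    import java.util.concurrent.TimeUnit;

    import org.apache.accumulo.core.cli.BatchScannerOpts;
    import org.apache.accumulo.core.cli.ClientOnRequiredTable;
    import org.apache.accumulo.core.client.BatchScanner;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;

    public class BatchScanSketch {
      public static void main(String[] args) throws Exception {
        ClientOnRequiredTable opts = new ClientOnRequiredTable();
        BatchScannerOpts bsOpts = new BatchScannerOpts();
        // parseArgs distributes the flags across all the options objects it is given
        opts.parseArgs(BatchScanSketch.class.getName(), args, bsOpts);

        Connector connector = opts.getConnector();
        BatchScanner scanner = connector.createBatchScanner(opts.tableName, opts.auths, bsOpts.scanThreads);
        scanner.setTimeout(bsOpts.scanTimeout, TimeUnit.MILLISECONDS);
        scanner.setRanges(Collections.singleton(new Range()));  // an empty Range covers the whole table
        for (Entry<Key,Value> entry : scanner)
          System.out.println(entry.getKey() + " -> " + entry.getValue());
        scanner.close();
      }
    }
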
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java Mon Jan 14 22:03:24 2013
@@ -16,22 +16,29 @@
  */
 package org.apache.accumulo.examples.simple.client;
 
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Random;
+import java.util.Set;
+import java.util.Map.Entry;
 
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.core.security.thrift.SecurityErrorCode;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Simple example for writing random data to Accumulo. See docs/examples/README.batch for instructions.
  * 
@@ -88,6 +95,21 @@ public class RandomBatchWriter {
     return m;
   }
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--num", required=true)
+    int num = 0;
+    @Parameter(names="--min")
+    long min = 0;
+    @Parameter(names="--max")
+    long max = Long.MAX_VALUE;
+    @Parameter(names="--size", required=true, description="size of the value to write")
+    int size = 0;
+    @Parameter(names="--vis", converter=VisibilityConverter.class)
+    ColumnVisibility visibility = new ColumnVisibility("");
+    @Parameter(names="--seed", description="seed for pseudo-random number generator")
+    Long seed = null;
+  }
+ 
   /**
    * Writes a specified number of entries to Accumulo using a {@link BatchWriter}.
    * 
@@ -96,62 +118,28 @@ public class RandomBatchWriter {
    * @throws TableNotFoundException
    */
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    
-    String seed = null;
-    
-    int index = 0;
-    String processedArgs[] = new String[13];
-    for (int i = 0; i < args.length; i++) {
-      if (args[i].equals("-s")) {
-        seed = args[++i];
-      } else {
-        processedArgs[index++] = args[i];
-      }
-    }
-    
-    if (index != 13) {
-      System.out
-          .println("Usage : RandomBatchWriter [-s <seed>] <instance name> <zoo keepers> <username> <password> <table> <num> <min> <max> <value size> <max memory> <max latency> <num threads> <visibility>");
-      return;
-    }
-    
-    String instanceName = processedArgs[0];
-    String zooKeepers = processedArgs[1];
-    String user = processedArgs[2];
-    byte[] pass = processedArgs[3].getBytes();
-    String table = processedArgs[4];
-    int num = Integer.parseInt(processedArgs[5]);
-    long min = Long.parseLong(processedArgs[6]);
-    long max = Long.parseLong(processedArgs[7]);
-    int valueSize = Integer.parseInt(processedArgs[8]);
-    long maxMemory = Long.parseLong(processedArgs[9]);
-    long maxLatency = Long.parseLong(processedArgs[10]) == 0 ? Long.MAX_VALUE : Long.parseLong(processedArgs[10]);
-    int numThreads = Integer.parseInt(processedArgs[11]);
-    String visiblity = processedArgs[12];
-    
-    // Uncomment the following lines for detailed debugging info
-    // Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
-    // logger.setLevel(Level.TRACE);
+    Opts opts = new Opts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
     
     Random r;
-    if (seed == null)
+    if (opts.seed == null)
       r = new Random();
     else {
-      r = new Random(Long.parseLong(seed));
+      r = new Random(opts.seed);
     }
     
-    ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-    Connector connector = instance.getConnector(user, pass);
-    BatchWriter bw = connector.createBatchWriter(table, maxMemory, maxLatency, numThreads);
+    Connector connector = opts.getConnector();
+    BatchWriter bw = connector.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());
     
     // reuse the ColumnVisibility object to improve performance
-    ColumnVisibility cv = new ColumnVisibility(visiblity);
+    ColumnVisibility cv = opts.visibility;
     
-    for (int i = 0; i < num; i++) {
+    for (int i = 0; i < opts.num; i++) {
       
-      long rowid = (Math.abs(r.nextLong()) % (max - min)) + min;
+      long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) + opts.min;
       
-      Mutation m = createMutation(rowid, valueSize, cv);
+      Mutation m = createMutation(rowid, opts.size, cv);
       
       bw.addMutation(m);
       
@@ -161,9 +149,14 @@ public class RandomBatchWriter {
       bw.close();
     } catch (MutationsRejectedException e) {
       if (e.getAuthorizationFailures().size() > 0) {
-        HashSet<String> tables = new HashSet<String>();
-        for (KeyExtent ke : e.getAuthorizationFailures()) {
-          tables.add(ke.getTableId().toString());
+        HashMap<String,Set<SecurityErrorCode>> tables = new HashMap<String,Set<SecurityErrorCode>>();
+        for (Entry<KeyExtent,Set<SecurityErrorCode>> ke : e.getAuthorizationFailures().entrySet()) {
+          Set<SecurityErrorCode> secCodes = tables.get(ke.getKey().getTableId().toString());
+          if (secCodes == null) {
+            secCodes = new HashSet<SecurityErrorCode>();
+            tables.put(ke.getKey().getTableId().toString(), secCodes);
+          }
+          secCodes.addAll(ke.getValue());
         }
         System.err.println("ERROR : Not authorized to write to tables : " + tables);
       }

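The reworked error handling groups the per-extent SecurityErrorCodes by table id before printing, instead of collecting bare table ids. The map-of-sets insert idiom, isolated in a runnable sketch (plain Strings stand in for the table-id and error-code types):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class GroupFailuresSketch {
      public static void main(String[] args) {
        Map<String,Set<String>> tables = new HashMap<String,Set<String>>();
        add(tables, "t1", "PERMISSION_DENIED");
        add(tables, "t1", "BAD_AUTHORIZATIONS");
        add(tables, "t2", "PERMISSION_DENIED");
        System.out.println(tables);  // e.g. {t1=[PERMISSION_DENIED, BAD_AUTHORIZATIONS], t2=[PERMISSION_DENIED]}
      }

      // lazily create the set for a key, then accumulate into it
      static void add(Map<String,Set<String>> map, String tableId, String code) {
        Set<String> codes = map.get(tableId);
        if (codes == null) {
          codes = new HashSet<String>();
          map.put(tableId, codes);
        }
        codes.add(code);
      }
    }
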
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/ReadWriteExample.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/ReadWriteExample.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/ReadWriteExample.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/ReadWriteExample.java Mon Jan 14 22:03:24 2013
@@ -20,183 +20,109 @@ import java.util.Map.Entry;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.cli.ClientOnDefaultTable;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.io.Text;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
+
+import com.beust.jcommander.Parameter;
 
 public class ReadWriteExample {
   // defaults
-  private static final String DEFAULT_INSTANCE_NAME = "test";
-  private static final String DEFAULT_ZOOKEEPERS = "localhost";
   private static final String DEFAULT_AUTHS = "LEVEL1,GROUP1";
   private static final String DEFAULT_TABLE_NAME = "test";
   
-  // options
-  private Option instanceOpt = new Option("i", "instance", true, "instance name");
-  private Option zooKeepersOpt = new Option("z", "zooKeepers", true, "zoo keepers");
-  
-  private Option usernameOpt = new Option("u", "user", true, "user name");
-  private Option passwordOpt = new Option("p", "password", true, "password");
-  private Option scanAuthsOpt = new Option("s", "scanauths", true, "comma-separated scan authorizations");
-  
-  private Option tableNameOpt = new Option("t", "table", true, "table name");
-  private Option createtableOpt = new Option("C", "createtable", false, "create table before doing anything");
-  private Option deletetableOpt = new Option("D", "deletetable", false, "delete table when finished");
-  
-  private Option createEntriesOpt = new Option("e", "create", false, "create entries before any deletes");
-  private Option deleteEntriesOpt = new Option("d", "delete", false, "delete entries after any creates");
-  private Option readEntriesOpt = new Option("r", "read", false, "read entries after any creates/deletes");
-  
-  private Option debugOpt = new Option("dbg", "debug", false, "enable debugging");
-  
-  private Options opts;
-  private CommandLine cl;
   private Connector conn;
   
-  // hidden constructor
-  private ReadWriteExample() {}
-  
-  // setup
-  private void configure(String[] args) throws ParseException, AccumuloException, AccumuloSecurityException {
-    usernameOpt.setRequired(true);
-    passwordOpt.setRequired(true);
-    opts = new Options();
-    addOptions(instanceOpt, zooKeepersOpt, usernameOpt, passwordOpt, scanAuthsOpt, tableNameOpt, createtableOpt, deletetableOpt, createEntriesOpt,
-        deleteEntriesOpt, readEntriesOpt, debugOpt);
-    
-    // parse command line
-    cl = new BasicParser().parse(opts, args);
-    if (cl.getArgs().length != 0)
-      throw new ParseException("unrecognized options " + cl.getArgList());
-    
-    // optionally enable debugging
-    if (hasOpt(debugOpt))
-      Logger.getLogger(Constants.CORE_PACKAGE_NAME).setLevel(Level.TRACE);
-    
-    Instance inst = new ZooKeeperInstance(getOpt(instanceOpt, DEFAULT_INSTANCE_NAME), getOpt(zooKeepersOpt, DEFAULT_ZOOKEEPERS));
-    conn = inst.getConnector(getRequiredOpt(usernameOpt), getRequiredOpt(passwordOpt).getBytes());
-  }
-  
-  // for setup
-  private void addOptions(Option... addOpts) {
-    for (Option opt : addOpts)
-      opts.addOption(opt);
-  }
-  
-  // for checking for and getting options
-  private boolean hasOpt(Option opt) {
-    return cl.hasOption(opt.getOpt());
-  }
-  
-  private String getRequiredOpt(Option opt) {
-    return getOpt(opt, null);
-  }
-  
-  private String getOpt(Option opt, String defaultValue) {
-    return cl.getOptionValue(opt.getOpt(), defaultValue);
+  static class Opts extends ClientOnDefaultTable {
+    @Parameter(names={"-C", "--createtable"}, description="create table before doing anything")
+    boolean createtable = false;
+    @Parameter(names={"-D", "--deletetable"}, description="delete table when finished")
+    boolean deletetable = false;
+    @Parameter(names={"-c", "--create"}, description="create entries before any deletes")
+    boolean createEntries = false;
+    @Parameter(names={"-r", "--read"}, description="read entries after any creates/deletes")
+    boolean readEntries = false;
+    @Parameter(names={"-d", "--delete"}, description="delete entries after any creates")
+    boolean deleteEntries = false;
+    
+    public Opts() { 
+      super(DEFAULT_TABLE_NAME); 
+      auths = new Authorizations(DEFAULT_AUTHS.split(","));
+    }
   }
   
-  // for usage
-  private void printHelp() {
-    HelpFormatter hf = new HelpFormatter();
-    instanceOpt.setArgName("name");
-    zooKeepersOpt.setArgName("hosts");
-    usernameOpt.setArgName("user");
-    passwordOpt.setArgName("pass");
-    scanAuthsOpt.setArgName("scanauths");
-    tableNameOpt.setArgName("name");
-    hf.printHelp("accumulo accumulo.examples.client.ReadWriteExample", opts, true);
-  }
+  // hidden constructor
+  private ReadWriteExample() {}
   
-  private void execute() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, MutationsRejectedException {
+  private void execute(Opts opts, ScannerOpts scanOpts) throws Exception {
+    conn = opts.getConnector();
     // create table
-    if (hasOpt(createtableOpt)) {
+    if (opts.createtable) {
       SortedSet<Text> partitionKeys = new TreeSet<Text>();
       for (int i = Byte.MIN_VALUE; i < Byte.MAX_VALUE; i++)
         partitionKeys.add(new Text(new byte[] {(byte) i}));
-      conn.tableOperations().create(getOpt(tableNameOpt, DEFAULT_TABLE_NAME));
-      conn.tableOperations().addSplits(getOpt(tableNameOpt, DEFAULT_TABLE_NAME), partitionKeys);
+      conn.tableOperations().create(opts.getTableName());
+      conn.tableOperations().addSplits(opts.getTableName(), partitionKeys);
     }
-    
-    // create entries
-    if (hasOpt(createEntriesOpt))
-      createEntries(false);
-    
-    // delete entries
-    if (hasOpt(deleteEntriesOpt))
-      createEntries(true);
+
+    // send mutations
+    createEntries(opts);
     
     // read entries
-    if (hasOpt(readEntriesOpt)) {
+    if (opts.readEntries) {
       // Note that the user needs to have the authorizations for the specified scan authorizations
       // by an administrator first
-      Authorizations scanauths = new Authorizations(getOpt(scanAuthsOpt, DEFAULT_AUTHS).split(","));
-      
-      Scanner scanner = conn.createScanner(getOpt(tableNameOpt, DEFAULT_TABLE_NAME), scanauths);
+      Scanner scanner = conn.createScanner(opts.getTableName(), opts.auths);
+      scanner.setBatchSize(scanOpts.scanBatchSize);
       for (Entry<Key,Value> entry : scanner)
         System.out.println(entry.getKey().toString() + " -> " + entry.getValue().toString());
     }
     
     // delete table
-    if (hasOpt(deletetableOpt))
-      conn.tableOperations().delete(getOpt(tableNameOpt, DEFAULT_TABLE_NAME));
+    if (opts.deletetable)
+      conn.tableOperations().delete(opts.getTableName());
   }
   
-  private void createEntries(boolean delete) throws AccumuloException, TableNotFoundException, MutationsRejectedException {
-    BatchWriter writer = conn.createBatchWriter(getOpt(tableNameOpt, DEFAULT_TABLE_NAME), 10000, Long.MAX_VALUE, 1);
-    ColumnVisibility cv = new ColumnVisibility(DEFAULT_AUTHS.replace(',', '|'));
-    
-    Text cf = new Text("datatypes");
-    Text cq = new Text("xml");
-    byte[] row = {'h', 'e', 'l', 'l', 'o', '\0'};
-    byte[] value = {'w', 'o', 'r', 'l', 'd', '\0'};
-    
-    for (int i = 0; i < 10; i++) {
-      row[row.length - 1] = (byte) i;
-      Mutation m = new Mutation(new Text(row));
-      if (delete) {
-        m.putDelete(cf, cq, cv);
-      } else {
-        value[value.length - 1] = (byte) i;
-        m.put(cf, cq, cv, new Value(value));
+  private void createEntries(Opts opts) throws Exception {
+    if (opts.createEntries || opts.deleteEntries) {
+      BatchWriter writer = conn.createBatchWriter(opts.getTableName(), new BatchWriterConfig());
+      ColumnVisibility cv = new ColumnVisibility(opts.auths.toString().replace(',', '|'));
+      
+      Text cf = new Text("datatypes");
+      Text cq = new Text("xml");
+      byte[] row = {'h', 'e', 'l', 'l', 'o', '\0'};
+      byte[] value = {'w', 'o', 'r', 'l', 'd', '\0'};
+      
+      for (int i = 0; i < 10; i++) {
+        row[row.length - 1] = (byte) i;
+        Mutation m = new Mutation(new Text(row));
+        if (opts.deleteEntries) {
+          m.putDelete(cf, cq, cv);
+        }
+        if (opts.createEntries) {
+          value[value.length - 1] = (byte) i;
+          m.put(cf, cq, cv, new Value(value));
+        }
+        writer.addMutation(m);
       }
-      writer.addMutation(m);
+      writer.close();
     }
-    writer.close();
   }
   
   public static void main(String[] args) throws Exception {
     ReadWriteExample rwe = new ReadWriteExample();
-    
-    try {
-      rwe.configure(args);
-      rwe.execute();
-    } catch (ParseException e) {
-      System.err.println(e.getMessage());
-      rwe.printHelp();
-      System.exit(1);
-    }
+    Opts opts = new Opts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    opts.parseArgs(ReadWriteExample.class.getName(), args, scanOpts);
+    rwe.execute(opts, scanOpts);
   }
 }

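ReadWriteExample introduces the other base class in the cli package, ClientOnDefaultTable, whose subclass fixes a fallback table name in its constructor rather than requiring a table flag. The pattern, trimmed to its essentials (defaults taken from the code above):

    import org.apache.accumulo.core.cli.ClientOnDefaultTable;
    import org.apache.accumulo.core.security.Authorizations;

    import com.beust.jcommander.Parameter;

    class ExampleOpts extends ClientOnDefaultTable {
      @Parameter(names = {"-r", "--read"}, description = "read entries after any creates/deletes")
      boolean readEntries = false;

      public ExampleOpts() {
        super("test");  // the default table; getTableName() falls back to it
        auths = new Authorizations("LEVEL1,GROUP1".split(","));
      }
    }
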
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java Mon Jan 14 22:03:24 2013
@@ -20,6 +20,9 @@ import java.io.IOException;
 import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ClientOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -28,7 +31,6 @@ import org.apache.accumulo.core.client.M
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
@@ -49,13 +51,14 @@ public class RowOperations {
   
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException,
       MutationsRejectedException {
-    if (args.length != 4) {
-      log.error("Usage: <instance name> <zoo keepers> <username> <password>");
-      return;
-    }
+    
+    ClientOpts opts = new ClientOpts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(RowOperations.class.getName(), args, scanOpts, bwOpts);
     
     // First the setup work
-    connector = new ZooKeeperInstance(args[0], args[1]).getConnector(args[2], args[3].getBytes());
+    connector = opts.getConnector();
     
     // lets create an example table
     connector.tableOperations().create(table);
@@ -93,7 +96,7 @@ public class RowOperations {
     mut3.put(new Text("column"), col4, System.currentTimeMillis(), new Value("This is the value for this key".getBytes()));
     
     // Now we'll make a Batch Writer
-    bw = connector.createBatchWriter(table, 100000l, 30l, 1);
+    bw = connector.createBatchWriter(table, bwOpts.getBatchWriterConfig());
     
     // And add the mutations
     bw.addMutation(mut1);
@@ -104,9 +107,9 @@ public class RowOperations {
     bw.flush();
     
     // Now lets look at the rows
-    Scanner rowThree = getRow(new Text("row3"));
-    Scanner rowTwo = getRow(new Text("row2"));
-    Scanner rowOne = getRow(new Text("row1"));
+    Scanner rowThree = getRow(opts, scanOpts, new Text("row3"));
+    Scanner rowTwo = getRow(opts, scanOpts, new Text("row2"));
+    Scanner rowOne = getRow(opts, scanOpts, new Text("row1"));
     
     // And print them
     log.info("This is everything");
@@ -116,13 +119,13 @@ public class RowOperations {
     System.out.flush();
     
     // Now lets delete rowTwo with the iterator
-    rowTwo = getRow(new Text("row2"));
+    rowTwo = getRow(opts, scanOpts, new Text("row2"));
     deleteRow(rowTwo);
     
     // Now lets look at the rows again
-    rowThree = getRow(new Text("row3"));
-    rowTwo = getRow(new Text("row2"));
-    rowOne = getRow(new Text("row1"));
+    rowThree = getRow(opts, scanOpts, new Text("row3"));
+    rowTwo = getRow(opts, scanOpts, new Text("row2"));
+    rowOne = getRow(opts, scanOpts, new Text("row1"));
     
     // And print them
     log.info("This is row1 and row3");
@@ -134,12 +137,12 @@ public class RowOperations {
     // Should only see the two rows
     // Now lets delete rowOne without passing in the iterator
     
-    deleteRow(row1);
+    deleteRow(opts, scanOpts, row1);
     
     // Now lets look at the rows one last time
-    rowThree = getRow(new Text("row3"));
-    rowTwo = getRow(new Text("row2"));
-    rowOne = getRow(new Text("row1"));
+    rowThree = getRow(opts, scanOpts, new Text("row3"));
+    rowTwo = getRow(opts, scanOpts, new Text("row2"));
+    rowOne = getRow(opts, scanOpts, new Text("row1"));
     
     // And print them
     log.info("This is just row3");
@@ -163,14 +166,15 @@ public class RowOperations {
   
   /**
    * Deletes a row given a text object
+   * @param opts 
    * 
    * @param row
    * @throws TableNotFoundException
    * @throws AccumuloSecurityException
    * @throws AccumuloException
    */
-  private static void deleteRow(Text row) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    deleteRow(getRow(row));
+  private static void deleteRow(ClientOpts opts, ScannerOpts scanOpts, Text row) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    deleteRow(getRow(opts, scanOpts, row));
   }
   
   /**
@@ -205,6 +209,7 @@ public class RowOperations {
   
   /**
    * Gets a scanner over one row
+   * @param opts 
    * 
    * @param row
    * @return
@@ -213,9 +218,10 @@ public class RowOperations {
    * @throws AccumuloException
    * @throws IOException
    */
-  private static Scanner getRow(Text row) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+  private static Scanner getRow(ClientOpts opts, ScannerOpts scanOpts, Text row) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     // Create a scanner
     Scanner scanner = connector.createScanner(table, Constants.NO_AUTHS);
+    scanner.setBatchSize(scanOpts.scanBatchSize);
     // Say start key is the one with key of row
     // and end key is the one that immediately follows the row
     scanner.setRange(new Range(row));

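getRow now threads ScannerOpts through so the scanner batch size is configurable. The single-row scan it performs, extracted into a self-contained sketch:

    import org.apache.accumulo.core.Constants;
    import org.apache.accumulo.core.cli.ScannerOpts;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Range;
    import org.apache.hadoop.io.Text;

    public class RowScanSketch {
      // returns a scanner restricted to exactly one row
      static Scanner getRow(Connector connector, ScannerOpts scanOpts, String table, Text row) throws Exception {
        Scanner scanner = connector.createScanner(table, Constants.NO_AUTHS);
        scanner.setBatchSize(scanOpts.scanBatchSize);
        scanner.setRange(new Range(row));  // start and end keys bound the one row
        return scanner;
      }
    }
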
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java Mon Jan 14 22:03:24 2013
@@ -16,20 +16,35 @@
  */
 package org.apache.accumulo.examples.simple.client;
 
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.security.ColumnVisibility;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Simple example for writing random data in sequential order to Accumulo. See docs/examples/README.batch for instructions.
  */
 public class SequentialBatchWriter {
+  
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--start")
+    long start = 0;
+    @Parameter(names="--num", required=true)
+    long num = 0;
+    @Parameter(names="--size", required=true, description="size of the value to write")
+    int valueSize = 0;
+    @Parameter(names="--vis", converter=VisibilityConverter.class)
+    ColumnVisibility vis = new ColumnVisibility();
+  }
+  
   /**
    * Writes a specified number of entries to Accumulo using a {@link BatchWriter}. The rows of the entries will be sequential starting at a specified number.
    * The column families will be "foo" and column qualifiers will be "1". The values will be random byte arrays of a specified size.
@@ -40,40 +55,16 @@ public class SequentialBatchWriter {
    * @throws MutationsRejectedException
    */
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
-    if (args.length != 12) {
-      System.out
-          .println("Usage : SequentialBatchWriter <instance name> <zoo keepers> <username> <password> <table> <start> <num> <value size> <max memory> <max latency> <num threads> <visibility>");
-      return;
-    }
-    
-    String instanceName = args[0];
-    String zooKeepers = args[1];
-    String user = args[2];
-    byte[] pass = args[3].getBytes();
-    String table = args[4];
-    long start = Long.parseLong(args[5]);
-    long num = Long.parseLong(args[6]);
-    int valueSize = Integer.parseInt(args[7]);
-    long maxMemory = Long.parseLong(args[8]);
-    long maxLatency = Long.parseLong(args[9]) == 0 ? Long.MAX_VALUE : Long.parseLong(args[9]);
-    int numThreads = Integer.parseInt(args[10]);
-    String visibility = args[11];
-    
-    // Uncomment the following lines for detailed debugging info
-    // Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
-    // logger.setLevel(Level.TRACE);
-    
-    ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-    Connector connector = instance.getConnector(user, pass);
-    BatchWriter bw = connector.createBatchWriter(table, maxMemory, maxLatency, numThreads);
-    
-    long end = start + num;
+    Opts opts = new Opts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(SequentialBatchWriter.class.getName(), args, bwOpts);
+    Connector connector = opts.getConnector();
+    BatchWriter bw = connector.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());
     
-    // reuse ColumnVisibility object for better performance
-    ColumnVisibility cv = new ColumnVisibility(visibility);
+    long end = opts.start + opts.num;
     
-    for (long i = start; i < end; i++) {
-      Mutation m = RandomBatchWriter.createMutation(i, valueSize, cv);
+    for (long i = opts.start; i < end; i++) {
+      Mutation m = RandomBatchWriter.createMutation(i, opts.valueSize, opts.vis);
       bw.addMutation(m);
     }
     

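Several of the new Opts classes reference a VisibilityConverter for their --vis flags without showing its source. Under JCommander such a converter is an IStringConverter; an implementation along these lines would work (an illustrative sketch, not the committed class):

    import org.apache.accumulo.core.security.ColumnVisibility;

    import com.beust.jcommander.IStringConverter;

    public class VisibilityConverterSketch implements IStringConverter<ColumnVisibility> {
      @Override
      public ColumnVisibility convert(String value) {
        // ColumnVisibility parses the boolean visibility expression, e.g. "A&(B|C)"
        return new ColumnVisibility(value);
      }
    }
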
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java Mon Jan 14 22:03:24 2013
@@ -19,20 +19,21 @@ package org.apache.accumulo.examples.sim
 import java.util.Iterator;
 import java.util.Map.Entry;
 
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.mock.MockInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Computes recursive counts over file system information and stores them back into the same Accumulo table. See docs/examples/README.dirlist for instructions.
  */
@@ -41,10 +42,10 @@ public class FileCount {
   private int entriesScanned;
   private int inserts;
   
-  private Connector conn;
-  private Authorizations auths;
-  private ColumnVisibility colvis;
-  private String table;
+  private Opts opts;
+  private ScannerOpts scanOpts;
+  private BatchWriterOpts bwOpts;
+  
   
   private static class CountValue {
     int dirCount = 0;
@@ -171,7 +172,7 @@ public class FileCount {
   
   private Mutation createMutation(int depth, String dir, CountValue countVal) {
     Mutation m = new Mutation(String.format("%03d%s", depth, dir));
-    m.put(QueryUtil.DIR_COLF, QueryUtil.COUNTS_COLQ, colvis, countVal.toValue());
+    m.put(QueryUtil.DIR_COLF, QueryUtil.COUNTS_COLQ, opts.visibility, countVal.toValue());
     return m;
   }
   
@@ -201,7 +202,6 @@ public class FileCount {
       } else if (!currentDir.equals(dir)) {
         batchWriter.addMutation(createMutation(depth - 1, currentDir, countVal));
         inserts++;
-        
         currentDir = dir;
         countVal.clear();
       }
@@ -215,7 +215,7 @@ public class FileCount {
           // in this case the higher depth will not insert anything if the
           // dir has no children, so insert something here
           Mutation m = new Mutation(key.getRow());
-          m.put(QueryUtil.DIR_COLF, QueryUtil.COUNTS_COLQ, colvis, tmpCount.toValue());
+          m.put(QueryUtil.DIR_COLF, QueryUtil.COUNTS_COLQ, opts.visibility, tmpCount.toValue());
           batchWriter.addMutation(m);
           inserts++;
         }
@@ -234,20 +234,10 @@ public class FileCount {
     }
   }
   
-  FileCount(String instance, String zookeepers, String user, String password, String table, String auths, String colvis, boolean mock) throws Exception {
-    Instance inst;
-    if (mock) {
-      inst = new MockInstance(instance);
-    } else {
-      inst = new ZooKeeperInstance(instance, zookeepers);
-    }
-    this.conn = inst.getConnector(user, password);
-    if (auths.length() > 0)
-      this.auths = new Authorizations(auths.split(","));
-    else
-      this.auths = new Authorizations();
-    this.colvis = new ColumnVisibility(colvis);
-    this.table = table;
+  FileCount(Opts opts, ScannerOpts scanOpts, BatchWriterOpts bwOpts) throws Exception {
+    this.opts = opts;
+    this.scanOpts = scanOpts;
+    this.bwOpts = bwOpts;
   }
   
   public void run() throws Exception {
@@ -255,8 +245,10 @@ public class FileCount {
     entriesScanned = 0;
     inserts = 0;
     
-    Scanner scanner = conn.createScanner(table, auths);
-    BatchWriter bw = conn.createBatchWriter(table, 10000000, 60000l, 3);
+    Connector conn = opts.getConnector();
+    Scanner scanner = conn.createScanner(opts.tableName, opts.auths);
+    scanner.setBatchSize(scanOpts.scanBatchSize);
+    BatchWriter bw = conn.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());
     
     long t1 = System.currentTimeMillis();
     
@@ -274,21 +266,26 @@ public class FileCount {
     
     long t3 = System.currentTimeMillis();
     
-    System.out.printf("Max depth              : %d\n", depth);
-    System.out.printf("Time to find max depth : %,d ms\n", (t2 - t1));
-    System.out.printf("Time to compute counts : %,d ms\n", (t3 - t2));
-    System.out.printf("Entries scanned        : %,d \n", entriesScanned);
-    System.out.printf("Counts inserted        : %,d \n", inserts);
+    System.out.printf("Max depth              : %d%n", depth);
+    System.out.printf("Time to find max depth : %,d ms%n", (t2 - t1));
+    System.out.printf("Time to compute counts : %,d ms%n", (t3 - t2));
+    System.out.printf("Entries scanned        : %,d %n", entriesScanned);
+    System.out.printf("Counts inserted        : %,d %n", inserts);
+  }
+  
+  public static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--vis", description="use a given visibility for the new counts", converter=VisibilityConverter.class)
+    ColumnVisibility visibility = new ColumnVisibility();
   }
   
   public static void main(String[] args) throws Exception {
-    if (args.length != 7) {
-      System.out.println("usage: " + FileCount.class.getSimpleName() + " <instance> <zookeepers> <user> <pass> <table> <auths> <colvis>");
-      System.exit(1);
-    }
-    
-    FileCount fileCount = new FileCount(args[0], args[1], args[2], args[3], args[4], args[5], args[6], false);
-    
+    Opts opts = new Opts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    String programName = FileCount.class.getName();
+    opts.parseArgs(programName, args, scanOpts, bwOpts);
+
+    FileCount fileCount = new FileCount(opts, scanOpts, bwOpts);
     fileCount.run();
   }
 }

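FileCount, like the writers above, now takes its writer settings from bwOpts.getBatchWriterConfig() rather than the old (maxMemory, maxLatency, numThreads) positional triple. What that config object looks like when built by hand (the values here are illustrative, not the option defaults):

    import java.util.concurrent.TimeUnit;

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;

    public class WriterConfigSketch {
      static BatchWriter open(Connector conn, String table) throws Exception {
        BatchWriterConfig config = new BatchWriterConfig()
            .setMaxMemory(10 * 1024 * 1024)       // bytes buffered client-side before a flush
            .setMaxLatency(60, TimeUnit.SECONDS)  // longest a mutation may sit unflushed
            .setMaxWriteThreads(3);               // threads writing to tablet servers
        return conn.createBatchWriter(table, config);
      }
    }
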
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Ingest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Ingest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Ingest.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Ingest.java Mon Jan 14 22:03:24 2013
@@ -18,11 +18,14 @@ package org.apache.accumulo.examples.sim
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ClientOpts;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.LongCombiner;
@@ -32,6 +35,8 @@ import org.apache.accumulo.examples.simp
 import org.apache.accumulo.examples.simple.filedata.FileDataIngest;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Recursively lists the files and directories under a given path, ingests their names and file info into one Accumulo table, indexes the file names in a
  * separate table, and the file data into a third table. See docs/examples/README.dirlist for instructions.
@@ -115,50 +120,53 @@ public class Ingest {
     }
   }
   
+  static class Opts extends ClientOpts {
+    @Parameter(names="--dirTable", description="a table to hold the directory information")
+    String nameTable = "dirTable";
+    @Parameter(names="--indexTable", description="an index over the ingested data")
+    String indexTable = "indexTable";
+    @Parameter(names="--dataTable", description="the file data, chunked into parts")
+    String dataTable = "dataTable";
+    @Parameter(names="--vis", description="the visibility to mark the data", converter=VisibilityConverter.class)
+    ColumnVisibility visibility = new ColumnVisibility();
+    @Parameter(names="--chunkSize", description="the size of chunks when breaking down files")
+    int chunkSize = 100000;
+    @Parameter(description="<dir> { <dir> ... }")
+    List<String> directories = new ArrayList<String>();
+  }
+  
+  
   public static void main(String[] args) throws Exception {
-    if (args.length < 10) {
-      System.out.println("usage: " + Ingest.class.getSimpleName()
-          + " <instance> <zoo> <user> <pass> <dir table> <index table> <data table> <visibility> <data chunk size> <dir>{ <dir>}");
-      System.exit(1);
-    }
-    
-    String instance = args[0];
-    String zooKeepers = args[1];
-    String user = args[2];
-    String pass = args[3];
-    String nameTable = args[4];
-    String indexTable = args[5];
-    String dataTable = args[6];
-    ColumnVisibility colvis = new ColumnVisibility(args[7]);
-    int chunkSize = Integer.parseInt(args[8]);
-    
-    Connector conn = new ZooKeeperInstance(instance, zooKeepers).getConnector(user, pass.getBytes());
-    if (!conn.tableOperations().exists(nameTable))
-      conn.tableOperations().create(nameTable);
-    if (!conn.tableOperations().exists(indexTable))
-      conn.tableOperations().create(indexTable);
-    if (!conn.tableOperations().exists(dataTable)) {
-      conn.tableOperations().create(dataTable);
-      conn.tableOperations().attachIterator(dataTable, new IteratorSetting(1, ChunkCombiner.class));
-    }
-    
-    BatchWriter dirBW = conn.createBatchWriter(nameTable, 50000000, 300000l, 4);
-    BatchWriter indexBW = conn.createBatchWriter(indexTable, 50000000, 300000l, 4);
-    BatchWriter dataBW = conn.createBatchWriter(dataTable, 50000000, 300000l, 4);
-    FileDataIngest fdi = new FileDataIngest(chunkSize, colvis);
-    for (int i = 9; i < args.length; i++) {
-      recurse(new File(args[i]), colvis, dirBW, indexBW, fdi, dataBW);
+    Opts opts = new Opts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(Ingest.class.getName(), args, bwOpts);
+    
+    Connector conn = opts.getConnector();
+    if (!conn.tableOperations().exists(opts.nameTable))
+      conn.tableOperations().create(opts.nameTable);
+    if (!conn.tableOperations().exists(opts.indexTable))
+      conn.tableOperations().create(opts.indexTable);
+    if (!conn.tableOperations().exists(opts.dataTable)) {
+      conn.tableOperations().create(opts.dataTable);
+      conn.tableOperations().attachIterator(opts.dataTable, new IteratorSetting(1, ChunkCombiner.class));
+    }
+    
+    BatchWriter dirBW = conn.createBatchWriter(opts.nameTable, bwOpts.getBatchWriterConfig());
+    BatchWriter indexBW = conn.createBatchWriter(opts.indexTable, bwOpts.getBatchWriterConfig());
+    BatchWriter dataBW = conn.createBatchWriter(opts.dataTable, bwOpts.getBatchWriterConfig());
+    FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
+    for (String dir : opts.directories) {
+      recurse(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
       
       // fill in parent directory info
-      String file = args[i];
       int slashIndex = -1;
-      while ((slashIndex = file.lastIndexOf("/")) > 0) {
-        file = file.substring(0, slashIndex);
-        ingest(new File(file), colvis, dirBW, indexBW, fdi, dataBW);
+      while ((slashIndex = dir.lastIndexOf("/")) > 0) {
+        dir = dir.substring(0, slashIndex);
+        ingest(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
       }
     }
-    ingest(new File("/"), colvis, dirBW, indexBW, fdi, dataBW);
-    
+    ingest(new File("/"), opts.visibility, dirBW, indexBW, fdi, dataBW);
+
     dirBW.close();
     indexBW.close();
     dataBW.close();

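Ingest.Opts demonstrates JCommander's "main parameter": the one @Parameter given no names collects whatever positional arguments remain, here the list of directories. A runnable sketch of just that mechanism:

    import java.util.ArrayList;
    import java.util.List;

    import com.beust.jcommander.JCommander;
    import com.beust.jcommander.Parameter;

    public class MainParamSketch {
      static class Opts {
        @Parameter(names = "--chunkSize", description = "bytes per stored chunk")
        int chunkSize = 100000;
        @Parameter(description = "<dir> { <dir> ... }")  // no names: the main parameter
        List<String> directories = new ArrayList<String>();
      }

      public static void main(String[] args) {
        Opts opts = new Opts();
        new JCommander(opts).parse(args);
        for (String dir : opts.directories)
          System.out.println("would ingest " + dir + " in chunks of " + opts.chunkSize);
      }
    }
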
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/QueryUtil.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/QueryUtil.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/QueryUtil.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/QueryUtil.java Mon Jan 14 22:03:24 2013
@@ -20,13 +20,13 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.TreeMap;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
@@ -34,6 +34,8 @@ import org.apache.accumulo.core.iterator
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Provides utility methods for getting the info for a file, listing the contents of a directory, and performing single wild card searches on file or directory
  * names. See docs/examples/README.dirlist for instructions.
@@ -48,12 +50,11 @@ public class QueryUtil {
   public static final Text INDEX_COLF = new Text("i");
   public static final Text COUNTS_COLQ = new Text("counts");
   
-  public QueryUtil(String instanceName, String zooKeepers, String user, String password, String tableName, Authorizations auths) throws AccumuloException,
+  public QueryUtil(Opts opts) throws AccumuloException,
       AccumuloSecurityException {
-    ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-    conn = instance.getConnector(user, password.getBytes());
-    this.tableName = tableName;
-    this.auths = auths;
+    conn = opts.getConnector();
+    this.tableName = opts.tableName;
+    this.auths = opts.auths;
   }
   
   /**
@@ -252,6 +253,13 @@ public class QueryUtil {
     return scanner;
   }
   
+  public static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--path", description="the directory to list")
+    String path = "/";
+    @Parameter(names="--search", description="find a file or directorys with the given name")
+    boolean search = false;
+  }
+  
   /**
    * Lists the contents of a directory using the directory table, or searches for file or directory names (if the -search flag is included).
    * 
@@ -259,18 +267,17 @@ public class QueryUtil {
    * @throws Exception
    */
   public static void main(String[] args) throws Exception {
-    if (args.length != 7 && (args.length != 8 || !args[7].equals("-search"))) {
-      System.out.println("usage: " + QueryUtil.class.getSimpleName() + " <instance> <zookeepers> <user> <pass> <table> <auths> <path> [-search]");
-      System.exit(1);
-    }
-    QueryUtil q = new QueryUtil(args[0], args[1], args[2], args[3], args[4], new Authorizations(args[5].split(",")));
-    if (args.length == 8) {
-      for (Entry<Key,Value> e : q.singleWildCardSearch(args[6])) {
+    Opts opts = new Opts();
+    opts.parseArgs(QueryUtil.class.getName(), args);
+    QueryUtil q = new QueryUtil(opts);
+    if (opts.search) {
+      for (Entry<Key,Value> e : q.singleWildCardSearch(opts.path)) {
         System.out.println(e.getKey().getColumnQualifier());
       }
-    }
-    for (Entry<String,Map<String,String>> e : q.getDirList(args[6]).entrySet()) {
-      System.out.println(e);
+    } else {
+      for (Entry<String,Map<String,String>> e : q.getDirList(opts.path).entrySet()) {
+        System.out.println(e);
+      }
     }
   }
 }

Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Viewer.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Viewer.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Viewer.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Viewer.java Mon Jan 14 22:03:24 2013
@@ -36,10 +36,11 @@ import javax.swing.tree.DefaultTreeModel
 import javax.swing.tree.TreePath;
 
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.examples.simple.filedata.FileDataQuery;
 import org.apache.log4j.Logger;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Provides a GUI for browsing the file system information stored in Accumulo. See docs/examples/README.dirlist for instructions.
  */
@@ -96,14 +97,14 @@ public class Viewer extends JFrame imple
     }
   }
   
-  public Viewer(String instanceName, String zooKeepers, String user, String password, String tableName, String dataTableName, Authorizations auths, String path)
+  public Viewer(Opts opts)
       throws Exception {
     super("File Viewer");
     setSize(1000, 800);
     setDefaultCloseOperation(EXIT_ON_CLOSE);
-    q = new QueryUtil(instanceName, zooKeepers, user, password, tableName, auths);
-    fdq = new FileDataQuery(instanceName, zooKeepers, user, password, dataTableName, auths);
-    this.topPath = path;
+    q = new QueryUtil(opts);
+    fdq = new FileDataQuery(opts.instance, opts.zookeepers, opts.user, opts.getPassword(), opts.dataTable, opts.auths);
+    this.topPath = opts.path;
   }
   
   public void populate(DefaultMutableTreeNode node) throws TableNotFoundException {
@@ -200,15 +201,16 @@ public class Viewer extends JFrame imple
     }
   }
   
+  static class Opts extends QueryUtil.Opts {
+    @Parameter(names="--dataTable")
+    String dataTable = "dataTable";
+  }
+  
   public static void main(String[] args) throws Exception {
-    if (args.length != 7 && args.length != 8) {
-      System.out.println("usage: " + Viewer.class.getSimpleName() + " <instance> <zoo> <user> <pass> <table> <datatable> <auths> [rootpath]");
-      System.exit(1);
-    }
-    String rootpath = "/";
-    if (args.length == 8)
-      rootpath = args[7];
-    Viewer v = new Viewer(args[0], args[1], args[2], args[3], args[4], args[5], new Authorizations(args[6].split(",")), rootpath);
+    Opts opts = new Opts();
+    opts.parseArgs(Viewer.class.getName(), args);
+    
+    Viewer v = new Viewer(opts);
     v.init();
     v.setVisible(true);
   }

Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/CharacterHistogram.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/CharacterHistogram.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/CharacterHistogram.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/CharacterHistogram.java Mon Jan 14 22:03:24 2013
@@ -22,12 +22,12 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Map.Entry;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.SummingArrayCombiner;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.hadoop.conf.Configured;
@@ -37,6 +37,8 @@ import org.apache.hadoop.mapreduce.Mappe
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * A MapReduce that computes a histogram of byte frequency for each file and stores the histogram alongside the file data. The {@link ChunkInputFormat} is used
  * to read the file data from Accumulo. See docs/examples/README.filedata for instructions.
@@ -72,15 +74,23 @@ public class CharacterHistogram extends 
     }
   }
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--vis")
+    String visibilities = "";
+  }
+  
+  
   @Override
   public int run(String[] args) throws Exception {
     Job job = new Job(getConf(), this.getClass().getSimpleName());
     job.setJarByClass(this.getClass());
-    
+
+    Opts opts = new Opts();
+    opts.parseArgs(CharacterHistogram.class.getName(), args);
+
     job.setInputFormatClass(ChunkInputFormat.class);
-    ChunkInputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
-    ChunkInputFormat.setInputInfo(job.getConfiguration(), args[2], args[3].getBytes(), args[4], new Authorizations(args[5].split(",")));
-    job.getConfiguration().set(VIS, args[6]);
+    opts.setAccumuloConfigs(job);
+    job.getConfiguration().set(VIS, opts.visibilities);
     
     job.setMapperClass(HistMapper.class);
     job.setMapOutputKeyClass(Text.class);
@@ -89,8 +99,6 @@ public class CharacterHistogram extends 
     job.setNumReduceTasks(0);
     
     job.setOutputFormatClass(AccumuloOutputFormat.class);
-    AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
-    AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), args[2], args[3].getBytes(), false, args[4]);
     
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;

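Note that the single opts.setAccumuloConfigs(job) call replaces both the input-side setZooKeeperInstance/setInputInfo pair and the output-side calls deleted at the bottom of the hunk; tool-specific state such as the visibility string still travels through the job Configuration. A sketch of the consuming side, assuming VIS is a plain String configuration key as its use in run() suggests (the mapper field name and default are illustrative):

    // In the mapper, read back the value that run() stored under VIS;
    // the "" default is an assumption, not from the patch.
    @Override
    protected void setup(Context context) {
      String vis = context.getConfiguration().get(VIS, "");
      visibility = new ColumnVisibility(vis);  // field name illustrative
    }
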
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java Mon Jan 14 22:03:24 2013
@@ -21,12 +21,15 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.List;
 
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.ArrayByteSequence;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Mutation;
@@ -34,6 +37,8 @@ import org.apache.accumulo.core.data.Val
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Takes a list of files and archives them into Accumulo keyed on the SHA1 hashes of the files. See docs/examples/README.filedata for instructions.
  */
@@ -153,31 +158,34 @@ public class FileDataIngest {
     return sb.toString();
   }
   
+  public static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--vis", description="use a given visibility for the new counts", converter=VisibilityConverter.class)
+    ColumnVisibility visibility = new ColumnVisibility();
+    
+    @Parameter(names="--chunk", description="size of the chunks used to store partial files")
+    int chunkSize = 64*1024;
+    
+    @Parameter(description="<file> { <file> ... }")
+    List<String> files = new ArrayList<String>();
+  }
+  
+  
   public static void main(String[] args) throws Exception {
-    if (args.length < 8) {
-      System.out.println("usage: " + FileDataIngest.class.getSimpleName()
-          + " <instance> <zoo> <user> <pass> <data table> <visibility> <data chunk size> <file>{ <file>}");
-      System.exit(1);
-    }
-    
-    String instance = args[0];
-    String zooKeepers = args[1];
-    String user = args[2];
-    String pass = args[3];
-    String dataTable = args[4];
-    ColumnVisibility colvis = new ColumnVisibility(args[5]);
-    int chunkSize = Integer.parseInt(args[6]);
-    
-    Connector conn = new ZooKeeperInstance(instance, zooKeepers).getConnector(user, pass.getBytes());
-    if (!conn.tableOperations().exists(dataTable)) {
-      conn.tableOperations().create(dataTable);
-      conn.tableOperations().attachIterator(dataTable, new IteratorSetting(1, ChunkCombiner.class));
-    }
-    BatchWriter bw = conn.createBatchWriter(dataTable, 50000000, 300000l, 4);
-    FileDataIngest fdi = new FileDataIngest(chunkSize, colvis);
-    for (int i = 7; i < args.length; i++) {
-      fdi.insertFileData(args[i], bw);
+    Opts opts = new Opts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(FileDataIngest.class.getName(), args, bwOpts);
+    
+    Connector conn = opts.getConnector();
+    if (!conn.tableOperations().exists(opts.tableName)) {
+      conn.tableOperations().create(opts.tableName);
+      conn.tableOperations().attachIterator(opts.tableName, new IteratorSetting(1, ChunkCombiner.class));
+    }
+    BatchWriter bw = conn.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());
+    FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
+    for (String filename : opts.files) {
+      fdi.insertFileData(filename, bw);
     }
     bw.close();
+    opts.stopTracing();
   }
 }

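Two JCommander features carry this conversion: the unnamed @Parameter soaks up the trailing file arguments into opts.files, and converter=VisibilityConverter.class turns the raw --vis string into a typed ColumnVisibility before the field is assigned. The converter itself is not shown in this patch; a plausible sketch under that assumption:

    import com.beust.jcommander.IStringConverter;
    import org.apache.accumulo.core.security.ColumnVisibility;

    // Assumed shape of VisibilityConverter: JCommander calls convert()
    // with the raw option value and assigns the result to the field.
    public class VisibilityConverter implements IStringConverter<ColumnVisibility> {
      @Override
      public ColumnVisibility convert(String value) {
        return new ColumnVisibility(value);
      }
    }
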
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java Mon Jan 14 22:03:24 2013
@@ -43,10 +43,10 @@ public class FileDataQuery {
   private ChunkInputStream cis;
   Scanner scanner;
   
-  public FileDataQuery(String instanceName, String zooKeepers, String user, String password, String tableName, Authorizations auths) throws AccumuloException,
+  public FileDataQuery(String instanceName, String zooKeepers, String user, byte[] password, String tableName, Authorizations auths) throws AccumuloException,
       AccumuloSecurityException, TableNotFoundException {
     ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-    conn = instance.getConnector(user, password.getBytes());
+    conn = instance.getConnector(user, password);
     lastRefs = new ArrayList<Entry<Key,Value>>();
     cis = new ChunkInputStream();
     scanner = conn.createScanner(tableName, auths);

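The constructor now takes the password as byte[] rather than String, pushing the getBytes() decision out to whoever holds the credential. The Viewer hunk above shows the matching caller; condensed (field names per that hunk):

    // Caller-side sketch: opts.getPassword() already yields byte[],
    // so no encoding conversion happens inside FileDataQuery.
    FileDataQuery fdq = new FileDataQuery(opts.instance, opts.zookeepers,
        opts.user, opts.getPassword(), opts.dataTable, opts.auths);
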
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java Mon Jan 14 22:03:24 2013
@@ -16,6 +16,8 @@
  */
 package org.apache.accumulo.examples.simple.helloworld;
 
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -24,7 +26,6 @@ import org.apache.accumulo.core.client.M
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.hadoop.io.Text;
@@ -33,29 +34,19 @@ import org.apache.hadoop.io.Text;
  * Inserts 10K rows (50K entries) into accumulo with each row having 5 entries.
  */
 public class InsertWithBatchWriter {
+  
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, MutationsRejectedException, TableExistsException,
       TableNotFoundException {
-    if (args.length != 5) {
-      System.out
-          .println("Usage: accumulo examples-simplejar accumulo.examples.helloworld.InsertWithBatchWriter <instance name> <zoo keepers> <username> <password> <tableName>");
-      System.exit(1);
-    }
-    
-    String instanceName = args[0];
-    String zooKeepers = args[1];
-    String user = args[2];
-    byte[] pass = args[3].getBytes();
-    String tableName = args[4];
-    
-    ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-    Connector connector = instance.getConnector(user, pass);
-    MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(200000l, 300, 4);
-    
-    BatchWriter bw = null;
-    
-    if (!connector.tableOperations().exists(tableName))
-      connector.tableOperations().create(tableName);
-    bw = mtbw.getBatchWriter(tableName);
+    ClientOnRequiredTable opts = new ClientOnRequiredTable();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(InsertWithBatchWriter.class.getName(), args, bwOpts);
+    
+    Connector connector = opts.getConnector();
+    MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(bwOpts.getBatchWriterConfig());
+    
+    if (!connector.tableOperations().exists(opts.tableName))
+      connector.tableOperations().create(opts.tableName);
+    BatchWriter bw = mtbw.getBatchWriter(opts.tableName);
     
     Text colf = new Text("colfam");
     System.out.println("writing ...");
@@ -68,7 +59,6 @@ public class InsertWithBatchWriter {
       if (i % 100 == 0)
         System.out.println(i);
     }
-    
     mtbw.close();
   }
   

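The MultiTableBatchWriter change is worth a second look: a single BatchWriterConfig, built from the batching options collected by BatchWriterOpts, now governs writers for any number of tables. A fragment sketch with illustrative table names (connector and bwOpts as in the main() above):

    // Writers obtained from one MultiTableBatchWriter share the memory,
    // latency, and thread budget of a single BatchWriterConfig.
    MultiTableBatchWriter mtbw =
        connector.createMultiTableBatchWriter(bwOpts.getBatchWriterConfig());
    BatchWriter data  = mtbw.getBatchWriter("helloTable");   // illustrative
    BatchWriter index = mtbw.getBatchWriter("helloIndex");   // illustrative
    Mutation m = new Mutation(new Text("row_0"));
    m.put(new Text("colfam"), new Text("colqual_0"), new Value("v".getBytes()));
    data.addMutation(m);
    index.addMutation(m);
    mtbw.close(); // flushes every per-table writer at once
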
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithOutputFormat.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithOutputFormat.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithOutputFormat.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithOutputFormat.java Mon Jan 14 22:03:24 2013
@@ -16,6 +16,7 @@
  */
 package org.apache.accumulo.examples.simple.helloworld;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
@@ -33,20 +34,18 @@ import org.apache.hadoop.util.ToolRunner
  * Inserts 10K rows (50K entries) into accumulo with each row having 5 entries using an OutputFormat.
  */
 public class InsertWithOutputFormat extends Configured implements Tool {
+
   // this is a tool because when you run a mapreduce, you will need to use the
   // ToolRunner
   // if you want libjars to be passed properly to the map and reduce tasks
   // even though this class isn't a mapreduce
   @Override
   public int run(String[] args) throws Exception {
-    if (args.length != 5) {
-      System.out.println("Usage: bin/tool.sh " + this.getClass().getName() + " <instance name> <zoo keepers> <username> <password> <tablename>");
-      return 1;
-    }
-    Text tableName = new Text(args[4]);
+    ClientOnRequiredTable opts = new ClientOnRequiredTable();
+    opts.parseArgs(this.getClass().getName(), args);
+    
     Job job = new Job(getConf());
-    AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
-    AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), args[2], args[3].getBytes(), true, null);
+    opts.setAccumuloConfigs(job);
     job.setOutputFormatClass(AccumuloOutputFormat.class);
     
     // when running a mapreduce, you won't need to instantiate the output
@@ -55,7 +54,7 @@ public class InsertWithOutputFormat exte
     // output.collect(tableName, mutation)
     TaskAttemptContext context = ContextFactory.createTaskAttemptContext(job);
     RecordWriter<Text,Mutation> rw = new AccumuloOutputFormat().getRecordWriter(context);
-    
+    Text table = new Text(opts.tableName);
     Text colf = new Text("colfam");
     System.out.println("writing ...");
     for (int i = 0; i < 10000; i++) {
@@ -63,7 +62,7 @@ public class InsertWithOutputFormat exte
       for (int j = 0; j < 5; j++) {
         m.put(colf, new Text(String.format("colqual_%d", j)), new Value((String.format("value_%d_%d", i, j)).getBytes()));
       }
-      rw.write(tableName, m); // repeat until done
+      rw.write(table, m); // repeat until done
       if (i % 100 == 0)
         System.out.println(i);
     }

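Since the record writer now gets its instance, credentials, and output settings from opts.setAccumuloConfigs(job), the only per-write decision left is which table each mutation targets, carried by the Text key. Condensed from the hunk (the on-demand table creation is assumed from the deleted setOutputInfo(..., true, ...) call, whose boolean enabled createTables):

    // The Text passed to write() selects the destination table; with
    // table creation enabled in the output config, an unknown table
    // would be created on demand.
    Text table = new Text(opts.tableName);
    Mutation m = new Mutation(new Text("row_0"));
    m.put(new Text("colfam"), new Text("colqual_0"),
        new Value("value_0_0".getBytes()));
    rw.write(table, m);
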
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java Mon Jan 14 22:03:24 2013
@@ -19,45 +19,47 @@ package org.apache.accumulo.examples.sim
 import java.util.Iterator;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Reads all data between two rows; all data after a given row; or all data in a table, depending on the number of arguments given.
  */
 public class ReadData {
+  
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--startKey")
+    String startKey;
+    @Parameter(names="--endKey")
+    String endKey;
+  }
+  
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    if (args.length < 5 || args.length > 7) {
-      System.out
-          .println("bin/accumulo accumulo.examples.helloworld.ReadData <instance name> <zoo keepers> <username> <password> <tablename> [startkey [endkey]]");
-      System.exit(1);
-    }
-    
-    String instanceName = args[0];
-    String zooKeepers = args[1];
-    String user = args[2];
-    byte[] pass = args[3].getBytes();
-    String tableName = args[4];
+    Opts opts = new Opts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    opts.parseArgs(ReadData.class.getName(), args, scanOpts);
     
-    ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-    Connector connector = instance.getConnector(user, pass);
+    Connector connector = opts.getConnector();
     
-    Scanner scan = connector.createScanner(tableName, Constants.NO_AUTHS);
+    Scanner scan = connector.createScanner(opts.tableName, opts.auths);
+    scan.setBatchSize(scanOpts.scanBatchSize);
     Key start = null;
-    if (args.length > 5)
-      start = new Key(new Text(args[5]));
+    if (opts.startKey != null)
+      start = new Key(new Text(opts.startKey));
     Key end = null;
-    if (args.length > 6)
-      end = new Key(new Text(args[6]));
+    if (opts.endKey != null)
+      end = new Key(new Text(opts.endKey));
     scan.setRange(new Range(start, end));
     Iterator<Entry<Key,Value>> iter = scan.iterator();
     

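The optional-bounds logic is the interesting part of this hunk: a null Key leaves that side of the Range open, so running with neither --startKey nor --endKey scans the whole table. Written as an equivalent fragment:

    // null on either side of Range means unbounded on that side.
    Key start = opts.startKey == null ? null : new Key(new Text(opts.startKey));
    Key end   = opts.endKey   == null ? null : new Key(new Text(opts.endKey));
    scan.setRange(new Range(start, end)); // (null, null) covers the table
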
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java Mon Jan 14 22:03:24 2013
@@ -19,13 +19,13 @@ package org.apache.accumulo.examples.sim
 import java.util.HashSet;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IsolatedScanner;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
@@ -33,6 +33,8 @@ import org.apache.accumulo.core.data.Val
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * This example shows how a concurrent reader and writer can interfere with each other. It creates two threads that run forever reading and writing to the same
  * table.
@@ -47,15 +49,16 @@ public class InterferenceTest {
   
   private static final int NUM_ROWS = 500;
   private static final int NUM_COLUMNS = 113; // scanner batches 1000 by default, so make num columns not a multiple of 10
-  private static long iterations;
   private static final Logger log = Logger.getLogger(InterferenceTest.class);
   
   static class Writer implements Runnable {
     
-    private BatchWriter bw;
+    private final BatchWriter bw;
+    private final long iterations;
     
-    Writer(BatchWriter bw) {
+    Writer(BatchWriter bw, long iterations) {
       this.bw = bw;
+      this.iterations = iterations; 
     }
     
     @Override
@@ -140,31 +143,33 @@ public class InterferenceTest {
     }
   }
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--iterations", description="number of times to run", required=true)
+    long iterations = 0;
+    @Parameter(names="--isolated", description="use isolated scans")
+    boolean isolated = false;
+  }
+  
+  
   public static void main(String[] args) throws Exception {
+    Opts opts = new Opts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(InterferenceTest.class.getName(), args, bwOpts);
+    
+    if (opts.iterations < 1)
+      opts.iterations = Long.MAX_VALUE;
+    
+    Connector conn = opts.getConnector();
+    if (!conn.tableOperations().exists(opts.tableName))
+      conn.tableOperations().create(opts.tableName);
     
-    if (args.length != 7) {
-      System.out.println("Usage : " + InterferenceTest.class.getName() + " <instance name> <zookeepers> <user> <password> <table> <iterations> true|false");
-      System.out.println("          The last argument determines if scans should be isolated.  When false, expect to see errors");
-      return;
-    }
-    
-    ZooKeeperInstance zki = new ZooKeeperInstance(args[0], args[1]);
-    Connector conn = zki.getConnector(args[2], args[3].getBytes());
-    
-    String table = args[4];
-    iterations = Long.parseLong(args[5]);
-    if (iterations < 1)
-      iterations = Long.MAX_VALUE;
-    if (!conn.tableOperations().exists(table))
-      conn.tableOperations().create(table);
-    
-    Thread writer = new Thread(new Writer(conn.createBatchWriter(table, 10000000, 60000l, 3)));
+    Thread writer = new Thread(new Writer(conn.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig()), opts.iterations));
     writer.start();
     Reader r;
-    if (Boolean.parseBoolean(args[6]))
-      r = new Reader(new IsolatedScanner(conn.createScanner(table, Constants.NO_AUTHS)));
+    if (opts.isolated)
+      r = new Reader(new IsolatedScanner(conn.createScanner(opts.tableName, opts.auths)));
     else
-      r = new Reader(conn.createScanner(table, Constants.NO_AUTHS));
+      r = new Reader(conn.createScanner(opts.tableName, opts.auths));
     Thread reader;
     reader = new Thread(r);
     reader.start();

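Moving iterations from a static field into the Writer constructor makes each worker self-contained, so two writers with different iteration counts could coexist. The isolated/plain scanner choice also collapses to a single expression, since IsolatedScanner decorates a plain Scanner and implements the same interface:

    // Equivalent to the if/else above; Reader accepts either form.
    Scanner s = conn.createScanner(opts.tableName, opts.auths);
    Reader r = new Reader(opts.isolated ? new IsolatedScanner(s) : s);
    new Thread(r).start();
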
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java Mon Jan 14 22:03:24 2013
@@ -18,12 +18,12 @@ package org.apache.accumulo.examples.sim
 
 import java.io.IOException;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.RegExFilter;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
@@ -33,6 +33,8 @@ import org.apache.hadoop.mapreduce.lib.o
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 public class RegexExample extends Configured implements Tool {
   public static class RegexMapper extends Mapper<Key,Value,Key,Value> {
     public void map(Key row, Value data, Context context) throws IOException, InterruptedException {
@@ -40,16 +42,31 @@ public class RegexExample extends Config
     }
   }
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--rowRegex")
+    String rowRegex;
+    @Parameter(names="--columnFamilyRegex")
+    String columnFamilyRegex;
+    @Parameter(names="--columnQualifierRegex")
+    String columnQualifierRegex;
+    @Parameter(names="--valueRegex")
+    String valueRegex;
+    @Parameter(names="--output", required=true)
+    String destination;
+  }
+  
   public int run(String[] args) throws Exception {
-    Job job = new Job(getConf(), this.getClass().getSimpleName());
-    job.setJarByClass(this.getClass());
+    Opts opts = new Opts();
+    opts.parseArgs(getClass().getName(), args);
+    
+    Job job = new Job(getConf(), getClass().getSimpleName());
+    job.setJarByClass(getClass());
     
     job.setInputFormatClass(AccumuloInputFormat.class);
-    AccumuloInputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
-    AccumuloInputFormat.setInputInfo(job.getConfiguration(), args[2], args[3].getBytes(), args[4], new Authorizations());
+    opts.setAccumuloConfigs(job);
     
     IteratorSetting regex = new IteratorSetting(50, "regex", RegExFilter.class);
-    RegExFilter.setRegexs(regex, args[5], args[6], args[7], args[8], false);
+    RegExFilter.setRegexs(regex, opts.rowRegex, opts.columnFamilyRegex, opts.columnQualifierRegex, opts.valueRegex, false);
     AccumuloInputFormat.addIterator(job.getConfiguration(), regex);
     
     job.setMapperClass(RegexMapper.class);
@@ -59,12 +76,12 @@ public class RegexExample extends Config
     job.setNumReduceTasks(0);
     
     job.setOutputFormatClass(TextOutputFormat.class);
-    TextOutputFormat.setOutputPath(job, new Path(args[9]));
+    TextOutputFormat.setOutputPath(job, new Path(opts.destination));
     
-    System.out.println("setRowRegex: " + args[5]);
-    System.out.println("setColumnFamilyRegex: " + args[6]);
-    System.out.println("setColumnQualifierRegex: " + args[7]);
-    System.out.println("setValueRegex: " + args[8]);
+    System.out.println("setRowRegex: " + opts.rowRegex);
+    System.out.println("setColumnFamilyRegex: " + opts.columnFamilyRegex);
+    System.out.println("setColumnQualifierRegex: " + opts.columnQualifierRegex);
+    System.out.println("setValueRegex: " + opts.valueRegex);
     
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;

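The four regex options map one-to-one onto RegExFilter.setRegexs, and options the user omits stay null, which (judging from the way they are passed straight through here) the filter leaves unconstrained, so a run can restrict any subset of row, family, qualifier, and value. For instance, with illustrative pattern values:

    // Match rows starting with "row_00" and values containing "hello";
    // family and qualifier stay unconstrained (null). Priority 50 and
    // the name "regex" follow the hunk above; the final flag is the
    // OR-fields toggle, false as in the patch.
    IteratorSetting regex = new IteratorSetting(50, "regex", RegExFilter.class);
    RegExFilter.setRegexs(regex, "row_00.*", null, null, ".*hello.*", false);
    AccumuloInputFormat.addIterator(job.getConfiguration(), regex);
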
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java Mon Jan 14 22:03:24 2013
@@ -19,12 +19,12 @@ package org.apache.accumulo.examples.sim
 import java.io.IOException;
 import java.util.Collections;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.commons.codec.binary.Base64;
@@ -36,6 +36,8 @@ import org.apache.hadoop.mapreduce.Mappe
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 public class RowHash extends Configured implements Tool {
   /**
    * The Mapper class that given a row number, will generate the appropriate output line.
@@ -52,24 +54,27 @@ public class RowHash extends Configured 
     public void setup(Context job) {}
   }
   
+  private static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--column", required=true)
+    String column = null;
+  }
+  
   @Override
   public int run(String[] args) throws Exception {
     Job job = new Job(getConf(), this.getClass().getName());
     job.setJarByClass(this.getClass());
-    
+    Opts opts = new Opts();
+    opts.parseArgs(RowHash.class.getName(), args);
     job.setInputFormatClass(AccumuloInputFormat.class);
-    AccumuloInputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
-    AccumuloInputFormat.setInputInfo(job.getConfiguration(), args[2], args[3].getBytes(), args[4], new Authorizations());
+    opts.setAccumuloConfigs(job);
     
-    String col = args[5];
+    String col = opts.column;
     int idx = col.indexOf(":");
     Text cf = new Text(idx < 0 ? col : col.substring(0, idx));
     Text cq = idx < 0 ? null : new Text(col.substring(idx + 1));
     if (cf.getLength() > 0)
       AccumuloInputFormat.fetchColumns(job.getConfiguration(), Collections.singleton(new Pair<Text,Text>(cf, cq)));
     
-    // AccumuloInputFormat.setLogLevel(job, Level.TRACE);
-    
     job.setMapperClass(HashDataMapper.class);
     job.setMapOutputKeyClass(Text.class);
     job.setMapOutputValueClass(Mutation.class);
@@ -77,9 +82,6 @@ public class RowHash extends Configured 
     job.setNumReduceTasks(0);
     
     job.setOutputFormatClass(AccumuloOutputFormat.class);
-    AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
-    AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), args[2], args[3].getBytes(), true, args[6]);
-    // AccumuloOutputFormat.setLogLevel(job, Level.TRACE);
     
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;

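The --column value keeps the shell-style family[:qualifier] syntax: splitting on the first ':' and leaving the qualifier null fetches the entire family. Worked through with an illustrative value:

    // "--column md5:digest" -> cf = "md5", cq = "digest";
    // "--column md5"        -> cf = "md5", cq = null (whole family).
    String col = "md5:digest"; // illustrative
    int idx = col.indexOf(":");
    Text cf = new Text(idx < 0 ? col : col.substring(0, idx));
    Text cq = idx < 0 ? null : new Text(col.substring(idx + 1));
    AccumuloInputFormat.fetchColumns(job.getConfiguration(),
        Collections.singleton(new Pair<Text,Text>(cf, cq)));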