accumulo-commits mailing list archives

From: e..@apache.org
Subject: svn commit: r1423923 [4/8] - in /accumulo/trunk: ./ bin/ core/ core/src/main/java/org/apache/accumulo/core/cli/ core/src/main/java/org/apache/accumulo/core/client/impl/ core/src/main/java/org/apache/accumulo/core/client/mapreduce/ core/src/main/java/or...
Date: Wed, 19 Dec 2012 16:25:07 GMT
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java Wed Dec 19 16:25:03 2012
@@ -38,6 +38,7 @@ import org.apache.accumulo.cloudtrace.in
 import org.apache.accumulo.cloudtrace.instrument.thrift.TraceWrap;
 import org.apache.accumulo.cloudtrace.thrift.TInfo;
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -84,11 +85,6 @@ import org.apache.accumulo.server.util.O
 import org.apache.accumulo.server.util.TServerUtils;
 import org.apache.accumulo.server.util.TabletIterator;
 import org.apache.accumulo.server.zookeeper.ZooLock;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -96,20 +92,21 @@ import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.KeeperException;
 
+import com.beust.jcommander.Parameter;
+
 public class SimpleGarbageCollector implements Iface {
   private static final Text EMPTY_TEXT = new Text();
   
-  static final Options OPTS = new Options();
-  static final Option OPT_VERBOSE_MODE = new Option("v", "verbose", false, "extra information will get printed to stdout also");
-  static final Option OPT_SAFE_MODE = new Option("s", "safemode", false, "safe mode will not delete files");
-  static final Option OPT_OFFLINE = new Option("o", "offline", false,
-      "offline mode will run once and check data files directly; this is dangerous if accumulo is running or not shut down properly");
-  static final Option OPT_ADDRESS = new Option("a", "address", true, "specify our local address");
-  static {
-    OPTS.addOption(OPT_VERBOSE_MODE);
-    OPTS.addOption(OPT_SAFE_MODE);
-    OPTS.addOption(OPT_OFFLINE);
-    OPTS.addOption(OPT_ADDRESS);
+  static class Opts extends Help {
+    @Parameter(names={"-v", "--verbose"}, description="extra information will get printed to stdout also")
+    boolean verbose = false;
+    @Parameter(names={"-s", "--safemode"}, description="safe mode will not delete files")
+    boolean safeMode = false;
+    @Parameter(names={"-o", "--offline"}, description=
+      "offline mode will run once and check data files directly; this is dangerous if accumulo is running or not shut down properly")
+    boolean offline = false;
+    @Parameter(names={"-a", "--address"}, description="specify our local address")
+    String address = null;
   }
 
   // how much of the JVM's available memory should it use gathering candidates
@@ -142,25 +139,17 @@ public class SimpleGarbageCollector impl
     Accumulo.init(fs, serverConf, "gc");
     String address = "localhost";
     SimpleGarbageCollector gc = new SimpleGarbageCollector();
-    try {
-      final CommandLine commandLine = new BasicParser().parse(OPTS, args);
-      if (commandLine.getArgs().length != 0)
-        throw new ParseException("Extraneous arguments");
-      
-      if (commandLine.hasOption(OPT_SAFE_MODE.getOpt()))
-        gc.setSafeMode();
-      if (commandLine.hasOption(OPT_OFFLINE.getOpt()))
-        gc.setOffline();
-      if (commandLine.hasOption(OPT_VERBOSE_MODE.getOpt()))
-        gc.setVerbose();
-      address = commandLine.getOptionValue(OPT_ADDRESS.getOpt());
-      if (address != null)
-        gc.useAddress(address);
-    } catch (ParseException e) {
-      String str = "Can't parse the command line options";
-      log.fatal(str, e);
-      throw new IllegalArgumentException(str, e);
-    }
+    Opts opts = new Opts();
+    opts.parseArgs(SimpleGarbageCollector.class.getName(), args);
+    
+    if (opts.safeMode)
+      gc.setSafeMode();
+    if (opts.offline)
+      gc.setOffline();
+    if (opts.verbose)
+      gc.setVerbose();
+    if (opts.address != null)
+      gc.useAddress(opts.address);
     
     gc.init(fs, instance, SecurityConstants.getSystemCredentials());
     Accumulo.enableTracing(address, "gc");
@@ -377,8 +366,9 @@ public class SimpleGarbageCollector impl
   private InetSocketAddress startStatsService() throws UnknownHostException {
     Processor<Iface> processor = new Processor<Iface>(TraceWrap.service(this));
     int port = instance.getConfiguration().getPort(Property.GC_PORT);
+    long maxMessageSize = instance.getConfiguration().getMemoryInBytes(Property.GENERAL_MAX_MESSAGE_SIZE);
     try {
-      TServerUtils.startTServer(port, processor, this.getClass().getSimpleName(), "GC Monitor Service", 2, 1000);
+      TServerUtils.startTServer(port, processor, this.getClass().getSimpleName(), "GC Monitor Service", 2, 1000, maxMessageSize);
     } catch (Exception ex) {
       log.fatal(ex, ex);
       throw new RuntimeException(ex);

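[The Opts classes above extend org.apache.accumulo.core.cli.Help, which this commit adds under core/src/main/java/org/apache/accumulo/core/cli/ but which does not appear in this hunk. A minimal sketch of what such a base class might look like, assuming it does nothing more than wrap JCommander and exit after printing usage; the committed implementation may differ:

package org.apache.accumulo.core.cli;

import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;

public class Help {
  @Parameter(names = {"-h", "-?", "--help", "-help"}, help = true, description = "display this help")
  public boolean help = false;

  // Parse args into this object's @Parameter fields; on --help or a bad
  // argument, print the generated usage and exit instead of returning.
  public void parseArgs(String programName, String[] args) {
    JCommander commander = new JCommander(this);
    commander.setProgramName(programName);
    try {
      commander.parse(args);
    } catch (ParameterException ex) {
      System.err.println(ex.getMessage());
      commander.usage();
      System.exit(1);
    }
    if (help) {
      commander.usage();
      System.exit(0);
    }
  }
}

With something of that shape in place, each tool's main() reduces to constructing its Opts, calling parseArgs, and reading fields, as the hunks below show.]
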
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/logger/LogReader.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/logger/LogReader.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/logger/LogReader.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/logger/LogReader.java Wed Dec 19 16:25:03 2012
@@ -18,11 +18,14 @@ package org.apache.accumulo.server.logge
 
 import java.io.EOFException;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.file.FileUtil;
@@ -30,10 +33,6 @@ import org.apache.accumulo.core.util.Cac
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.tabletserver.log.MultiReader;
 import org.apache.accumulo.server.trace.TraceFileSystem;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -41,9 +40,22 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+
 public class LogReader {
-  public static void usage() {
-    System.err.println("Usage : " + LogReader.class.getName() + " [-r <row>] [-m <maxColumns] [-t <key extent>] [-p <row regex>] <log file>");
+  
+  static class Opts extends Help {
+    @Parameter(names="-r", description="print only mutations associated with the given row")
+    String row;
+    @Parameter(names="-m", description="limit the number of mutations printed per row")
+    int maxMutations = 5;
+    @Parameter(names="-t", description="print only mutations that fall within the given key extent")
+    String extent;
+    @Parameter(names="-p", description="search for a row that matches the given regex")
+    String regexp;
+    @Parameter(description="<logfile> { <logfile> ... }")
+    List<String> files = new ArrayList<String>();
   }
   
   /**
@@ -55,52 +67,33 @@ public class LogReader {
    * @throws ParseException
    */
   public static void main(String[] args) throws IOException {
+    Opts opts = new Opts();
+    opts.parseArgs(LogReader.class.getName(), args);
     Configuration conf = CachedConfiguration.getInstance();
     FileSystem fs = TraceFileSystem.wrap(FileUtil.getFileSystem(conf, ServerConfiguration.getSiteConfiguration()));
     FileSystem local = TraceFileSystem.wrap(FileSystem.getLocal(conf));
-    Option rowOpt = new Option("r", "row", true, "search for a specific row");
-    Option maxOpt = new Option("m", "max-mutations", true, "the maximum number of mutations to print per log entry");
-    Option tabletOpt = new Option("t", "tablet", true, "key extent");
-    Option rowPatternOpt = new Option("p", "row-pattern", true, "search for a row that matches the given regex");
-    Options options = new Options();
-    options.addOption(rowOpt);
-    options.addOption(maxOpt);
-    options.addOption(tabletOpt);
-    options.addOption(rowPatternOpt);
-    CommandLine cl;
-    try {
-      cl = new BasicParser().parse(options, args);
-    } catch (ParseException ex) {
-      usage();
-      return;
-    }
     
     Matcher rowMatcher = null;
     KeyExtent ke = null;
     Text row = null;
-    int max = 5;
-    String[] files = cl.getArgs();
-    if (files.length == 0) {
-      usage();
+    if (opts.files.isEmpty()) {
+      new JCommander(opts).usage();
       return;
     }
-    if (cl.hasOption(rowOpt.getOpt()))
-      row = new Text(cl.getOptionValue(rowOpt.getOpt()));
-    if (cl.hasOption(maxOpt.getOpt()))
-      max = Integer.parseInt(cl.getOptionValue(maxOpt.getOpt()));
-    if (cl.hasOption(tabletOpt.getOpt())) {
-      String extent = cl.getOptionValue(tabletOpt.getOpt());
-      String sa[] = extent.split(";");
+    if (opts.row != null)
+      row = new Text(opts.row);
+    if (opts.extent != null) {
+      String sa[] = opts.extent.split(";");
       ke = new KeyExtent(new Text(sa[0]), new Text(sa[1]), new Text(sa[2]));
     }
-    if (cl.hasOption(rowPatternOpt.getOpt())) {
-      Pattern pattern = Pattern.compile(cl.getOptionValue(rowPatternOpt.getOpt()));
+    if (opts.regexp != null) {
+      Pattern pattern = Pattern.compile(opts.regexp);
       rowMatcher = pattern.matcher("");
     }
     
     Set<Integer> tabletIds = new HashSet<Integer>();
 
-    for (String file : files) {
+    for (String file : opts.files) {
       
       Path path = new Path(file);
       LogFileKey key = new LogFileKey();
@@ -116,7 +109,7 @@ public class LogReader {
           } catch (EOFException ex) {
             break;
           }
-          printLogEvent(key, value, row, rowMatcher, ke, tabletIds, max);
+          printLogEvent(key, value, row, rowMatcher, ke, tabletIds, opts.maxMutations);
         }
       } else if (local.isFile(path)) {
         // read log entries from a simple file
@@ -128,13 +121,13 @@ public class LogReader {
           } catch (EOFException ex) {
             break;
           }
-          printLogEvent(key, value, row, rowMatcher, ke, tabletIds, max);
+          printLogEvent(key, value, row, rowMatcher, ke, tabletIds, opts.maxMutations);
         }
       } else {
         // read the log entries sorted in a map file
         MultiReader input = new MultiReader(fs, conf, file);
         while (input.next(key, value)) {
-          printLogEvent(key, value, row, rowMatcher, ke, tabletIds, max);
+          printLogEvent(key, value, row, rowMatcher, ke, tabletIds, opts.maxMutations);
         }
       }
     }

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/EventCoordinator.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/EventCoordinator.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/EventCoordinator.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/EventCoordinator.java Wed Dec 19 16:25:03 2012
@@ -39,7 +39,7 @@ public class EventCoordinator {
   }
   
   synchronized public void event(String msg, Object... args) {
-    log.info(String.format(msg, args));
+    log.info(String.format(msg, args), new Throwable());
     eventCounter++;
     notifyAll();
   }

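[The second argument added above is never thrown; log4j's Logger.info(Object, Throwable) appends the throwable's stack trace to the log entry, so every master event now records the call path that raised it. A self-contained illustration of that log4j behavior (the class name is only for the example):

import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.Logger;

public class EventTraceDemo {
  private static final Logger log = Logger.getLogger(EventTraceDemo.class);

  public static void main(String[] args) {
    BasicConfigurator.configure(); // simple console appender for the demo
    // The Throwable only captures the current stack; log4j prints it
    // after the message, showing where the event came from.
    log.info("stopped event loop", new Throwable());
  }
}]
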
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/Master.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/Master.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/Master.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/Master.java Wed Dec 19 16:25:03 2012
@@ -260,6 +260,7 @@ public class Master implements LiveTServ
         public void run() {
           // This frees the main thread and will cause the master to exit
           clientService.stop();
+          Master.this.nextEvent.event("stopped event loop");
         }
         
       }, 100l, 1000l);
@@ -2162,7 +2163,7 @@ public class Master implements LiveTServ
     
     Processor<Iface> processor = new Processor<Iface>(TraceWrap.service(new MasterClientServiceHandler()));
     clientService = TServerUtils.startServer(getSystemConfiguration(), Property.MASTER_CLIENTPORT, processor, "Master", "Master Client Service Handler", null,
-        Property.MASTER_MINTHREADS, Property.MASTER_THREADCHECK).server;
+        Property.MASTER_MINTHREADS, Property.MASTER_THREADCHECK, Property.GENERAL_MAX_MESSAGE_SIZE).server;
     
     while (!clientService.isServing()) {
       UtilWaitThread.sleep(100);

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MergeStats.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MergeStats.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MergeStats.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MergeStats.java Wed Dec 19 16:25:03 2012
@@ -21,8 +21,8 @@ import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Key;
@@ -30,8 +30,6 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Text;
@@ -223,11 +221,14 @@ public class MergeStats {
   }
   
   public static void main(String[] args) throws Exception {
-    Instance instance = HdfsZooInstance.getInstance();
-    Map<String,String> tableIdMap = instance.getConnector(SecurityConstants.getSystemCredentials()).tableOperations().tableIdMap();
+    ClientOpts opts = new ClientOpts();
+    opts.parseArgs(MergeStats.class.getName(), args);
+    
+    Connector conn = opts.getConnector();
+    Map<String,String> tableIdMap = conn.tableOperations().tableIdMap();
     for (String table : tableIdMap.keySet()) {
       String tableId = tableIdMap.get(table);
-      String path = ZooUtil.getRoot(instance.getInstanceID()) + Constants.ZTABLES + "/" + tableId.toString() + "/merge";
+      String path = ZooUtil.getRoot(conn.getInstance().getInstanceID()) + Constants.ZTABLES + "/" + tableId.toString() + "/merge";
       MergeInfo info = new MergeInfo();
       if (ZooReaderWriter.getInstance().exists(path)) {
         byte[] data = ZooReaderWriter.getInstance().getData(path, new Stat());

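[MergeStats is the first of several tools here converted to org.apache.accumulo.server.cli.ClientOpts, another class added elsewhere in this commit. Judging only from the members this diff uses (instance, zookeepers, user, getPassword(), auths, getInstance(), getConnector()), a rough sketch of its shape follows; the flag names are assumptions, and members such as scanBatchSize, getBatchWriterConfig(), and stopTracing() are omitted:

import org.apache.accumulo.core.cli.Help;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.security.Authorizations;

import com.beust.jcommander.Parameter;

public class ClientOpts extends Help {
  @Parameter(names = {"-u", "--user"}, description = "user to connect as")
  public String user = "root";
  @Parameter(names = {"-p", "--password"}, description = "connection password")
  public String password = "secret";
  @Parameter(names = {"-i", "--instance"}, description = "instance name")
  public String instance = null;
  @Parameter(names = {"-z", "--keepers"}, description = "comma-separated ZooKeeper servers")
  public String zookeepers = "localhost:2181";

  public Authorizations auths = new Authorizations();

  public byte[] getPassword() {
    return password.getBytes();
  }

  public Instance getInstance() {
    return new ZooKeeperInstance(instance, zookeepers);
  }

  public Connector getConnector() throws AccumuloException, AccumuloSecurityException {
    return getInstance().getConnector(user, getPassword());
  }
}]
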
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java Wed Dec 19 16:25:03 2012
@@ -18,60 +18,35 @@ package org.apache.accumulo.server.metan
 
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.TextUtil;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Finds tablet creation events.
  */
 public class FindTablet {
-  public static void main(String[] args) throws Exception {
-    
-    Options options = new Options();
-    options.addOption("r", "row", true, "find tablets that contain this row");
-    
-    GnuParser parser = new GnuParser();
-    CommandLine cmd = null;
-    try {
-      cmd = parser.parse(options, args);
-      if (cmd.getArgs().length != 5) {
-        throw new ParseException("Command takes no arguments");
-      }
-    } catch (ParseException e) {
-      System.err.println("Failed to parse command line " + e.getMessage());
-      System.err.println();
-      HelpFormatter formatter = new HelpFormatter();
-      formatter.printHelp(FindTablet.class.getSimpleName() + " <instance> <zookeepers> <user> <pass> <table ID>", options);
-      System.exit(-1);
-    }
-    
-    String instance = cmd.getArgs()[0];
-    String zookeepers = cmd.getArgs()[1];
-    String user = cmd.getArgs()[2];
-    String pass = cmd.getArgs()[3];
-    String tableID = cmd.getArgs()[4];
+  
+  static public class Opts extends ClientOpts {
+    @Parameter(names={"-r", "--row"}, required=true, description="find tablets that contain this row")
+    String row = null;
     
-    ZooKeeperInstance zki = new ZooKeeperInstance(instance, zookeepers);
-    Connector conn = zki.getConnector(user, pass);
+    @Parameter(names="--tableId", required=true, description="table id")
+    String tableId = null;
+  }
+  
+  public static void main(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(FindTablet.class.getName(), args);
     
-    if (cmd.hasOption('r')) {
-      findContainingTablets(conn, tableID, cmd.getOptionValue('r'));
-    } else {
-      System.err.println("ERROR :  No search criteria given");
-    }
+    findContainingTablets(opts);
   }
 
   /**
@@ -80,16 +55,16 @@ public class FindTablet {
    * @param tableID
    * @param option
    */
-  private static void findContainingTablets(Connector conn, String tableID, String row) throws Exception {
-    Range range = new KeyExtent(new Text(tableID), null, null).toMetadataRange();
+  private static void findContainingTablets(Opts opts) throws Exception {
+    Range range = new KeyExtent(new Text(opts.tableId), null, null).toMetadataRange();
 
-    Scanner scanner = conn.createScanner("createEvents", new Authorizations());
-    
+    Scanner scanner = opts.getConnector().createScanner("createEvents", opts.auths);
     scanner.setRange(range);
-    
+
+    Text row = new Text(opts.row);
     for (Entry<Key,Value> entry : scanner) {
       KeyExtent ke = new KeyExtent(entry.getKey().getRow(), new Value(TextUtil.getBytes(entry.getKey().getColumnFamily())));
-      if (ke.contains(new Text(row))) {
+      if (ke.contains(row)) {
         System.out.println(entry.getKey().getColumnQualifier() + " " + ke + " " + entry.getValue());
       }
     }

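[Both FindTablet flags are declared required=true, so JCommander rejects the command line before main() ever reads the fields, replacing the old hand-rolled "No search criteria given" check. A small standalone demonstration of that behavior (hypothetical class, not part of the commit):

import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;

public class RequiredFlagDemo {
  @Parameter(names = {"-r", "--row"}, required = true, description = "row to find")
  String row;

  public static void main(String[] args) {
    RequiredFlagDemo demo = new RequiredFlagDemo();
    JCommander jc = new JCommander(demo);
    try {
      jc.parse(args); // throws if -r/--row is absent
    } catch (ParameterException ex) {
      System.err.println(ex.getMessage());
      jc.usage();
      System.exit(1);
    }
    System.out.println("row = " + demo.row);
  }
}]
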
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java Wed Dec 19 16:25:03 2012
@@ -17,15 +17,16 @@
 package org.apache.accumulo.server.metanalysis;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.KeyExtent;
@@ -45,6 +46,8 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.Logger;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * A map reduce job that takes write ahead logs containing mutations for the metadata table and indexes them into Accumulo tables for analysis.
  * 
@@ -113,19 +116,15 @@ public class IndexMeta extends Configure
     }
   }
 
-  
+  static class Opts extends ClientOpts {
+    @Parameter(description="<logfile> { <logfile> ...}")
+    List<String> logFiles = new ArrayList<String>();
+  }
   
   @Override
   public int run(String[] args) throws Exception {
-    if (args.length < 5) {
-      System.err.println("Usage : " + IndexMeta.class + " <instance> <zookeepers> <user> <pass> <logfile> {<logfile>}");
-      return -1;
-    }
-    
-    String instance = args[0];
-    String zookeepers = args[1];
-    String user = args[2];
-    String pass = args[3];
+    Opts opts = new Opts();
+    opts.parseArgs(IndexMeta.class.getName(), args);
 
     String jobName = this.getClass().getSimpleName() + "_" + System.currentTimeMillis();
     
@@ -145,13 +144,12 @@ public class IndexMeta extends Configure
     job.setNumReduceTasks(0);
     
     job.setOutputFormatClass(AccumuloOutputFormat.class);
-    AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(), instance, zookeepers);
-    AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), user, pass.getBytes(), false, null);
+    AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(), opts.instance, opts.zookeepers);
+    AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), opts.user, opts.getPassword(), false, null);
     
     job.setMapperClass(IndexMapper.class);
 
-    ZooKeeperInstance zki = new ZooKeeperInstance(instance, zookeepers);
-    Connector conn = zki.getConnector(user, pass);
+    Connector conn = opts.getConnector();
     
     try {
       conn.tableOperations().create("createEvents");

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java Wed Dec 19 16:25:03 2012
@@ -23,9 +23,9 @@ import java.util.List;
 import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
@@ -36,36 +36,32 @@ import org.apache.accumulo.core.security
 import org.apache.accumulo.server.logger.LogFileValue;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Looks up and prints mutations indexed by IndexMeta
  */
 public class PrintEvents {
   
+  static class Opts extends ClientOpts {
+    @Parameter(names={"-t", "--tableId"}, description="table id", required=true)
+    String tableId;
+    @Parameter(names={"-e", "--endRow"}, description="end row")
+    String endRow;
+    @Parameter(names={"-t", "--time"}, description="time, in milliseconds", required=true)
+    long time;
+  }
+  
   /**
    * @param args
    */
   public static void main(String[] args) throws Exception {
-    if (args.length != 7) {
-      System.err.println("Usage : " + IndexMeta.class + " <instance> <zookeepers> <user> <pass> <tableId> <endRow> <time>");
-      return;
-    }
+    Opts opts = new Opts();
+    opts.parseArgs(PrintEvents.class.getName(), args);
     
-    String instance = args[0];
-    String zookeepers = args[1];
-    String user = args[2];
-    String pass = args[3];
-    String tableId = args[4];
-    String endRow = args[5];
-    Long time = Long.parseLong(args[6]);
+    Connector conn = opts.getConnector();
     
-    ZooKeeperInstance zki = new ZooKeeperInstance(instance, zookeepers);
-    Connector conn = zki.getConnector(user, pass);
-    
-    if (endRow.equals("null")) {
-      endRow = null;
-    }
-
-    printEvents(conn, tableId, endRow, time);
+    printEvents(conn, opts.tableId, opts.endRow, opts.time);
   }
   
   /**

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/Basic.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/Basic.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/Basic.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/monitor/servlets/trace/Basic.java Wed Dec 19 16:25:03 2012
@@ -27,7 +27,6 @@ import org.apache.accumulo.core.client.S
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.trace.TraceDump;
 import org.apache.accumulo.core.trace.TraceFormatter;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.monitor.Monitor;
@@ -69,7 +68,7 @@ abstract class Basic extends BasicServle
     String table = conf.get(Property.TRACE_TABLE);
     try {
       Connector conn = HdfsZooInstance.getInstance().getConnector(user, passwd);
-      if (!conn.tableOperations().exists(TraceDump.TRACE_TABLE)) {
+      if (!conn.tableOperations().exists(table)) {
         return new NullScanner();
       }
       Scanner scanner = conn.createScanner(table, conn.securityOperations().getUserAuthorizations(user));

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java Wed Dec 19 16:25:03 2012
@@ -917,17 +917,18 @@ public class TabletServer extends Abstra
       @Override
       public void run() {
         
-        ScanSession scanSession = (ScanSession) sessionManager.getSession(scanID);
+        final ScanSession scanSession = (ScanSession) sessionManager.getSession(scanID);
         String oldThreadName = Thread.currentThread().getName();
         
         try {
           runState.set(ScanRunState.RUNNING);
-          Thread.currentThread().setName(
-              "User: " + scanSession.user + " Start: " + scanSession.startTime + " Client: " + scanSession.client + " Tablet: " + scanSession.extent);
 
           if (isCancelled() || scanSession == null)
             return;
           
+          Thread.currentThread().setName(
+              "User: " + scanSession.user + " Start: " + scanSession.startTime + " Client: " + scanSession.client + " Tablet: " + scanSession.extent);
+
           Tablet tablet = onlineTablets.get(scanSession.extent);
           
           if (tablet == null) {
@@ -2625,7 +2626,7 @@ public class TabletServer extends Abstra
 
   private int startServer(AccumuloConfiguration conf, Property portHint, TProcessor processor, String threadName) throws UnknownHostException {
     ServerPort sp = TServerUtils.startServer(conf, portHint, processor, this.getClass().getSimpleName(), threadName, Property.TSERV_PORTSEARCH,
-        Property.TSERV_MINTHREADS, Property.TSERV_THREADCHECK);
+        Property.TSERV_MINTHREADS, Property.TSERV_THREADCHECK, Property.GENERAL_MAX_MESSAGE_SIZE);
     this.server = sp.server;
     return sp.port;
   }
@@ -2884,8 +2885,13 @@ public class TabletServer extends Abstra
           return set;
         }
         
-        List<ColumnFQ> columnsToFetch = Arrays.asList(new ColumnFQ[] {Constants.METADATA_DIRECTORY_COLUMN, Constants.METADATA_PREV_ROW_COLUMN,
-            Constants.METADATA_SPLIT_RATIO_COLUMN, Constants.METADATA_OLD_PREV_ROW_COLUMN, Constants.METADATA_TIME_COLUMN});
+        List<ColumnFQ> columnsToFetch = Arrays.asList(new ColumnFQ[] {
+            Constants.METADATA_DIRECTORY_COLUMN, 
+            Constants.METADATA_PREV_ROW_COLUMN,
+            Constants.METADATA_SPLIT_RATIO_COLUMN, 
+            Constants.METADATA_OLD_PREV_ROW_COLUMN, 
+            Constants.METADATA_TIME_COLUMN
+        });
         
         if (tabletsKeyValues == null) {
           tabletsKeyValues = new TreeMap<Key,Value>();

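[The first TabletServer hunk is a correctness fix rather than a cosmetic one: the old code read scanSession.user and the other session fields to build the thread name before the isCancelled() || scanSession == null check ran, so a session already swept from the sessionManager caused a NullPointerException instead of a quiet return. The pattern reduced to a runnable sketch (illustrative only):

public class CheckBeforeUse {
  static class Session { String user = "alice"; }

  static void setThreadName(Session scanSession) {
    // Wrong order would be:
    //   Thread.currentThread().setName("User: " + scanSession.user);
    //   if (scanSession == null) return;   // too late, the NPE already happened
    // Fixed order, as in the patch above:
    if (scanSession == null)
      return;
    Thread.currentThread().setName("User: " + scanSession.user);
  }

  public static void main(String[] args) {
    setThreadName(new Session()); // sets the thread name
    setThreadName(null);          // returns quietly instead of throwing
  }
}]
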
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/BulkImportDirectory.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/BulkImportDirectory.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/BulkImportDirectory.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/BulkImportDirectory.java Wed Dec 19 16:25:03 2012
@@ -17,7 +17,10 @@
 package org.apache.accumulo.server.test;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
+import org.apache.accumulo.server.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
@@ -26,20 +29,38 @@ import org.apache.accumulo.server.client
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
+import com.beust.jcommander.Parameter;
+
 public class BulkImportDirectory {
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names={"-s","--source"}, description="directory to import from")
+    String source = null;
+    @Parameter(names={"-f","--failures"}, description="directory to copy failures into: will be deleted before the bulk import")
+    String failures = null;
+    @Parameter(description="<username> <password> <tablename> <sourcedir> <failuredir>")
+    List<String> args = new ArrayList<String>();
+  }
+  
+  
   public static void main(String[] args) throws IOException, AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    if (args.length != 5)
-      throw new RuntimeException("Usage: bin/accumulo " + BulkImportDirectory.class.getName() + " <username> <password> <tablename> <sourcedir> <failuredir>");
-    
-    final String user = args[0];
-    final byte[] pass = args[1].getBytes();
-    final String tableName = args[2];
-    final String dir = args[3];
-    final String failureDir = args[4];
-    final Path failureDirPath = new Path(failureDir);
     final FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
-    fs.delete(failureDirPath, true);
-    fs.mkdirs(failureDirPath);
-    HdfsZooInstance.getInstance().getConnector(user, pass).tableOperations().importDirectory(tableName, dir, failureDir, false);
+    Opts opts = new Opts();
+    if (args.length == 5) {
+      System.err.println("Deprecated syntax for BulkImportDirectory, please use the new style (see --help)");
+      final String user = args[0];
+      final byte[] pass = args[1].getBytes();
+      final String tableName = args[2];
+      final String dir = args[3];
+      final String failureDir = args[4];
+      final Path failureDirPath = new Path(failureDir);
+      fs.delete(failureDirPath, true);
+      fs.mkdirs(failureDirPath);
+      HdfsZooInstance.getInstance().getConnector(user, pass).tableOperations().importDirectory(tableName, dir, failureDir, false);
+    } else {
+      opts.parseArgs(BulkImportDirectory.class.getName(), args);
+      fs.delete(new Path(opts.failures), true);
+      fs.mkdirs(new Path(opts.failures));
+      opts.getConnector().tableOperations().importDirectory(opts.tableName, opts.source, opts.failures, false);
+    }
   }
 }

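[The length-5 branch keeps the old positional invocation working, with a warning, while the else branch takes the new flags. Assuming the usual bin/accumulo launcher and a table flag inherited from ClientOnRequiredTable (not shown in this hunk), the two forms would look roughly like:

  bin/accumulo org.apache.accumulo.server.test.BulkImportDirectory root secret mytable /bulk/files /bulk/failures
  bin/accumulo org.apache.accumulo.server.test.BulkImportDirectory --table mytable -s /bulk/files -f /bulk/failures]
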
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/CreateRFiles.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/CreateRFiles.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/CreateRFiles.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/CreateRFiles.java Wed Dec 19 16:25:03 2012
@@ -20,26 +20,45 @@ import java.util.concurrent.ExecutorServ
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.accumulo.core.cli.Help;
+
+import com.beust.jcommander.Parameter;
+
 public class CreateRFiles {
   
+  static class Opts extends Help {
+    
+    @Parameter(names="--output", description="the destiation directory")
+    String outputDirectory;
+    
+    @Parameter(names="--numThreads", description="number of threads to use when generating files")
+    int numThreads = 4;
+    
+    @Parameter(names="--start", description="the start number for test data")
+    long start = 0;
+    
+    @Parameter(names="--end", description="the maximum number for test data")
+    long end = 10*1000*1000;
+    
+    @Parameter(names="--splits", description="the number of splits in the data")
+    long numsplits = 4;
+  }
+  
   public static void main(String[] args) {
-    String dir = args[0];
-    int numThreads = Integer.parseInt(args[1]);
-    long start = Long.parseLong(args[2]);
-    long end = Long.parseLong(args[3]);
-    long numsplits = Long.parseLong(args[4]);
+    Opts opts = new Opts();
+    opts.parseArgs(CreateRFiles.class.getName(), args);
     
-    long splitSize = Math.round((end - start) / (double) numsplits);
+    long splitSize = Math.round((opts.end - opts.start) / (double) opts.numsplits);
     
-    long currStart = start;
-    long currEnd = start + splitSize;
+    long currStart = opts.start;
+    long currEnd = opts.start + splitSize;
     
-    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
+    ExecutorService threadPool = Executors.newFixedThreadPool(opts.numThreads);
     
     int count = 0;
-    while (currEnd <= end && currStart < currEnd) {
+    while (currEnd <= opts.end && currStart < currEnd) {
       
-      final String tia = String.format("-rFile /%s/mf%05d -timestamp 1 -size 50 -random 56 %d %d 1", dir, count, currEnd - currStart, currStart);
+      final String tia = String.format("--rfile /%s/mf%05d --timestamp 1 --size 50 --random 56 --rows %d --start %d --user root", opts.outputDirectory, count, currEnd - currStart, currStart);
       
       Runnable r = new Runnable() {
         
@@ -58,7 +77,7 @@ public class CreateRFiles {
       
       count++;
       currStart = currEnd;
-      currEnd = Math.min(end, currStart + splitSize);
+      currEnd = Math.min(opts.end, currStart + splitSize);
     }
     
     threadPool.shutdown();

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/CreateRandomRFile.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/CreateRandomRFile.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/CreateRandomRFile.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/CreateRandomRFile.java Wed Dec 19 16:25:03 2012
@@ -49,6 +49,10 @@ public class CreateRandomRFile {
   }
   
   public static void main(String[] args) {
+    if (args.length != 2) {
+      System.err.println("Usage CreateRandomRFile <filename> <size>");
+      System.exit(-1);
+    }
     file = args[0];
     num = Integer.parseInt(args[1]);
     long rands[] = new long[num];

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/CreateTestTable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/CreateTestTable.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/CreateTestTable.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/CreateTestTable.java Wed Dec 19 16:25:03 2012
@@ -19,110 +19,68 @@ package org.apache.accumulo.server.test;
 import java.util.Map.Entry;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOnDefaultTable;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.Parser;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 public class CreateTestTable {
-  private static Option usernameOpt;
-  private static Option passwordOpt;
-  private static Option readonlyOpt;
-  
-  private static Options opts;
   
-  // root user is needed for tests
-  private static String user;
-  private static String password;
-  private static boolean readOnly = false;
-  private static int count = 10000;
+  static class Opts extends ClientOnDefaultTable {
+    @Parameter(names={"-readonly", "--readonly"}, description="read only")
+    boolean readOnly = false;
+    @Parameter(names={"-count", "--count"}, description="count", required = true)
+    int count = 10000;
+    Opts() { super("mrtest1"); }
+  }
   
-  private static void readBack(Connector conn, int last) throws Exception {
-    Scanner scanner = conn.createScanner("mrtest1", Constants.NO_AUTHS);
+  private static void readBack(Connector conn, Opts opts) throws Exception {
+    Scanner scanner = conn.createScanner("mrtest1", opts.auths);
+    scanner.setBatchSize(opts.scanBatchSize);
     int count = 0;
     for (Entry<Key,Value> elt : scanner) {
       String expected = String.format("%05d", count);
       assert (elt.getKey().getRow().toString().equals(expected));
       count++;
     }
-    assert (last == count);
-  }
-  
-  public static void setupOptions() {
-    usernameOpt = new Option("username", "username", true, "username");
-    passwordOpt = new Option("password", "password", true, "password");
-    readonlyOpt = new Option("readonly", "readonly", false, "read only");
-    
-    opts = new Options();
-    
-    opts.addOption(usernameOpt);
-    opts.addOption(passwordOpt);
-    opts.addOption(readonlyOpt);
+    assert (opts.count == count);
   }
   
   public static void main(String[] args) throws Exception {
-    setupOptions();
-    
-    Parser p = new BasicParser();
-    CommandLine cl = null;
-    
-    try {
-      cl = p.parse(opts, args);
-    } catch (ParseException e) {
-      throw new RuntimeException(e);
-    }
-    String[] rargs = cl.getArgs();
-    if (rargs.length != 1) {
-      HelpFormatter hf = new HelpFormatter();
-      hf.printHelp(" <count> ", opts);
-    }
-    count = Integer.parseInt(rargs[0]);
-    readOnly = cl.hasOption(readonlyOpt.getOpt());
-    user = cl.getOptionValue(usernameOpt.getOpt(), "root");
-    password = cl.getOptionValue(passwordOpt.getOpt(), "secret");
+    String program = CreateTestTable.class.getName();
+    Opts opts = new Opts();
+    opts.parseArgs(program, args);
     
     // create the test table within accumulo
-    String table = "mrtest1";
-    Connector connector;
-    
-    connector = HdfsZooInstance.getInstance().getConnector(user, password.getBytes());
+    Connector connector = opts.getConnector();
     
-    if (!readOnly) {
+    if (!opts.readOnly) {
       TreeSet<Text> keys = new TreeSet<Text>();
-      for (int i = 0; i < count / 100; i++) {
+      for (int i = 0; i < opts.count / 100; i++) {
         keys.add(new Text(String.format("%05d", i * 100)));
       }
       
       // presplit
-      connector.tableOperations().create(table);
-      connector.tableOperations().addSplits(table, keys);
-      BatchWriter b = connector.createBatchWriter(table, new BatchWriterConfig());
+      connector.tableOperations().create(opts.getTableName());
+      connector.tableOperations().addSplits(opts.getTableName(), keys);
+      BatchWriter b = connector.createBatchWriter(opts.getTableName(), opts.getBatchWriterConfig());
       
       // populate
-      for (int i = 0; i < count; i++) {
+      for (int i = 0; i < opts.count; i++) {
         Mutation m = new Mutation(new Text(String.format("%05d", i)));
         m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes()));
         b.addMutation(m);
       }
-      
       b.close();
-      
     }
     
-    readBack(connector, count);
-    
+    readBack(connector, opts);
+    opts.stopTracing();
   }
 }

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/GCLotsOfCandidatesTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/GCLotsOfCandidatesTest.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/GCLotsOfCandidatesTest.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/GCLotsOfCandidatesTest.java Wed Dec 19 16:25:03 2012
@@ -17,14 +17,13 @@
 package org.apache.accumulo.server.test;
 
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.TablePermission;
@@ -32,17 +31,12 @@ import org.apache.hadoop.io.Text;
 
 public class GCLotsOfCandidatesTest {
   public static void main(String args[]) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
-    if (args.length != 4)
-      throw new IllegalArgumentException("Expected arguments: <instance name> <zookeeper server> <username> <password>");
+    ClientOpts opts = new ClientOpts();
+    opts.parseArgs(GCLotsOfCandidatesTest.class.getName(), args);
     
-    Connector conn = new ZooKeeperInstance(args[0], args[1]).getConnector(args[2], args[3].getBytes());
-    generateCandidates(conn);
-  }
-  
-  private static void generateCandidates(Connector conn) throws AccumuloException, AccumuloSecurityException, TableNotFoundException,
-      MutationsRejectedException {
+    Connector conn = opts.getConnector();
     conn.securityOperations().grantTablePermission(conn.whoami(), Constants.METADATA_TABLE_NAME, TablePermission.WRITE);
-    BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, opts.getBatchWriterConfig());
     
     for (int i = 0; i < 10000; ++i) {
       final Text emptyText = new Text("");

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/ListTables.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/ListTables.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/ListTables.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/ListTables.java Wed Dec 19 16:25:03 2012
@@ -18,15 +18,17 @@ package org.apache.accumulo.server.test;
 
 import java.util.Map.Entry;
 
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 
 /**
  * This little program is used by the functional test to get a list of table ids.
  */
 public class ListTables {
-  public static void main(String[] args) {
-    for (Entry<String,String> table : Tables.getNameToIdMap(HdfsZooInstance.getInstance()).entrySet())
+  public static void main(String[] args) throws Exception {
+    ClientOpts opts = new ClientOpts();
+    opts.parseArgs(ListTables.class.getName(), args);
+    for (Entry<String,String> table : Tables.getNameToIdMap(opts.getInstance()).entrySet())
       System.out.println(table.getKey() + " => " + table.getValue());
   }
 }

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/NativeMapConcurrencyTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/NativeMapConcurrencyTest.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/NativeMapConcurrencyTest.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/NativeMapConcurrencyTest.java Wed Dec 19 16:25:03 2012
@@ -25,6 +25,9 @@ import org.apache.accumulo.core.data.Val
 import org.apache.accumulo.server.tabletserver.NativeMap;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+
 public class NativeMapConcurrencyTest {
   
   private static final byte ROW_PREFIX[] = new byte[] {'r'};
@@ -70,14 +73,30 @@ public class NativeMapConcurrencyTest {
     return num / (ms / 1000.0);
   }
   
+  static class Opts {
+    @Parameter(names="--rows", description="rows", required = true)
+    int rows = 0;
+    @Parameter(names="--cols", description="cols")
+    int cols = 1;
+    @Parameter(names="--threads", description="threads")
+    int threads = 1;
+    @Parameter(names="--writeThreads", description="write threads")
+    int writeThreads = 1;
+    @Parameter(names="-help", help=true)
+    boolean help = false;
+  }
+  
   public static void main(String[] args) {
-    int rows = Integer.parseInt(args[0]);
-    int cols = Integer.parseInt(args[1]);
-    int threads = Integer.parseInt(args[2]);
-    int writeThreads = Integer.parseInt(args[3]);
-    
-    NativeMap nm = create(rows, cols);
-    runTest(nm, rows, cols, threads, writeThreads);
+    Opts opts = new Opts();
+    JCommander jc = new JCommander(opts);
+    jc.setProgramName(NativeMapConcurrencyTest.class.getName());
+    jc.parse(args);
+    if (opts.help) {
+      jc.usage();
+      return;
+    }
+    NativeMap nm = create(opts.rows, opts.cols);
+    runTest(nm, opts.rows, opts.cols, opts.threads, opts.writeThreads);
     nm.delete();
   }
   

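[Unlike the other tools converted in this commit, NativeMapConcurrencyTest does not extend Help: its Opts class is standalone, so it wires up JCommander directly and handles its own -help flag. An invocation with the flags declared above might look like:

  bin/accumulo org.apache.accumulo.server.test.NativeMapConcurrencyTest --rows 100000 --cols 10 --threads 4 --writeThreads 2]
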
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/QueryMetadataTable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/QueryMetadataTable.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/QueryMetadataTable.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/QueryMetadataTable.java Wed Dec 19 16:25:03 2012
@@ -16,7 +16,6 @@
  */
 package org.apache.accumulo.server.test;
 
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.Map.Entry;
@@ -26,6 +25,7 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
@@ -37,15 +37,10 @@ import org.apache.accumulo.core.data.Ran
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.thrift.AuthInfo;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.Parser;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 public class QueryMetadataTable {
   private static AuthInfo credentials;
   
@@ -86,39 +81,20 @@ public class QueryMetadataTable {
     }
   }
   
+  static class Opts extends ClientOpts {
+    @Parameter(names="--numQueries", description="number of queries to run")
+    int numQueries = 1;
+    @Parameter(names="--numThreads", description="number of threads used to run the queries")
+    int numThreads = 1;
+  }
+  
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Option usernameOpt = new Option("username", "username", true, "username");
-    Option passwordOpt = new Option("password", "password", true, "password");
+    Opts opts = new Opts();
+    opts.parseArgs(QueryMetadataTable.class.getName(), args);
     
-    Options opts = new Options();
-    
-    opts.addOption(usernameOpt);
-    opts.addOption(passwordOpt);
-    
-    Parser p = new BasicParser();
-    CommandLine cl = null;
-    try {
-      cl = p.parse(opts, args);
-    } catch (ParseException e1) {
-      System.out.println("Parse Exception, exiting.");
-      return;
-    }
-    
-    if (cl.getArgs().length != 2) {
-      HelpFormatter hf = new HelpFormatter();
-      hf.printHelp("queryMetadataTable <numQueries> <numThreads> ", opts);
-      return;
-    }
-    String[] rargs = cl.getArgs();
-    
-    int numQueries = Integer.parseInt(rargs[0]);
-    int numThreads = Integer.parseInt(rargs[1]);
-    credentials = new AuthInfo(cl.getOptionValue("username", "root"), ByteBuffer.wrap(cl.getOptionValue("password", "secret").getBytes()), HdfsZooInstance
-        .getInstance().getInstanceID());
-    
-    Connector connector = HdfsZooInstance.getInstance().getConnector(credentials.user, credentials.password);
-    Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
-    scanner.setBatchSize(20000);
+    Connector connector = opts.getConnector();
+    Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, opts.auths);
+    scanner.setBatchSize(opts.scanBatchSize);
     Text mdrow = new Text(KeyExtent.getMetadataEntry(new Text(Constants.METADATA_TABLE_ID), null));
     
     HashSet<Text> rowSet = new HashSet<Text>();
@@ -138,7 +114,6 @@ public class QueryMetadataTable {
       if (!entry.getKey().getRow().toString().startsWith(Constants.METADATA_TABLE_ID))
         rowSet.add(entry.getKey().getRow());
       count++;
-      
     }
     
     System.out.printf(" %,d%n", count);
@@ -147,11 +122,11 @@ public class QueryMetadataTable {
     
     Random r = new Random();
     
-    ExecutorService tp = Executors.newFixedThreadPool(numThreads);
+    ExecutorService tp = Executors.newFixedThreadPool(opts.numThreads);
     
     long t1 = System.currentTimeMillis();
     
-    for (int i = 0; i < numQueries; i++) {
+    for (int i = 0; i < opts.numQueries; i++) {
       int index = r.nextInt(rows.size());
       MDTQuery mdtq = new MDTQuery(rows.get(index));
       tp.submit(mdtq);
@@ -168,6 +143,6 @@ public class QueryMetadataTable {
     
     long t2 = System.currentTimeMillis();
     double delta = (t2 - t1) / 1000.0;
-    System.out.println("time : " + delta + "  queries per sec : " + (numQueries / delta));
+    System.out.println("time : " + delta + "  queries per sec : " + (opts.numQueries / delta));
   }
 }

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/TestBinaryRows.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/TestBinaryRows.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/TestBinaryRows.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/TestBinaryRows.java Wed Dec 19 16:25:03 2012
@@ -21,9 +21,8 @@ import java.util.Map.Entry;
 import java.util.Random;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
@@ -31,18 +30,11 @@ import org.apache.accumulo.core.data.Mut
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.TextUtil;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.hadoop.io.Text;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
+
+import com.beust.jcommander.Parameter;
 
 public class TestBinaryRows {
-  private static String username = "root";
-  private static byte[] passwd = "secret".getBytes();
-  private static String mode = null;
-  private static String table = null;
-  private static long start = 0;
-  private static long num = 0;
   private static final long byteOnes;
   
   static {
@@ -76,28 +68,29 @@ public class TestBinaryRows {
     return l;
   }
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--mode", description="either 'ingest', 'delete', 'randomLookups', 'split', 'verify', 'verifyDeleted'", required=true)
+    String mode;
+    @Parameter(names="--start", description="the lowest numbered row")
+    long start = 0;
+    @Parameter(names="--count", description="number of rows to ingest", required=true)
+    long num = 0;
+  }
+  
   public static void main(String[] args) {
-    mode = args[0];
-    if (args.length < 4) {
-      System.err.println("ERROR : " + mode + " is not a valid operation or insufficient arguments.");
-      throw new RuntimeException("config error");
-    }
-    table = args[1];
-    start = Long.parseLong(args[2]);
-    num = Long.parseLong(args[3]);
+    Opts opts = new Opts();
+    opts.parseArgs(TestBinaryRows.class.getName(), args);
     
     try {
-      Connector connector = HdfsZooInstance.getInstance().getConnector(username, passwd);
-      
-      Logger.getLogger(Constants.CORE_PACKAGE_NAME).setLevel(Level.DEBUG);
+      Connector connector = opts.getConnector();
       
-      if (mode.equals("ingest") || mode.equals("delete")) {
-        BatchWriter bw = connector.createBatchWriter(table, new BatchWriterConfig());
-        boolean delete = mode.equals("delete");
-        
-        for (long i = 0; i < num; i++) {
-          byte[] row = encodeLong(i + start);
-          String value = "" + (i + start);
+      if (opts.mode.equals("ingest") || opts.mode.equals("delete")) {
+        BatchWriter bw = connector.createBatchWriter(opts.tableName, opts.getBatchWriterConfig());
+        boolean delete = opts.mode.equals("delete");
+        
+        for (long i = 0; i < opts.num; i++) {
+          byte[] row = encodeLong(i + opts.start);
+          String value = "" + (i + opts.start);
           
           Mutation m = new Mutation(new Text(row));
           if (delete) {
@@ -109,10 +102,11 @@ public class TestBinaryRows {
         }
         
         bw.close();
-      } else if (mode.equals("verifyDeleted")) {
-        Scanner s = connector.createScanner(table, Constants.NO_AUTHS);
-        Key startKey = new Key(encodeLong(start), "cf".getBytes(), "cq".getBytes(), new byte[0], Long.MAX_VALUE);
-        Key stopKey = new Key(encodeLong(start + num - 1), "cf".getBytes(), "cq".getBytes(), new byte[0], 0);
+      } else if (opts.mode.equals("verifyDeleted")) {
+        Scanner s = connector.createScanner(opts.tableName, opts.auths);
+        s.setBatchSize(opts.scanBatchSize);
+        Key startKey = new Key(encodeLong(opts.start), "cf".getBytes(), "cq".getBytes(), new byte[0], Long.MAX_VALUE);
+        Key stopKey = new Key(encodeLong(opts.start + opts.num - 1), "cf".getBytes(), "cq".getBytes(), new byte[0], 0);
-        s.setBatchSize(50000);
         s.setRange(new Range(startKey, stopKey));
         
@@ -122,16 +116,16 @@ public class TestBinaryRows {
           System.exit(1);
         }
         
-      } else if (mode.equals("verify")) {
+      } else if (opts.mode.equals("verify")) {
         long t1 = System.currentTimeMillis();
         
-        Scanner s = connector.createScanner(table, Constants.NO_AUTHS);
-        Key startKey = new Key(encodeLong(start), "cf".getBytes(), "cq".getBytes(), new byte[0], Long.MAX_VALUE);
-        Key stopKey = new Key(encodeLong(start + num - 1), "cf".getBytes(), "cq".getBytes(), new byte[0], 0);
-        s.setBatchSize(50000);
+        Scanner s = connector.createScanner(opts.tableName, opts.auths);
+        Key startKey = new Key(encodeLong(opts.start), "cf".getBytes(), "cq".getBytes(), new byte[0], Long.MAX_VALUE);
+        Key stopKey = new Key(encodeLong(opts.start + opts.num - 1), "cf".getBytes(), "cq".getBytes(), new byte[0], 0);
+        s.setBatchSize(opts.scanBatchSize);
         s.setRange(new Range(startKey, stopKey));
         
-        long i = start;
+        long i = opts.start;
         
         for (Entry<Key,Value> e : s) {
           Key k = e.getKey();
@@ -144,8 +138,8 @@ public class TestBinaryRows {
           i++;
         }
         
-        if (i != start + num) {
-          System.err.println("ERROR : did not see expected number of rows, saw " + (i - start) + " expected " + num);
+        if (i != opts.start + opts.num) {
+          System.err.println("ERROR : did not see expected number of rows, saw " + (i - opts.start) + " expected " + opts.num);
           System.err.println("exiting... ARGHHHHHH");
           System.exit(1);
           
@@ -154,9 +148,9 @@ public class TestBinaryRows {
         long t2 = System.currentTimeMillis();
         
         System.out.printf("time : %9.2f secs%n", ((t2 - t1) / 1000.0));
-        System.out.printf("rate : %9.2f entries/sec%n", num / ((t2 - t1) / 1000.0));
+        System.out.printf("rate : %9.2f entries/sec%n", opts.num / ((t2 - t1) / 1000.0));
         
-      } else if (mode.equals("randomLookups")) {
+      } else if (opts.mode.equals("randomLookups")) {
         int numLookups = 1000;
         
         Random r = new Random();
@@ -164,9 +158,10 @@ public class TestBinaryRows {
         long t1 = System.currentTimeMillis();
         
         for (int i = 0; i < numLookups; i++) {
-          long row = (Math.abs(r.nextLong()) % num) + start;
+          long row = (Math.abs(r.nextLong()) % opts.num) + opts.start;
           
-          Scanner s = connector.createScanner(table, Constants.NO_AUTHS);
+          Scanner s = connector.createScanner(opts.tableName, opts.auths);
+          s.setBatchSize(opts.scanBatchSize);
           Key startKey = new Key(encodeLong(row), "cf".getBytes(), "cq".getBytes(), new byte[0], Long.MAX_VALUE);
           Key stopKey = new Key(encodeLong(row), "cf".getBytes(), "cq".getBytes(), new byte[0], 0);
           s.setRange(new Range(startKey, stopKey));
@@ -199,10 +194,10 @@ public class TestBinaryRows {
         System.out.printf("lookups : %9d keys%n", numLookups);
         System.out.printf("rate    : %9.2f lookups/sec%n", numLookups / ((t2 - t1) / 1000.0));
         
-      } else if (mode.equals("split")) {
+      } else if (opts.mode.equals("split")) {
         TreeSet<Text> splits = new TreeSet<Text>();
-        int shift = (int) start;
-        int count = (int) num;
+        int shift = (int) opts.start;
+        int count = (int) opts.num;
         
         for (long i = 0; i < count; i++) {
           long splitPoint = i << shift;
@@ -211,11 +206,11 @@ public class TestBinaryRows {
           System.out.printf("added split point 0x%016x  %,12d%n", splitPoint, splitPoint);
         }
         
-        connector.tableOperations().create(table);
-        connector.tableOperations().addSplits(table, splits);
+        connector.tableOperations().create(opts.tableName);
+        connector.tableOperations().addSplits(opts.tableName, splits);
         
       } else {
-        System.err.println("ERROR : " + mode + " is not a valid operation.");
+        System.err.println("ERROR : " + opts.mode + " is not a valid operation.");
         System.exit(1);
       }
     } catch (Exception e) {
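
TestBinaryRows drives rows keyed by raw 8-byte longs rather than printable strings. The encodeLong/decodeLong pair it calls (only partially visible in these hunks) writes the value big-endian, so unsigned lexicographic byte order agrees with numeric order; a sketch of the idea, not necessarily the exact method bodies:

    // Big-endian encoding: for the non-negative row ids this test uses,
    // byte-wise lexicographic order matches numeric order, so Range scans
    // over [start, start + num) work directly on the raw keys.
    public class BinaryRowCodec {
      static byte[] encodeLong(long l) {
        byte[] ba = new byte[8];
        for (int i = 0; i < 8; i++)
          ba[i] = (byte) (l >>> (56 - 8 * i));
        return ba;
      }

      static long decodeLong(byte[] ba) {
        long l = 0;
        for (int i = 0; i < 8; i++)
          l = (l << 8) | (ba[i] & 0xffL);
        return l;
      }
    }

Note that the split branch reinterprets the options: --start becomes a shift and --count the number of split points (splitPoint = i << shift), which is why both are declared as longs but cast to int in that branch.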

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/TestIngest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/TestIngest.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/TestIngest.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/TestIngest.java Wed Dec 19 16:25:03 2012
@@ -16,20 +16,15 @@
  */
 package org.apache.accumulo.server.test;
 
-import java.nio.ByteBuffer;
-import java.util.Arrays;
 import java.util.Random;
 import java.util.TreeSet;
 
 import org.apache.accumulo.cloudtrace.instrument.Trace;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.TabletServerBatchWriter;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
@@ -43,177 +38,101 @@ import org.apache.accumulo.core.file.Fil
 import org.apache.accumulo.core.file.rfile.RFile;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.security.thrift.AuthInfo;
 import org.apache.accumulo.core.trace.DistributedTrace;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.security.Authenticator;
-import org.apache.accumulo.server.security.ZKAuthenticator;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.Parser;
+import org.apache.accumulo.server.cli.ClientOnDefaultTable;
+import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 
+import com.beust.jcommander.Parameter;
+
 
 public class TestIngest {
   public static final Authorizations AUTHS = new Authorizations("L1", "L2", "G1", "GROUP2");
   
-  @SuppressWarnings("unused")
-  private static final Logger log = Logger.getLogger(TestIngest.class);
-  private static AuthInfo rootCredentials;
-  private static String username;
-  private static String passwd;
-  
-  public static class CreateTable {
-    public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableExistsException {
-      long start = Long.parseLong(args[0]);
-      long end = Long.parseLong(args[1]);
-      long numsplits = Long.parseLong(args[2]);
-      String username = args[3];
-      byte[] passwd = args[4].getBytes();
-      
-      TreeSet<Text> splits = getSplitPoints(start, end, numsplits);
-      
-      Connector conn = HdfsZooInstance.getInstance().getConnector(username, passwd);
-      conn.tableOperations().create("test_ingest");
-      try {
-        conn.tableOperations().addSplits("test_ingest", splits);
-      } catch (TableNotFoundException ex) {
-        // unlikely
-        throw new RuntimeException(ex);
-      }
-    }
+  static class Opts extends ClientOnDefaultTable {
     
-    public static TreeSet<Text> getSplitPoints(long start, long end, long numsplits) {
-      long splitSize = (end - start) / numsplits;
-      
-      long pos = start + splitSize;
-      
-      TreeSet<Text> splits = new TreeSet<Text>();
-      
-      while (pos < end) {
-        splits.add(new Text(String.format("row_%010d", pos)));
-        pos += splitSize;
-      }
-      return splits;
-    }
-  }
-  
-  public static class IngestArgs {
-    int rows;
-    int startRow;
-    int cols;
+    @Parameter(names="--createTable")
+    boolean createTable = false;
+    
+    @Parameter(names="--splits", description="the number of splits to use when creating the table")
+    int numsplits = 1;
+    
+    @Parameter(names="--start", description="the starting row number")
+    int startRow = 0;
+    
+    @Parameter(names="--rows", description="the number of rows to ingest")
+    int rows = 100000;
+    
+    @Parameter(names="--cols", description="the number of columns to ingest per row")
+    int cols = 1;
+    
+    @Parameter(names="--random", description="insert random rows and use the given number to seed the psuedo-random number generator")
+    Integer random = null;
     
-    boolean random = false;
-    int seed = 0;
+    @Parameter(names="--size", description="the size of the value to ingest")
     int dataSize = 1000;
     
+    @Parameter(names="--delete", description="delete values instead of inserting them")
     boolean delete = false;
-    long timestamp = 0;
-    boolean hasTimestamp = false;
-    boolean useGet = false;
     
-    public boolean unique;
+    @Parameter(names={"-ts", "--timestamp"}, description="timestamp to use for all values")
+    long timestamp = -1;
     
-    boolean outputToRFile = false;
-    String outputFile;
+    @Parameter(names="--rfile", description="generate data into a file that can be imported")
+    String outputFile = null;
     
+    @Parameter(names="--stride", description="the difference between successive row ids")
     int stride;
-    public boolean useTsbw = false;
-    
+
+    @Parameter(names={"-cf","--columnFamily"}, description="place columns in this column family")
     String columnFamily = "colf";
-    
-    boolean trace = false;
+
+    @Parameter(names={"-cv","--columnVisibility"}, description="place columns in this column family", converter=VisibilityConverter.class)
+    ColumnVisibility columnVisibility = new ColumnVisibility();
+
+    Opts() { super("test_ingest"); }
   }
   
-  public static Options getOptions() {
-    Options opts = new Options();
-    opts.addOption(new Option("size", "size", true, "size"));
-    opts.addOption(new Option("colf", "colf", true, "colf"));
-    opts.addOption(new Option("delete", "delete", false, "delete"));
-    opts.addOption(new Option("random", "random", true, "random"));
-    opts.addOption(new Option("timestamp", "timestamp", true, "timestamp"));
-    opts.addOption(new Option("stride", "stride", true, "stride"));
-    opts.addOption(new Option("useGet", "useGet", false, "use get"));
-    opts.addOption(new Option("tsbw", "tsbw", false, "tsbw"));
-    opts.addOption(new Option("username", "username", true, "username"));
-    opts.addOption(new Option("password", "password", true, "password"));
-    opts.addOption(new Option("trace", "trace", false, "turn on distributed tracing"));
-    opts.addOption(new Option("rFile", "rFile", true, "relative-key file"));
-    return opts;
-  }
+  @SuppressWarnings("unused")
+  private static final Logger log = Logger.getLogger(TestIngest.class);
   
-  public static IngestArgs parseArgs(String args[]) {
-    
-    Parser p = new BasicParser();
-    Options opts = getOptions();
-    CommandLine cl;
-    
-    try {
-      cl = p.parse(opts, args);
-    } catch (ParseException e) {
-      System.out.println("Parse Error, exiting.");
-      throw new RuntimeException(e);
-    }
-    
-    if (cl.getArgs().length != 3) {
-      HelpFormatter hf = new HelpFormatter();
-      hf.printHelp("test_ingest <rows> <start_row> <num_columns>", getOptions());
-      throw new RuntimeException();
-    }
-    
-    IngestArgs ia = new IngestArgs();
-    
-    if (cl.hasOption("size")) {
-      ia.dataSize = Integer.parseInt(cl.getOptionValue("size"));
-    }
-    if (cl.hasOption("colf")) {
-      ia.columnFamily = cl.getOptionValue("colf");
-    }
-    if (cl.hasOption("timestamp")) {
-      ia.timestamp = Long.parseLong(cl.getOptionValue("timestamp"));
-      ia.hasTimestamp = true;
-    }
-    if (cl.hasOption("rFile")) {
-      ia.outputToRFile = true;
-      ia.outputFile = cl.getOptionValue("rFile");
-    }
-    ia.delete = cl.hasOption("delete");
-    ia.useGet = cl.hasOption("useGet");
-    if (cl.hasOption("random")) {
-      ia.random = true;
-      ia.seed = Integer.parseInt(cl.getOptionValue("random"));
-    }
-    if (cl.hasOption("stride")) {
-      ia.stride = Integer.parseInt(cl.getOptionValue("stride"));
+  public static void createTable(Opts args) throws Exception {
+    if (args.createTable) {
+      TreeSet<Text> splits = getSplitPoints(args.startRow, args.startRow + args.rows, args.numsplits);
+      
+      Connector conn = args.getConnector();
+      if (!conn.tableOperations().exists(args.getTableName()))
+        conn.tableOperations().create(args.getTableName());
+      try {
+        conn.tableOperations().addSplits(args.getTableName(), splits);
+      } catch (TableNotFoundException ex) {
+        // unlikely
+        throw new RuntimeException(ex);
+      }
     }
-    ia.useTsbw = cl.hasOption("tsbw");
-    
-    username = cl.getOptionValue("username", "root");
-    passwd = cl.getOptionValue("password", "secret");
+  }
+  
+  public static TreeSet<Text> getSplitPoints(long start, long end, long numsplits) {
+    long splitSize = (end - start) / numsplits;
     
-    String[] requiredArgs = cl.getArgs();
+    long pos = start + splitSize;
     
-    ia.rows = Integer.parseInt(requiredArgs[0]);
-    ia.startRow = Integer.parseInt(requiredArgs[1]);
-    ia.cols = Integer.parseInt(requiredArgs[2]);
+    TreeSet<Text> splits = new TreeSet<Text>();
     
-    if (cl.hasOption("trace")) {
-      ia.trace = true;
+    while (pos < end) {
+      splits.add(new Text(String.format("row_%010d", pos)));
+      pos += splitSize;
     }
-    return ia;
+    return splits;
   }
   
-  public static byte[][] generateValues(IngestArgs ingestArgs) {
+  public static byte[][] generateValues(Opts ingestArgs) {
     
     byte[][] bytevals = new byte[10][];
     
@@ -224,7 +143,6 @@ public class TestIngest {
       for (int j = 0; j < ingestArgs.dataSize; j++)
         bytevals[i][j] = letters[i];
     }
-    
     return bytevals;
   }
   
@@ -250,19 +168,21 @@ public class TestIngest {
     }
   }
   
-  public static void main(String[] args) {
-    // log.error("usage : test_ingest [-delete] [-size <value size>] [-random <seed>] [-timestamp <ts>] [-stride <size>] <rows> <start row> <# cols> ");
+  public static void main(String[] args) throws Exception {
     
-    IngestArgs ingestArgs = parseArgs(args);
-    Instance instance = HdfsZooInstance.getInstance();
+    Opts opts = new Opts();
+    opts.parseArgs(TestIngest.class.getName(), args);
+    opts.getInstance().setConfiguration(ServerConfiguration.getSiteConfiguration());
+
+    createTable(opts);
+    
+    Instance instance = opts.getInstance();
+    
+    String name = TestIngest.class.getSimpleName();
+    DistributedTrace.enable(instance, new ZooReader(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut()), name, null);
     
     try {
-      if (ingestArgs.trace) {
-        String name = TestIngest.class.getSimpleName();
-        DistributedTrace.enable(instance, new ZooReader(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut()), name, null);
-        Trace.on(name);
-        Trace.currentTrace().data("cmdLine", Arrays.asList(args).toString());
-      }
+      opts.startTracing(name);
       
       Logger.getLogger(TabletServerBatchWriter.class.getName()).setLevel(Level.TRACE);
       
@@ -270,61 +190,53 @@ public class TestIngest {
       
       long stopTime;
       
-      byte[][] bytevals = generateValues(ingestArgs);
+      byte[][] bytevals = generateValues(opts);
       
-      byte randomValue[] = new byte[ingestArgs.dataSize];
+      byte randomValue[] = new byte[opts.dataSize];
       Random random = new Random();
       
       long bytesWritten = 0;
       
       BatchWriter bw = null;
       FileSKVWriter writer = null;
+      Connector connector = opts.getConnector();
       
-      rootCredentials = new AuthInfo(username, ByteBuffer.wrap(passwd.getBytes()), instance.getInstanceID());
-      if (ingestArgs.outputToRFile) {
+      if (opts.outputFile != null) {
         Configuration conf = CachedConfiguration.getInstance();
         FileSystem fs = FileSystem.get(conf);
-        writer = FileOperations.getInstance().openWriter(ingestArgs.outputFile + "." + RFile.EXTENSION, fs, conf,
+        writer = FileOperations.getInstance().openWriter(opts.outputFile + "." + RFile.EXTENSION, fs, conf,
             AccumuloConfiguration.getDefaultConfiguration());
         writer.startDefaultLocalityGroup();
       } else {
-        Connector connector = instance.getConnector(rootCredentials.user, rootCredentials.password);
         bw = connector.createBatchWriter("test_ingest", new BatchWriterConfig());
       }
-      
-      Authenticator authenticator = ZKAuthenticator.getInstance();
-      authenticator.changeAuthorizations(rootCredentials, rootCredentials.user, AUTHS);
-      ColumnVisibility le = new ColumnVisibility("L1&L2&G1&GROUP2");
-      Text labBA = new Text(le.getExpression());
-      
-      // int step = 100;
+      connector.securityOperations().changeUserAuthorizations(opts.user, AUTHS);
+      Text labBA = new Text(opts.columnVisibility.getExpression());
       
       long startTime = System.currentTimeMillis();
-      for (int i = 0; i < ingestArgs.rows; i++) {
-        
+      for (int i = 0; i < opts.rows; i++) {
         int rowid;
-        
-        if (ingestArgs.stride > 0) {
-          rowid = ((i % ingestArgs.stride) * (ingestArgs.rows / ingestArgs.stride)) + (i / ingestArgs.stride);
+        if (opts.stride > 0) {
+          rowid = ((i % opts.stride) * (opts.rows / opts.stride)) + (i / opts.stride);
         } else {
           rowid = i;
         }
         
-        Text row = generateRow(rowid, ingestArgs.startRow);
+        Text row = generateRow(rowid, opts.startRow);
         Mutation m = new Mutation(row);
-        for (int j = 0; j < ingestArgs.cols; j++) {
-          Text colf = new Text(ingestArgs.columnFamily);
+        for (int j = 0; j < opts.cols; j++) {
+          Text colf = new Text(opts.columnFamily);
           Text colq = new Text(FastFormat.toZeroPaddedString(j, 5, 10, COL_PREFIX));
           
           if (writer != null) {
             Key key = new Key(row, colf, colq, labBA);
-            if (ingestArgs.hasTimestamp) {
-              key.setTimestamp(ingestArgs.timestamp);
+            if (opts.timestamp >= 0) {
+              key.setTimestamp(opts.timestamp);
             } else {
-              key.setTimestamp(System.currentTimeMillis());
+              key.setTimestamp(startTime);
             }
             
-            if (ingestArgs.delete) {
+            if (opts.delete) {
               key.setDeleted(true);
             } else {
               key.setDeleted(false);
@@ -332,12 +244,12 @@ public class TestIngest {
             
             bytesWritten += key.getSize();
             
-            if (ingestArgs.delete) {
+            if (opts.delete) {
               writer.append(key, new Value(new byte[0]));
             } else {
               byte value[];
-              if (ingestArgs.random) {
-                value = genRandomValue(random, randomValue, ingestArgs.seed, rowid + ingestArgs.startRow, j);
+              if (opts.random != null) {
+                value = genRandomValue(random, randomValue, opts.random.intValue(), rowid + opts.startRow, j);
               } else {
                 value = bytevals[j % bytevals.length];
               }
@@ -351,24 +263,24 @@ public class TestIngest {
             Key key = new Key(row, colf, colq, labBA);
             bytesWritten += key.getSize();
             
-            if (ingestArgs.delete) {
-              if (ingestArgs.hasTimestamp)
-                m.putDelete(colf, colq, le, ingestArgs.timestamp);
+            if (opts.delete) {
+              if (opts.timestamp >= 0)
+                m.putDelete(colf, colq, opts.columnVisibility, opts.timestamp);
               else
-                m.putDelete(colf, colq, le);
+                m.putDelete(colf, colq, opts.columnVisibility);
             } else {
               byte value[];
-              if (ingestArgs.random) {
-                value = genRandomValue(random, randomValue, ingestArgs.seed, rowid + ingestArgs.startRow, j);
+              if (opts.random != null) {
+                value = genRandomValue(random, randomValue, opts.random.intValue(), rowid + opts.startRow, j);
               } else {
                 value = bytevals[j % bytevals.length];
               }
               bytesWritten += value.length;
               
-              if (ingestArgs.hasTimestamp) {
-                m.put(colf, colq, le, ingestArgs.timestamp, new Value(value, true));
+              if (opts.timestamp >= 0) {
+                m.put(colf, colq, opts.columnVisibility, opts.timestamp, new Value(value, true));
               } else {
-                m.put(colf, colq, le, new Value(value, true));
+                m.put(colf, colq, opts.columnVisibility, new Value(value, true));
                 
               }
             }
@@ -404,7 +316,7 @@ public class TestIngest {
       
       stopTime = System.currentTimeMillis();
       
-      int totalValues = ingestArgs.rows * ingestArgs.cols;
+      int totalValues = opts.rows * opts.cols;
       double elapsed = (stopTime - startTime) / 1000.0;
       
       System.out.printf("%,12d records written | %,8d records/sec | %,12d bytes written | %,8d bytes/sec | %6.3f secs   %n", totalValues,
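
The -cv option above is declared with converter=VisibilityConverter.class, a class these hunks do not show. A JCommander string converter of that shape would look roughly like the following sketch; the actual Accumulo class may differ:

    import com.beust.jcommander.IStringConverter;
    import org.apache.accumulo.core.security.ColumnVisibility;

    // Parses the raw option value, e.g. "L1&L2&G1&GROUP2", into a
    // ColumnVisibility so the annotated Opts field is ready to use.
    public class VisibilityConverter implements IStringConverter<ColumnVisibility> {
      @Override
      public ColumnVisibility convert(String value) {
        return new ColumnVisibility(value);
      }
    }

This moves parsing into the options layer: main() no longer hard-codes new ColumnVisibility("L1&L2&G1&GROUP2") but reads opts.columnVisibility, which defaults to the empty visibility.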

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/TestMultiTableIngest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/TestMultiTableIngest.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/TestMultiTableIngest.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/test/TestMultiTableIngest.java Wed Dec 19 16:25:03 2012
@@ -19,10 +19,9 @@ package org.apache.accumulo.server.test;
 import java.util.ArrayList;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MultiTableBatchWriter;
 import org.apache.accumulo.core.client.MutationsRejectedException;
@@ -30,105 +29,68 @@ import org.apache.accumulo.core.client.S
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.Parser;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 public class TestMultiTableIngest {
   
   private static ArrayList<String> tableNames = new ArrayList<String>();
   
-  private static Option usernameOpt = new Option("username", true, "username");
-  private static Option passwordOpt = new Option("password", true, "password");
-  private static Option readonlyOpt = new Option("readonly", false, "read only");
-  private static Option tablesOpt = new Option("tables", true, "number of tables to create");
-  private static Option countOpt = new Option("count", true, "number of entries to create");
-  
-  private static Options opts = new Options();
-  
-  static {
-    opts.addOption(usernameOpt);
-    opts.addOption(passwordOpt);
-    opts.addOption(readonlyOpt);
-    opts.addOption(tablesOpt);
-    opts.addOption(countOpt);
+  static class Opts extends ClientOpts {
+    @Parameter(names="--readonly", description="read only")
+    boolean readonly = false;
+    @Parameter(names="--tables", description="number of tables to create")
+    int tables = 5;
+    @Parameter(names="--count", description="number of entries to create")
+    int count = 10000;
   }
   
-  // root user is needed for tests
-  private static String user;
-  private static String password;
-  private static boolean readOnly = false;
-  private static int count = 10000;
-  private static int tables = 5;
-  
-  private static void readBack(Connector conn, int last) throws Exception {
+  private static void readBack(Opts opts, Connector conn) throws Exception {
     int i = 0;
     for (String table : tableNames) {
-      Scanner scanner = conn.createScanner(table, Constants.NO_AUTHS);
+      Scanner scanner = conn.createScanner(table, opts.auths);
+      scanner.setBatchSize(opts.scanBatchSize);
       int count = i;
       for (Entry<Key,Value> elt : scanner) {
         String expected = String.format("%05d", count);
-        assert (elt.getKey().getRow().toString().equals(expected));
+        if (!elt.getKey().getRow().toString().equals(expected))
+          throw new RuntimeException("entry " + elt + " does not match expected " + expected + " in table " + table);
         count += tableNames.size();
       }
       i++;
     }
-    assert (last == count);
   }
   
   public static void main(String[] args) throws Exception {
-    
-    Parser p = new BasicParser();
-    CommandLine cl = null;
-    
-    try {
-      cl = p.parse(opts, args);
-    } catch (ParseException e) {
-      throw new RuntimeException(e);
-    }
-    String[] rargs = cl.getArgs();
-    if (rargs.length != 0) {
-      HelpFormatter hf = new HelpFormatter();
-      hf.printHelp("", opts);
-    }
-    count = Integer.parseInt(cl.getOptionValue(countOpt.getOpt(), "10000"));
-    tables = Integer.parseInt(cl.getOptionValue(tablesOpt.getOpt(), "5"));
-    readOnly = cl.hasOption(readonlyOpt.getOpt());
-    user = cl.getOptionValue(usernameOpt.getOpt(), "root");
-    password = cl.getOptionValue(passwordOpt.getOpt(), "secret");
-    
+    Opts opts = new Opts();
+    opts.parseArgs(TestMultiTableIngest.class.getName(), args);
     // create the test table within accumulo
     Connector connector;
     try {
-      connector = HdfsZooInstance.getInstance().getConnector(user, password.getBytes());
+      connector = opts.getConnector();
     } catch (AccumuloException e) {
       throw new RuntimeException(e);
     } catch (AccumuloSecurityException e) {
       throw new RuntimeException(e);
     }
-    for (int i = 0; i < tables; i++) {
+    for (int i = 0; i < opts.tables; i++) {
       tableNames.add(String.format("test_%04d", i));
     }
     
-    if (!readOnly) {
+    if (!opts.readonly) {
       for (String table : tableNames)
         connector.tableOperations().create(table);
       
       MultiTableBatchWriter b;
       try {
-        b = connector.createMultiTableBatchWriter(new BatchWriterConfig());
+        b = connector.createMultiTableBatchWriter(opts.getBatchWriterConfig());
       } catch (Exception e) {
         throw new RuntimeException(e);
       }
       
       // populate
-      for (int i = 0; i < count; i++) {
+      for (int i = 0; i < opts.count; i++) {
         Mutation m = new Mutation(new Text(String.format("%05d", i)));
         m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes()));
         b.getBatchWriter(tableNames.get(i % tableNames.size())).addMutation(m);
@@ -140,7 +102,7 @@ public class TestMultiTableIngest {
       }
     }
     try {
-      readBack(connector, count);
+      readBack(opts, connector);
     } catch (Exception e) {
       throw new RuntimeException(e);
     }
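
Besides the option-parsing change, readBack now throws a RuntimeException on a mismatch instead of relying on assert, which is a no-op unless the JVM runs with -ea. For reference, a minimal sketch of the round-robin write pattern the ingest half drives, assuming a Connector and pre-created tables (a hypothetical helper, not part of the commit):

    import java.util.List;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.MultiTableBatchWriter;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.hadoop.io.Text;

    public class MultiTableIngestSketch {
      static void ingest(Connector connector, List<String> tables, int count) throws Exception {
        MultiTableBatchWriter writer = connector.createMultiTableBatchWriter(new BatchWriterConfig());
        try {
          for (int i = 0; i < count; i++) {
            // row ids increase globally; each table receives every
            // tables.size()-th id, the interleaving readBack verifies
            Mutation m = new Mutation(new Text(String.format("%05d", i)));
            m.put(new Text("col" + ((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes()));
            writer.getBatchWriter(tables.get(i % tables.size())).addMutation(m);
          }
        } finally {
          writer.close(); // flushes every underlying per-table writer
        }
      }
    }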


