accumulo-commits mailing list archives

From vi...@apache.org
Subject svn commit: r1433166 [15/20] - in /accumulo/branches/ACCUMULO-259: ./ assemble/ assemble/platform/ assemble/scripts/ assemble/scripts/init.d/ bin/ conf/examples/1GB/native-standalone/ conf/examples/1GB/standalone/ conf/examples/2GB/native-standalone/ c...
Date Mon, 14 Jan 2013 22:03:34 GMT
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestIngest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestIngest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestIngest.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestIngest.java Mon Jan 14 22:03:24 2013
@@ -16,20 +16,17 @@
  */
 package org.apache.accumulo.server.test;
 
-import java.nio.ByteBuffer;
-import java.util.Arrays;
+import java.util.Map.Entry;
 import java.util.Random;
+import java.util.Set;
 import java.util.TreeSet;
 
 import org.apache.accumulo.cloudtrace.instrument.Trace;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.TabletServerBatchWriter;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
@@ -43,175 +40,102 @@ import org.apache.accumulo.core.file.Fil
 import org.apache.accumulo.core.file.rfile.RFile;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.security.thrift.AuthInfo;
+import org.apache.accumulo.core.security.thrift.SecurityErrorCode;
 import org.apache.accumulo.core.trace.DistributedTrace;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.Parser;
+import org.apache.accumulo.server.cli.ClientOnDefaultTable;
+import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 
+import com.beust.jcommander.Parameter;
+
 
 public class TestIngest {
   public static final Authorizations AUTHS = new Authorizations("L1", "L2", "G1", "GROUP2");
   
-  @SuppressWarnings("unused")
-  private static final Logger log = Logger.getLogger(TestIngest.class);
-  private static AuthInfo rootCredentials;
-  private static String username;
-  private static String passwd;
-  
-  public static class CreateTable {
-    public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableExistsException {
-      long start = Long.parseLong(args[0]);
-      long end = Long.parseLong(args[1]);
-      long numsplits = Long.parseLong(args[2]);
-      String username = args[3];
-      byte[] passwd = args[4].getBytes();
-      
-      TreeSet<Text> splits = getSplitPoints(start, end, numsplits);
-      
-      Connector conn = HdfsZooInstance.getInstance().getConnector(username, passwd);
-      conn.tableOperations().create("test_ingest");
-      try {
-        conn.tableOperations().addSplits("test_ingest", splits);
-      } catch (TableNotFoundException ex) {
-        // unlikely
-        throw new RuntimeException(ex);
-      }
-    }
+  static class Opts extends ClientOnDefaultTable {
     
-    public static TreeSet<Text> getSplitPoints(long start, long end, long numsplits) {
-      long splitSize = (end - start) / numsplits;
-      
-      long pos = start + splitSize;
-      
-      TreeSet<Text> splits = new TreeSet<Text>();
-      
-      while (pos < end) {
-        splits.add(new Text(String.format("row_%010d", pos)));
-        pos += splitSize;
-      }
-      return splits;
-    }
-  }
-  
-  public static class IngestArgs {
-    int rows;
-    int startRow;
-    int cols;
+    @Parameter(names="--createTable")
+    boolean createTable = false;
+    
+    @Parameter(names="--splits", description="the number of splits to use when creating the table")
+    int numsplits = 1;
+    
+    @Parameter(names="--start", description="the starting row number")
+    int startRow = 0;
+    
+    @Parameter(names="--rows", description="the number of rows to ingest")
+    int rows = 100000;
+    
+    @Parameter(names="--cols", description="the number of columns to ingest per row")
+    int cols = 1;
+    
+    @Parameter(names="--random", description="insert random rows and use the given number to seed the psuedo-random number generator")
+    Integer random = null;
     
-    boolean random = false;
-    int seed = 0;
+    @Parameter(names="--size", description="the size of the value to ingest")
     int dataSize = 1000;
     
+    @Parameter(names="--delete", description="delete values instead of inserting them")
     boolean delete = false;
-    long timestamp = 0;
-    boolean hasTimestamp = false;
-    boolean useGet = false;
     
-    public boolean unique;
+    @Parameter(names={"-ts", "--timestamp"}, description="timestamp to use for all values")
+    long timestamp = -1;
     
-    boolean outputToRFile = false;
-    String outputFile;
+    @Parameter(names="--rfile", description="generate data into a file that can be imported")
+    String outputFile = null;
     
+    @Parameter(names="--stride", description="the difference between successive row ids")
     int stride;
-    public boolean useTsbw = false;
-    
+
+    @Parameter(names={"-cf","--columnFamily"}, description="place columns in this column family")
     String columnFamily = "colf";
-    
-    boolean trace = false;
+
+    @Parameter(names={"-cv","--columnVisibility"}, description="place columns in this column family", converter=VisibilityConverter.class)
+    ColumnVisibility columnVisibility = new ColumnVisibility();
+
+    Opts() { super("test_ingest"); }
   }
   
-  public static Options getOptions() {
-    Options opts = new Options();
-    opts.addOption(new Option("size", "size", true, "size"));
-    opts.addOption(new Option("colf", "colf", true, "colf"));
-    opts.addOption(new Option("delete", "delete", false, "delete"));
-    opts.addOption(new Option("random", "random", true, "random"));
-    opts.addOption(new Option("timestamp", "timestamp", true, "timestamp"));
-    opts.addOption(new Option("stride", "stride", true, "stride"));
-    opts.addOption(new Option("useGet", "useGet", false, "use get"));
-    opts.addOption(new Option("tsbw", "tsbw", false, "tsbw"));
-    opts.addOption(new Option("username", "username", true, "username"));
-    opts.addOption(new Option("password", "password", true, "password"));
-    opts.addOption(new Option("trace", "trace", false, "turn on distributed tracing"));
-    opts.addOption(new Option("rFile", "rFile", true, "relative-key file"));
-    return opts;
-  }
+  @SuppressWarnings("unused")
+  private static final Logger log = Logger.getLogger(TestIngest.class);
   
-  public static IngestArgs parseArgs(String args[]) {
-    
-    Parser p = new BasicParser();
-    Options opts = getOptions();
-    CommandLine cl;
-    
-    try {
-      cl = p.parse(opts, args);
-    } catch (ParseException e) {
-      System.out.println("Parse Error, exiting.");
-      throw new RuntimeException(e);
-    }
-    
-    if (cl.getArgs().length != 3) {
-      HelpFormatter hf = new HelpFormatter();
-      hf.printHelp("test_ingest <rows> <start_row> <num_columns>", getOptions());
-      throw new RuntimeException();
-    }
-    
-    IngestArgs ia = new IngestArgs();
-    
-    if (cl.hasOption("size")) {
-      ia.dataSize = Integer.parseInt(cl.getOptionValue("size"));
-    }
-    if (cl.hasOption("colf")) {
-      ia.columnFamily = cl.getOptionValue("colf");
-    }
-    if (cl.hasOption("timestamp")) {
-      ia.timestamp = Long.parseLong(cl.getOptionValue("timestamp"));
-      ia.hasTimestamp = true;
-    }
-    if (cl.hasOption("rFile")) {
-      ia.outputToRFile = true;
-      ia.outputFile = cl.getOptionValue("rFile");
-    }
-    ia.delete = cl.hasOption("delete");
-    ia.useGet = cl.hasOption("useGet");
-    if (cl.hasOption("random")) {
-      ia.random = true;
-      ia.seed = Integer.parseInt(cl.getOptionValue("random"));
-    }
-    if (cl.hasOption("stride")) {
-      ia.stride = Integer.parseInt(cl.getOptionValue("stride"));
+  public static void createTable(Opts args) throws Exception {
+    if (args.createTable) {
+      TreeSet<Text> splits = getSplitPoints(args.startRow, args.startRow + args.rows, args.numsplits);
+      
+      Connector conn = args.getConnector();
+      if (!conn.tableOperations().exists(args.getTableName()))
+        conn.tableOperations().create(args.getTableName());
+      try {
+        conn.tableOperations().addSplits(args.getTableName(), splits);
+      } catch (TableNotFoundException ex) {
+        // unlikely
+        throw new RuntimeException(ex);
+      }
     }
-    ia.useTsbw = cl.hasOption("tsbw");
-    
-    username = cl.getOptionValue("username", "root");
-    passwd = cl.getOptionValue("password", "secret");
+  }
+  
+  public static TreeSet<Text> getSplitPoints(long start, long end, long numsplits) {
+    long splitSize = (end - start) / numsplits;
     
-    String[] requiredArgs = cl.getArgs();
+    long pos = start + splitSize;
     
-    ia.rows = Integer.parseInt(requiredArgs[0]);
-    ia.startRow = Integer.parseInt(requiredArgs[1]);
-    ia.cols = Integer.parseInt(requiredArgs[2]);
+    TreeSet<Text> splits = new TreeSet<Text>();
     
-    if (cl.hasOption("trace")) {
-      ia.trace = true;
+    while (pos < end) {
+      splits.add(new Text(String.format("row_%010d", pos)));
+      pos += splitSize;
     }
-    return ia;
+    return splits;
   }
   
-  public static byte[][] generateValues(IngestArgs ingestArgs) {
+  public static byte[][] generateValues(Opts ingestArgs) {
     
     byte[][] bytevals = new byte[10][];
     
@@ -222,7 +146,6 @@ public class TestIngest {
       for (int j = 0; j < ingestArgs.dataSize; j++)
         bytevals[i][j] = letters[i];
     }
-    
     return bytevals;
   }
   
@@ -248,19 +171,22 @@ public class TestIngest {
     }
   }
   
-  public static void main(String[] args) {
-    // log.error("usage : test_ingest [-delete] [-size <value size>] [-random <seed>] [-timestamp <ts>] [-stride <size>] <rows> <start row> <# cols> ");
+  public static void main(String[] args) throws Exception {
     
-    IngestArgs ingestArgs = parseArgs(args);
-    Instance instance = HdfsZooInstance.getInstance();
+    Opts opts = new Opts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(TestIngest.class.getName(), args, bwOpts);
+    opts.getInstance().setConfiguration(ServerConfiguration.getSiteConfiguration());
+
+    createTable(opts);
+    
+    Instance instance = opts.getInstance();
+    
+    String name = TestIngest.class.getSimpleName();
+    DistributedTrace.enable(instance, new ZooReader(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut()), name, null);
     
     try {
-      if (ingestArgs.trace) {
-        String name = TestIngest.class.getSimpleName();
-        DistributedTrace.enable(instance, new ZooReader(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut()), name, null);
-        Trace.on(name);
-        Trace.currentTrace().data("cmdLine", Arrays.asList(args).toString());
-      }
+      opts.startTracing(name);
       
       Logger.getLogger(TabletServerBatchWriter.class.getName()).setLevel(Level.TRACE);
       
@@ -268,60 +194,53 @@ public class TestIngest {
       
       long stopTime;
       
-      byte[][] bytevals = generateValues(ingestArgs);
+      byte[][] bytevals = generateValues(opts);
       
-      byte randomValue[] = new byte[ingestArgs.dataSize];
+      byte randomValue[] = new byte[opts.dataSize];
       Random random = new Random();
       
       long bytesWritten = 0;
       
       BatchWriter bw = null;
       FileSKVWriter writer = null;
+      Connector connector = opts.getConnector();
       
-      rootCredentials = new AuthInfo(username, ByteBuffer.wrap(passwd.getBytes()), instance.getInstanceID());
-      if (ingestArgs.outputToRFile) {
+      if (opts.outputFile != null) {
         Configuration conf = CachedConfiguration.getInstance();
         FileSystem fs = FileSystem.get(conf);
-        writer = FileOperations.getInstance().openWriter(ingestArgs.outputFile + "." + RFile.EXTENSION, fs, conf,
+        writer = FileOperations.getInstance().openWriter(opts.outputFile + "." + RFile.EXTENSION, fs, conf,
             AccumuloConfiguration.getDefaultConfiguration());
         writer.startDefaultLocalityGroup();
       } else {
-        Connector connector = instance.getConnector(rootCredentials.user, rootCredentials.password);
-        bw = connector.createBatchWriter("test_ingest", new BatchWriterConfig());
-        connector.securityOperations().changeUserAuthorizations(rootCredentials.user, AUTHS);
+        bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
       }
-      
-      ColumnVisibility le = new ColumnVisibility("L1&L2&G1&GROUP2");
-      Text labBA = new Text(le.getExpression());
-      
-      // int step = 100;
+      connector.securityOperations().changeUserAuthorizations(opts.user, AUTHS);
+      Text labBA = new Text(opts.columnVisibility.getExpression());
       
       long startTime = System.currentTimeMillis();
-      for (int i = 0; i < ingestArgs.rows; i++) {
-        
+      for (int i = 0; i < opts.rows; i++) {
         int rowid;
-        
-        if (ingestArgs.stride > 0) {
-          rowid = ((i % ingestArgs.stride) * (ingestArgs.rows / ingestArgs.stride)) + (i / ingestArgs.stride);
+        if (opts.stride > 0) {
+          rowid = ((i % opts.stride) * (opts.rows / opts.stride)) + (i / opts.stride);
         } else {
           rowid = i;
         }
         
-        Text row = generateRow(rowid, ingestArgs.startRow);
+        Text row = generateRow(rowid, opts.startRow);
         Mutation m = new Mutation(row);
-        for (int j = 0; j < ingestArgs.cols; j++) {
-          Text colf = new Text(ingestArgs.columnFamily);
+        for (int j = 0; j < opts.cols; j++) {
+          Text colf = new Text(opts.columnFamily);
           Text colq = new Text(FastFormat.toZeroPaddedString(j, 5, 10, COL_PREFIX));
           
           if (writer != null) {
             Key key = new Key(row, colf, colq, labBA);
-            if (ingestArgs.hasTimestamp) {
-              key.setTimestamp(ingestArgs.timestamp);
+            if (opts.timestamp >= 0) {
+              key.setTimestamp(opts.timestamp);
             } else {
-              key.setTimestamp(System.currentTimeMillis());
+              key.setTimestamp(startTime);
             }
             
-            if (ingestArgs.delete) {
+            if (opts.delete) {
               key.setDeleted(true);
             } else {
               key.setDeleted(false);
@@ -329,12 +248,12 @@ public class TestIngest {
             
             bytesWritten += key.getSize();
             
-            if (ingestArgs.delete) {
+            if (opts.delete) {
               writer.append(key, new Value(new byte[0]));
             } else {
               byte value[];
-              if (ingestArgs.random) {
-                value = genRandomValue(random, randomValue, ingestArgs.seed, rowid + ingestArgs.startRow, j);
+              if (opts.random != null) {
+                value = genRandomValue(random, randomValue, opts.random.intValue(), rowid + opts.startRow, j);
               } else {
                 value = bytevals[j % bytevals.length];
               }
@@ -348,24 +267,24 @@ public class TestIngest {
             Key key = new Key(row, colf, colq, labBA);
             bytesWritten += key.getSize();
             
-            if (ingestArgs.delete) {
-              if (ingestArgs.hasTimestamp)
-                m.putDelete(colf, colq, le, ingestArgs.timestamp);
+            if (opts.delete) {
+              if (opts.timestamp >= 0)
+                m.putDelete(colf, colq, opts.columnVisibility, opts.timestamp);
               else
-                m.putDelete(colf, colq, le);
+                m.putDelete(colf, colq, opts.columnVisibility);
             } else {
               byte value[];
-              if (ingestArgs.random) {
-                value = genRandomValue(random, randomValue, ingestArgs.seed, rowid + ingestArgs.startRow, j);
+              if (opts.random != null) {
+                value = genRandomValue(random, randomValue, opts.random.intValue(), rowid + opts.startRow, j);
               } else {
                 value = bytevals[j % bytevals.length];
               }
               bytesWritten += value.length;
               
-              if (ingestArgs.hasTimestamp) {
-                m.put(colf, colq, le, ingestArgs.timestamp, new Value(value, true));
+              if (opts.timestamp >= 0) {
+                m.put(colf, colq, opts.columnVisibility, opts.timestamp, new Value(value, true));
               } else {
-                m.put(colf, colq, le, new Value(value, true));
+                m.put(colf, colq, opts.columnVisibility, new Value(value, true));
                 
               }
             }
@@ -384,8 +303,8 @@ public class TestIngest {
           bw.close();
         } catch (MutationsRejectedException e) {
           if (e.getAuthorizationFailures().size() > 0) {
-            for (KeyExtent ke : e.getAuthorizationFailures()) {
-              System.err.println("ERROR : Not authorized to write to : " + ke);
+            for (Entry<KeyExtent,Set<SecurityErrorCode>> entry : e.getAuthorizationFailures().entrySet()) {
+              System.err.println("ERROR : Not authorized to write to : " + entry.getKey() + " due to " + entry.getValue());
             }
           }
           
@@ -401,7 +320,7 @@ public class TestIngest {
       
       stopTime = System.currentTimeMillis();
       
-      int totalValues = ingestArgs.rows * ingestArgs.cols;
+      int totalValues = opts.rows * opts.cols;
       double elapsed = (stopTime - startTime) / 1000.0;
       
       System.out.printf("%,12d records written | %,8d records/sec | %,12d bytes written | %,8d bytes/sec | %6.3f secs   %n", totalValues,

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestMultiTableIngest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestMultiTableIngest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestMultiTableIngest.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestMultiTableIngest.java Mon Jan 14 22:03:24 2013
@@ -19,10 +19,10 @@ package org.apache.accumulo.server.test;
 import java.util.ArrayList;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MultiTableBatchWriter;
 import org.apache.accumulo.core.client.MutationsRejectedException;
@@ -30,105 +30,71 @@ import org.apache.accumulo.core.client.S
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.Parser;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 public class TestMultiTableIngest {
   
   private static ArrayList<String> tableNames = new ArrayList<String>();
   
-  private static Option usernameOpt = new Option("username", true, "username");
-  private static Option passwordOpt = new Option("password", true, "password");
-  private static Option readonlyOpt = new Option("readonly", false, "read only");
-  private static Option tablesOpt = new Option("tables", true, "number of tables to create");
-  private static Option countOpt = new Option("count", true, "number of entries to create");
-  
-  private static Options opts = new Options();
-  
-  static {
-    opts.addOption(usernameOpt);
-    opts.addOption(passwordOpt);
-    opts.addOption(readonlyOpt);
-    opts.addOption(tablesOpt);
-    opts.addOption(countOpt);
+  static class Opts extends ClientOpts {
+    @Parameter(names="--readonly", description="read only")
+    boolean readonly = false;
+    @Parameter(names="--tables", description="number of tables to create")
+    int tables = 5;
+    @Parameter(names="--count", description="number of entries to create")
+    int count = 10000;
   }
   
-  // root user is needed for tests
-  private static String user;
-  private static String password;
-  private static boolean readOnly = false;
-  private static int count = 10000;
-  private static int tables = 5;
-  
-  private static void readBack(Connector conn, int last) throws Exception {
+  private static void readBack(Opts opts, ScannerOpts scanOpts, Connector conn) throws Exception {
     int i = 0;
     for (String table : tableNames) {
-      Scanner scanner = conn.createScanner(table, Constants.NO_AUTHS);
+      Scanner scanner = conn.createScanner(table, opts.auths);
+      scanner.setBatchSize(scanOpts.scanBatchSize);
       int count = i;
       for (Entry<Key,Value> elt : scanner) {
         String expected = String.format("%05d", count);
-        assert (elt.getKey().getRow().toString().equals(expected));
+        if (!elt.getKey().getRow().toString().equals(expected))
+          throw new RuntimeException("entry " + elt + " does not match expected " + expected + " in table " + table);
         count += tableNames.size();
       }
       i++;
     }
-    assert (last == count);
   }
   
   public static void main(String[] args) throws Exception {
-    
-    Parser p = new BasicParser();
-    CommandLine cl = null;
-    
-    try {
-      cl = p.parse(opts, args);
-    } catch (ParseException e) {
-      throw new RuntimeException(e);
-    }
-    String[] rargs = cl.getArgs();
-    if (rargs.length != 0) {
-      HelpFormatter hf = new HelpFormatter();
-      hf.printHelp("", opts);
-    }
-    count = Integer.parseInt(cl.getOptionValue(countOpt.getOpt(), "10000"));
-    tables = Integer.parseInt(cl.getOptionValue(tablesOpt.getOpt(), "5"));
-    readOnly = cl.hasOption(readonlyOpt.getOpt());
-    user = cl.getOptionValue(usernameOpt.getOpt(), "root");
-    password = cl.getOptionValue(passwordOpt.getOpt(), "secret");
-    
+    Opts opts = new Opts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(TestMultiTableIngest.class.getName(), args, scanOpts, bwOpts);
     // create the test table within accumulo
     Connector connector;
     try {
-      connector = HdfsZooInstance.getInstance().getConnector(user, password.getBytes());
+      connector = opts.getConnector();
     } catch (AccumuloException e) {
       throw new RuntimeException(e);
     } catch (AccumuloSecurityException e) {
       throw new RuntimeException(e);
     }
-    for (int i = 0; i < tables; i++) {
+    for (int i = 0; i < opts.tables; i++) {
       tableNames.add(String.format("test_%04d", i));
     }
     
-    if (!readOnly) {
+    if (!opts.readonly) {
       for (String table : tableNames)
         connector.tableOperations().create(table);
       
       MultiTableBatchWriter b;
       try {
-        b = connector.createMultiTableBatchWriter(new BatchWriterConfig());
+        b = connector.createMultiTableBatchWriter(bwOpts.getBatchWriterConfig());
       } catch (Exception e) {
         throw new RuntimeException(e);
       }
       
       // populate
-      for (int i = 0; i < count; i++) {
+      for (int i = 0; i < opts.count; i++) {
         Mutation m = new Mutation(new Text(String.format("%05d", i)));
         m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes()));
         b.getBatchWriter(tableNames.get(i % tableNames.size())).addMutation(m);
@@ -140,7 +106,7 @@ public class TestMultiTableIngest {
       }
     }
     try {
-      readBack(connector, count);
+      readBack(opts, scanOpts, connector);
     } catch (Exception e) {
       throw new RuntimeException(e);
     }
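
The populate/readBack rewrite above routes every mutation through a single MultiTableBatchWriter keyed by table name. A hedged sketch of that call pattern, assuming an existing Connector and a table named test_0000 (both placeholders):

  import org.apache.accumulo.core.client.*;
  import org.apache.accumulo.core.data.*;
  import org.apache.hadoop.io.Text;

  class MtbwSketch {
    static void writeOne(Connector connector) throws Exception {
      // the diff obtains this config from BatchWriterOpts.getBatchWriterConfig()
      MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(new BatchWriterConfig());
      try {
        Mutation m = new Mutation(new Text("00042"));
        m.put(new Text("col1"), new Text("qual"), new Value("junk".getBytes()));
        mtbw.getBatchWriter("test_0000").addMutation(m); // routed by table name
      } finally {
        mtbw.close(); // flushes all underlying per-table writers
      }
    }
  }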

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestRandomDeletes.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestRandomDeletes.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestRandomDeletes.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/TestRandomDeletes.java Mon Jan 14 22:03:24 2013
@@ -16,14 +16,15 @@
  */
 package org.apache.accumulo.server.test;
 
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
 
+import org.apache.accumulo.server.cli.ClientOpts;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Column;
@@ -32,15 +33,7 @@ import org.apache.accumulo.core.data.Mut
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.security.thrift.AuthInfo;
 import org.apache.accumulo.core.util.TextUtil;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.Parser;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
@@ -48,8 +41,6 @@ public class TestRandomDeletes {
   private static final Logger log = Logger.getLogger(TestRandomDeletes.class);
   private static Authorizations auths = new Authorizations("L1", "L2", "G1", "GROUP2");
   
-  private static AuthInfo credentials;
-  
   static private class RowColumn implements Comparable<RowColumn> {
     Text row;
     Column column;
@@ -73,11 +64,11 @@ public class TestRandomDeletes {
     }
   }
   
-  private static TreeSet<RowColumn> scanAll(Text t) throws Exception {
+  private static TreeSet<RowColumn> scanAll(ClientOpts opts, ScannerOpts scanOpts, Text t) throws Exception {
     TreeSet<RowColumn> result = new TreeSet<RowColumn>();
-    Connector conn = HdfsZooInstance.getInstance().getConnector(credentials.user, credentials.password);
+    Connector conn = opts.getConnector();
     Scanner scanner = conn.createScanner(t.toString(), auths);
-    scanner.setBatchSize(20000);
+    scanner.setBatchSize(scanOpts.scanBatchSize);
     for (Entry<Key,Value> entry : scanner) {
       Key key = entry.getKey();
       Column column = new Column(TextUtil.getBytes(key.getColumnFamily()), TextUtil.getBytes(key.getColumnQualifier()), TextUtil.getBytes(key
@@ -87,19 +78,18 @@ public class TestRandomDeletes {
     return result;
   }
   
-  private static long scrambleDeleteHalfAndCheck(Text t, Set<RowColumn> rows) throws Exception {
+  private static long scrambleDeleteHalfAndCheck(ClientOpts opts, ScannerOpts scanOpts, BatchWriterOpts bwOpts, Text t, Set<RowColumn> rows) throws Exception {
     int result = 0;
     ArrayList<RowColumn> entries = new ArrayList<RowColumn>(rows);
     java.util.Collections.shuffle(entries);
     
-    Connector connector = HdfsZooInstance.getInstance().getConnector(credentials.user, credentials.password);
-    BatchWriter mutations = connector.createBatchWriter(t.toString(), new BatchWriterConfig());
-    ColumnVisibility cv = new ColumnVisibility("L1&L2&G1&GROUP2");
+    Connector connector = opts.getConnector();
+    BatchWriter mutations = connector.createBatchWriter(t.toString(), bwOpts.getBatchWriterConfig());
     
     for (int i = 0; i < (entries.size() + 1) / 2; i++) {
       RowColumn rc = entries.get(i);
       Mutation m = new Mutation(rc.row);
-      m.putDelete(new Text(rc.column.columnFamily), new Text(rc.column.columnQualifier), cv, rc.timestamp + 1);
+      m.putDelete(new Text(rc.column.columnFamily), new Text(rc.column.columnQualifier), new ColumnVisibility(rc.column.getColumnVisibility()), rc.timestamp + 1);
       mutations.addMutation(m);
       rows.remove(rc);
       result++;
@@ -107,7 +97,7 @@ public class TestRandomDeletes {
     
     mutations.close();
     
-    Set<RowColumn> current = scanAll(t);
+    Set<RowColumn> current = scanAll(opts, scanOpts, t);
     current.removeAll(rows);
     if (current.size() > 0) {
       throw new RuntimeException(current.size() + " records not deleted");
@@ -116,36 +106,23 @@ public class TestRandomDeletes {
   }
   
   static public void main(String[] args) {
-    Option usernameOpt = new Option("username", "username", true, "username");
-    Option passwordOpt = new Option("password", "password", true, "password");
-    
-    Options opts = new Options();
-    
-    opts.addOption(usernameOpt);
-    opts.addOption(passwordOpt);
     
-    Parser p = new BasicParser();
-    CommandLine cl = null;
-    try {
-      cl = p.parse(opts, args);
-    } catch (ParseException e1) {
-      System.out.println("Parse Exception, exiting.");
-      return;
-    }
-    credentials = new AuthInfo(cl.getOptionValue("username", "root"), ByteBuffer.wrap(cl.getOptionValue("password", "secret").getBytes()), HdfsZooInstance
-        .getInstance().getInstanceID());
+    ClientOpts opts = new ClientOpts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(TestRandomDeletes.class.getName(), args, scanOpts, bwOpts);
     
     try {
       long deleted = 0;
       
       Text t = new Text("test_ingest");
       
-      TreeSet<RowColumn> doomed = scanAll(t);
+      TreeSet<RowColumn> doomed = scanAll(opts, scanOpts, t);
       log.info("Got " + doomed.size() + " rows");
       
       long startTime = System.currentTimeMillis();
       while (true) {
-        long half = scrambleDeleteHalfAndCheck(t, doomed);
+        long half = scrambleDeleteHalfAndCheck(opts, scanOpts, bwOpts, t, doomed);
         deleted += half;
         if (half == 0)
           break;
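
The parseArgs call above registers several option objects with one parser, so a single command line fills in client, scanner, and batch-writer settings together; this is the pattern every tool in this commit converts to. A fragment under that assumption (ClientOpts/ScannerOpts/BatchWriterOpts are the classes introduced on this branch, and auths is assumed in scope):

  ClientOpts opts = new ClientOpts();
  ScannerOpts scanOpts = new ScannerOpts();
  BatchWriterOpts bwOpts = new BatchWriterOpts();
  opts.parseArgs("TestRandomDeletes", args, scanOpts, bwOpts); // one parse, three objects

  Scanner scanner = opts.getConnector().createScanner("test_ingest", auths);
  scanner.setBatchSize(scanOpts.scanBatchSize); // replaces the hard-coded 20000 above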

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/VerifyIngest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/VerifyIngest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/VerifyIngest.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/VerifyIngest.java Mon Jan 14 22:03:24 2013
@@ -22,6 +22,7 @@ import java.util.Map.Entry;
 import java.util.Random;
 
 import org.apache.accumulo.cloudtrace.instrument.Trace;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
@@ -34,15 +35,13 @@ import org.apache.accumulo.core.security
 import org.apache.accumulo.core.trace.DistributedTrace;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.test.TestIngest.IngestArgs;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
+import com.beust.jcommander.Parameter;
+
 
 public class VerifyIngest {
-  private static String username = "root";
-  private static byte[] passwd = "secret".getBytes();
   
   private static final Logger log = Logger.getLogger(VerifyIngest.class);
   
@@ -54,11 +53,18 @@ public class VerifyIngest {
     return Integer.parseInt(k.getColumnQualifier().toString().split("_")[1]);
   }
   
+  public static class Opts extends TestIngest.Opts {
+    @Parameter(names="-useGet", description="fetches values one at a time, instead of scanning")
+    boolean useGet = false;
+  }
+  
   public static void main(String[] args) {
-    IngestArgs ingestArgs = TestIngest.parseArgs(args);
-    Instance instance = HdfsZooInstance.getInstance();
+    Opts opts = new Opts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    opts.parseArgs(VerifyIngest.class.getName(), args, scanOpts);
+    Instance instance = opts.getInstance();
     try {
-      if (ingestArgs.trace) {
+      if (opts.trace) {
         String name = VerifyIngest.class.getSimpleName();
         DistributedTrace.enable(instance, new ZooReader(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut()), name, null);
         Trace.on(name);
@@ -68,38 +74,36 @@ public class VerifyIngest {
       Connector connector = null;
       while (connector == null) {
         try {
-          connector = instance.getConnector(username, passwd);
+          connector = opts.getConnector();
         } catch (AccumuloException e) {
           log.warn("Could not connect to accumulo; will retry: " + e);
           UtilWaitThread.sleep(1000);
         }
       }
       
-      byte[][] bytevals = TestIngest.generateValues(ingestArgs);
+      byte[][] bytevals = TestIngest.generateValues(opts);
       
       Authorizations labelAuths = new Authorizations("L1", "L2", "G1", "GROUP2");
       
-      int expectedRow = ingestArgs.startRow;
+      int expectedRow = opts.startRow;
       int expectedCol = 0;
       int recsRead = 0;
       
       long bytesRead = 0;
       long t1 = System.currentTimeMillis();
       
-      byte randomValue[] = new byte[ingestArgs.dataSize];
+      byte randomValue[] = new byte[opts.dataSize];
       Random random = new Random();
       
-      Key endKey = new Key(new Text("row_" + String.format("%010d", ingestArgs.rows + ingestArgs.startRow)));
+      Key endKey = new Key(new Text("row_" + String.format("%010d", opts.rows + opts.startRow)));
       
       int errors = 0;
       
-      connector.securityOperations().changeUserAuthorizations(username, labelAuths);
-
-      while (expectedRow < (ingestArgs.rows + ingestArgs.startRow)) {
+      while (expectedRow < (opts.rows + opts.startRow)) {
         
-        if (ingestArgs.useGet) {
-          Text rowKey = new Text("row_" + String.format("%010d", expectedRow + ingestArgs.startRow));
-          Text colf = new Text(ingestArgs.columnFamily);
+        if (opts.useGet) {
+          Text rowKey = new Text("row_" + String.format("%010d", expectedRow + opts.startRow));
+          Text colf = new Text(opts.columnFamily);
           Text colq = new Text("col_" + String.format("%05d", expectedCol));
           
           Scanner scanner = connector.createScanner("test_ingest", labelAuths);
@@ -117,8 +121,8 @@ public class VerifyIngest {
           }
           
           byte ev[];
-          if (ingestArgs.random) {
-            ev = TestIngest.genRandomValue(random, randomValue, ingestArgs.seed, expectedRow, expectedCol);
+          if (opts.random != null) {
+            ev = TestIngest.genRandomValue(random, randomValue, opts.random.intValue(), expectedRow, expectedCol);
           } else {
             ev = bytevals[expectedCol % bytevals.length];
           }
@@ -137,22 +141,20 @@ public class VerifyIngest {
           }
           
           expectedCol++;
-          if (expectedCol >= ingestArgs.cols) {
+          if (expectedCol >= opts.cols) {
             expectedCol = 0;
             expectedRow++;
           }
           
         } else {
           
-          int batchSize = 10000;
-          
           Key startKey = new Key(new Text("row_" + String.format("%010d", expectedRow)));
           
           Scanner scanner = connector.createScanner("test_ingest", labelAuths);
-          scanner.setBatchSize(batchSize);
+          scanner.setBatchSize(scanOpts.scanBatchSize);
           scanner.setRange(new Range(startKey, endKey));
-          for (int j = 0; j < ingestArgs.cols; j++) {
-            scanner.fetchColumn(new Text(ingestArgs.columnFamily), new Text("col_" + String.format("%05d", j)));
+          for (int j = 0; j < opts.cols; j++) {
+            scanner.fetchColumn(new Text(opts.columnFamily), new Text("col_" + String.format("%05d", j)));
           }
           
           int recsReadBefore = recsRead;
@@ -178,16 +180,16 @@ public class VerifyIngest {
               errors++;
             }
             
-            if (expectedRow >= (ingestArgs.rows + ingestArgs.startRow)) {
-              log.error("expectedRow (" + expectedRow + ") >= (ingestArgs.rows + ingestArgs.startRow)  (" + (ingestArgs.rows + ingestArgs.startRow)
+            if (expectedRow >= (opts.rows + opts.startRow)) {
+              log.error("expectedRow (" + expectedRow + ") >= (ingestArgs.rows + ingestArgs.startRow)  (" + (opts.rows + opts.startRow)
                   + "), get batch returned data passed end key");
               errors++;
               break;
             }
             
             byte value[];
-            if (ingestArgs.random) {
-              value = TestIngest.genRandomValue(random, randomValue, ingestArgs.seed, expectedRow, colNum);
+            if (opts.random != null) {
+              value = TestIngest.genRandomValue(random, randomValue, opts.random.intValue(), expectedRow, colNum);
             } else {
               value = bytevals[colNum % bytevals.length];
             }
@@ -198,13 +200,13 @@ public class VerifyIngest {
               errors++;
             }
             
-            if (ingestArgs.hasTimestamp && entry.getKey().getTimestamp() != ingestArgs.timestamp) {
+            if (opts.timestamp >= 0 && entry.getKey().getTimestamp() != opts.timestamp) {
               log.error("unexpected timestamp " + entry.getKey().getTimestamp() + ", rowNum : " + rowNum + " colNum : " + colNum);
               errors++;
             }
             
             expectedCol++;
-            if (expectedCol >= ingestArgs.cols) {
+            if (expectedCol >= opts.cols) {
               expectedCol = 0;
               expectedRow++;
             }
@@ -226,8 +228,8 @@ public class VerifyIngest {
         System.exit(1);
       }
       
-      if (expectedRow != (ingestArgs.rows + ingestArgs.startRow)) {
-        log.error("Did not read expected number of rows. Saw " + (expectedRow - ingestArgs.startRow) + " expected " + ingestArgs.rows);
+      if (expectedRow != (opts.rows + opts.startRow)) {
+        log.error("Did not read expected number of rows. Saw " + (expectedRow - opts.startRow) + " expected " + opts.rows);
         System.exit(1);
       } else {
         System.out.printf("%,12d records read | %,8d records/sec | %,12d bytes read | %,8d bytes/sec | %6.3f secs   %n", recsRead,

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/WrongTabletTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/WrongTabletTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/WrongTabletTest.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/WrongTabletTest.java Mon Jan 14 22:03:24 2013
@@ -16,31 +16,35 @@
  */
 package org.apache.accumulo.server.test;
 
-import java.nio.ByteBuffer;
-
 import org.apache.accumulo.cloudtrace.instrument.Tracer;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.security.thrift.AuthInfo;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 import org.apache.accumulo.core.util.ThriftUtil;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 public class WrongTabletTest {
-  private static AuthInfo rootCredentials = new AuthInfo("root", ByteBuffer.wrap("secret".getBytes()), HdfsZooInstance.getInstance().getInstanceID());
+  
+  static class Opts extends ClientOpts {
+    @Parameter(names="--location", required=true)
+    String location;
+  }
   
   public static void main(String[] args) {
-    String location = args[0];
-    ServerConfiguration conf = new ServerConfiguration(HdfsZooInstance.getInstance());
+    Opts opts = new Opts();
+    opts.parseArgs(WrongTabletTest.class.getName(), args);
+    
+    ServerConfiguration conf = new ServerConfiguration(opts.getInstance());
     try {
-      TabletClientService.Iface client = ThriftUtil.getTServerClient(location, conf.getConfiguration());
+      TabletClientService.Iface client = ThriftUtil.getTServerClient(opts.location, conf.getConfiguration());
       
       Mutation mutation = new Mutation(new Text("row_0003750001"));
-      // mutation.set(new Text("colf:colq"), new Value("val".getBytes()));
       mutation.putDelete(new Text("colf"), new Text("colq"));
-      client.update(Tracer.traceInfo(), rootCredentials, new KeyExtent(new Text("test_ingest"), null, new Text("row_0003750000")).toThrift(), mutation.toThrift());
+      client.update(Tracer.traceInfo(), opts.getAuthInfo(), new KeyExtent(new Text("!!"), null, new Text("row_0003750000")).toThrift(), mutation.toThrift());
     } catch (Exception e) {
       throw new RuntimeException(e);
     }
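
Marking --location with required=true above moves the failure forward: parse() throws a ParameterException when the flag is missing, instead of the old args[0] access failing later. An illustrative sketch (class name hypothetical):

  import com.beust.jcommander.JCommander;
  import com.beust.jcommander.Parameter;

  class LocOpts {
    @Parameter(names = "--location", required = true)
    String location;
  }

  // new JCommander(new LocOpts()).parse();                          // ParameterException
  // new JCommander(new LocOpts()).parse("--location", "host:9997"); // location is set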

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousBatchWalker.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousBatchWalker.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousBatchWalker.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousBatchWalker.java Mon Jan 14 22:03:24 2013
@@ -23,96 +23,58 @@ import java.util.List;
 import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.BatchScannerOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.server.test.continuous.ContinuousWalk.RandomAuths;
 import org.apache.hadoop.io.Text;
-import org.apache.log4j.FileAppender;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
+
+import com.beust.jcommander.Parameter;
+import com.beust.jcommander.validators.PositiveInteger;
 
 public class ContinuousBatchWalker {
-  private static String debugLog = null;
-  private static String authsFile = null;
-  
-  private static String[] processOptions(String[] args) {
-    ArrayList<String> al = new ArrayList<String>();
-    
-    for (int i = 0; i < args.length; i++) {
-      if (args[i].equals("--debug")) {
-        debugLog = args[++i];
-      } else if (args[i].equals("--auths")) {
-        authsFile = args[++i];
-      } else {
-        al.add(args[i]);
-      }
-    }
-    
-    return al.toArray(new String[al.size()]);
+
+  static class Opts extends ContinuousWalk.Opts {
+    @Parameter(names="--numToScan", description="Number rows to scan between sleeps", required=true, validateWith=PositiveInteger.class)
+    long numToScan = 0;
   }
-  
+
   public static void main(String[] args) throws Exception {
     
-    args = processOptions(args);
-    
-    if (args.length != 10) {
-      throw new IllegalArgumentException("usage : " + ContinuousBatchWalker.class.getName()
-          + " [--debug <debug log>] [--auths <file>] <instance name> <zookeepers> <user> <pass> <table> <min> <max> <sleep time> <batch size> <query threads>");
-    }
-    
-    if (debugLog != null) {
-      Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
-      logger.setLevel(Level.TRACE);
-      logger.setAdditivity(false);
-      logger.addAppender(new FileAppender(new PatternLayout("%d{dd HH:mm:ss,SSS} [%-8c{2}] %-5p: %m%n"), debugLog, true));
-    }
-    
-    String instanceName = args[0];
-    String zooKeepers = args[1];
-    
-    String user = args[2];
-    String password = args[3];
-    
-    String table = args[4];
-    
-    long min = Long.parseLong(args[5]);
-    long max = Long.parseLong(args[6]);
-    
-    long sleepTime = Long.parseLong(args[7]);
-    
-    int batchSize = Integer.parseInt(args[8]);
-    int numQueryThreads = Integer.parseInt(args[9]);
+    Opts opts = new Opts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    BatchScannerOpts bsOpts = new BatchScannerOpts();
+    opts.parseArgs(ContinuousBatchWalker.class.getName(), args, scanOpts, bsOpts);
     
     Random r = new Random();
-    RandomAuths randomAuths = new RandomAuths(authsFile);
-    Authorizations auths = randomAuths.getAuths(r);
+    Authorizations auths = opts.randomAuths.getAuths(r);
 
-    Connector conn = new ZooKeeperInstance(instanceName, zooKeepers).getConnector(user, password.getBytes());
-    Scanner scanner = conn.createScanner(table, auths);
-    BatchScanner bs = conn.createBatchScanner(table, auths, numQueryThreads);
+    Connector conn = opts.getConnector();
+    Scanner scanner = conn.createScanner(opts.getTableName(), auths);
+    scanner.setBatchSize(scanOpts.scanBatchSize);
+    
+    BatchScanner bs = conn.createBatchScanner(opts.getTableName(), auths, bsOpts.scanThreads);
+    bs.setTimeout(bsOpts.scanTimeout, TimeUnit.MILLISECONDS);
 
     while (true) {
-      Set<Text> batch = getBatch(scanner, min, max, batchSize, r);
-      
+      Set<Text> batch = getBatch(scanner, opts.min, opts.max, scanOpts.scanBatchSize, r);
       List<Range> ranges = new ArrayList<Range>(batch.size());
       
       for (Text row : batch) {
         ranges.add(new Range(row));
       }
       
-      runBatchScan(batchSize, bs, batch, ranges);
+      runBatchScan(scanOpts.scanBatchSize, bs, batch, ranges);
       
-      UtilWaitThread.sleep(sleepTime);
+      UtilWaitThread.sleep(opts.sleepTime);
     }
     
   }
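
The validateWith=PositiveInteger.class used for --numToScan above is one of JCommander's stock validators: it runs on the raw string at parse time and rejects negative values with a ParameterException. A sketch with the option name taken from the diff (class name hypothetical):

  import com.beust.jcommander.JCommander;
  import com.beust.jcommander.Parameter;
  import com.beust.jcommander.validators.PositiveInteger;

  class WalkerOpts {
    @Parameter(names = "--numToScan", required = true, validateWith = PositiveInteger.class)
    long numToScan = 0;
  }

  // new JCommander(new WalkerOpts()).parse("--numToScan", "-5"); // rejected at parse time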

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousIngest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousIngest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousIngest.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousIngest.java Mon Jan 14 22:03:24 2013
@@ -17,33 +17,28 @@
 package org.apache.accumulo.server.test.continuous;
 
 import java.io.BufferedReader;
+import java.io.IOException;
 import java.io.InputStreamReader;
-import java.net.InetAddress;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Random;
 import java.util.UUID;
-import java.util.concurrent.TimeUnit;
 import java.util.zip.CRC32;
 import java.util.zip.Checksum;
 
 import org.apache.accumulo.cloudtrace.instrument.CountSampler;
 import org.apache.accumulo.cloudtrace.instrument.Trace;
-import org.apache.accumulo.cloudtrace.instrument.Tracer;
-import org.apache.accumulo.cloudtrace.instrument.receivers.ZooSpanClient;
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ClientOnDefaultTable;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.server.test.FastFormat;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -54,17 +49,70 @@ import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.log4j.PatternLayout;
 
+import com.beust.jcommander.IStringConverter;
+import com.beust.jcommander.Parameter;
+
 
 public class ContinuousIngest {
   
-  private static String visFile = null;
-  private static String debugLog = null;
+  static public class BaseOpts extends ClientOnDefaultTable {
+    public class DebugConverter implements IStringConverter<String> {
+      @Override
+      public String convert(String debugLog) {
+        Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
+        logger.setLevel(Level.TRACE);
+        logger.setAdditivity(false);
+        try {
+          logger.addAppender(new FileAppender(new PatternLayout("%d{dd HH:mm:ss,SSS} [%-8c{2}] %-5p: %m%n"), debugLog, true));
+        } catch (IOException ex) {
+          throw new RuntimeException(ex);
+        }
+        return debugLog;
+      }
+    }
+    
+    @Parameter(names="--min", description="lowest random row number to use")
+    long min = 0;
+    
+    @Parameter(names="--max", description="maximum random row number to use")
+    long max = Long.MAX_VALUE;
+    
+    @Parameter(names="--debugLog", description="file to write debugging output", converter=DebugConverter.class)
+    String debugLog = null;
+
+    BaseOpts() { super("ci"); }
+  }
+  
+  public static class ShortConverter implements IStringConverter<Short> {
+    @Override
+    public Short convert(String value) {
+      return Short.valueOf(value);
+    }
+  }
+  
+  static public class Opts extends BaseOpts {
+    @Parameter(names="--num", description="the number of entries to ingest")
+    long num = Long.MAX_VALUE;
+    
+    @Parameter(names="--maxColF", description="maximum column family value to use", converter=ShortConverter.class)
+    short maxColF = Short.MAX_VALUE;
+    
+    @Parameter(names="--maxColQ", description="maximum column qualifier value to use", converter=ShortConverter.class)
+    short maxColQ = Short.MAX_VALUE;
+ 
+    @Parameter(names="--addCheckSum", description="turn on checksums")
+    boolean checksum = false;
+    
+    @Parameter(names="--visibilities", description="read the visibilities to ingest with from a file")
+    String visFile = null;
+  }
+  
   private static final byte[] EMPTY_BYTES = new byte[0];
   
   private static List<ColumnVisibility> visibilities;
   
-  private static void initVisibilities() throws Exception {
-    if (visFile == null) {
+  private static void initVisibilities(Opts opts) throws Exception {
+    if (opts.visFile == null) {
       visibilities = Collections.singletonList(new ColumnVisibility());
       return;
     }
@@ -72,7 +120,7 @@ public class ContinuousIngest {
     visibilities = new ArrayList<ColumnVisibility>();
     
     FileSystem fs = FileSystem.get(new Configuration());
-    BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(visFile))));
+    BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(opts.visFile))));
     
     String line;
     
@@ -87,78 +135,25 @@ public class ContinuousIngest {
     return visibilities.get(rand.nextInt(visibilities.size()));
   }
 
-  private static String[] processOptions(String[] args) {
-    ArrayList<String> al = new ArrayList<String>();
-    
-    for (int i = 0; i < args.length; i++) {
-      if (args[i].equals("--debug")) {
-        debugLog = args[++i];
-      } else if (args[i].equals("--visibilities")) {
-        visFile = args[++i];
-      } else {
-        al.add(args[i]);
-      }
-    }
-    
-    return al.toArray(new String[al.size()]);
-  }
-  
   public static void main(String[] args) throws Exception {
     
-    args = processOptions(args);
-    
-    if (args.length != 14) {
-      throw new IllegalArgumentException(
-          "usage : "
-              + ContinuousIngest.class.getName()
-              + " [--debug <debug log>] [--visibilities <file>] <instance name> <zookeepers> <user> <pass> <table> <num> <min> <max> <max colf> <max colq> <max mem> <max latency> <max threads> <enable checksum>");
-    }
-    
-    if (debugLog != null) {
-      Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
-      logger.setLevel(Level.TRACE);
-      logger.setAdditivity(false);
-      logger.addAppender(new FileAppender(new PatternLayout("%d{dd HH:mm:ss,SSS} [%-8c{2}] %-5p: %m%n"), debugLog, true));
-    }
+    Opts opts = new Opts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(ContinuousIngest.class.getName(), args, bwOpts);
     
-    initVisibilities();
+    initVisibilities(opts);
 
-    String instanceName = args[0];
-    String zooKeepers = args[1];
-    
-    String user = args[2];
-    String password = args[3];
-    
-    String table = args[4];
-    
-    long num = Long.parseLong(args[5]);
-    long min = Long.parseLong(args[6]);
-    long max = Long.parseLong(args[7]);
-    short maxColF = Short.parseShort(args[8]);
-    short maxColQ = Short.parseShort(args[9]);
-    
-    long maxMemory = Long.parseLong(args[10]);
-    long maxLatency = Integer.parseInt(args[11]);
-    int maxWriteThreads = Integer.parseInt(args[12]);
-    
-    boolean checksum = Boolean.parseBoolean(args[13]);
-    
-    if (min < 0 || max < 0 || max <= min) {
+    if (opts.min < 0 || opts.max < 0 || opts.max <= opts.min) {
       throw new IllegalArgumentException("bad min and max");
     }
-    Instance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-    Connector conn = instance.getConnector(user, password);
-    String localhost = InetAddress.getLocalHost().getHostName();
-    String path = ZooUtil.getRoot(instance) + Constants.ZTRACERS;
-    Tracer.getInstance().addReceiver(new ZooSpanClient(zooKeepers, path, localhost, "cingest", 1000));
+    Connector conn = opts.getConnector();
     
-    if (!conn.tableOperations().exists(table))
+    if (!conn.tableOperations().exists(opts.getTableName()))
       try {
-        conn.tableOperations().create(table);
+        conn.tableOperations().create(opts.getTableName());
       } catch (TableExistsException tee) {}
 
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig().setMaxMemory(maxMemory).setMaxLatency(maxLatency, TimeUnit.MILLISECONDS)
-        .setMaxWriteThreads(maxWriteThreads));
+    BatchWriter bw = conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
     bw = Trace.wrapAll(bw, new CountSampler(1024));
     
     Random r = new Random();
@@ -187,38 +182,38 @@ public class ContinuousIngest {
       ColumnVisibility cv = getVisibility(r);
 
       for (int index = 0; index < flushInterval; index++) {
-        long rowLong = genLong(min, max, r);
+        long rowLong = genLong(opts.min, opts.max, r);
         prevRows[index] = rowLong;
         firstRows[index] = rowLong;
         
-        int cf = r.nextInt(maxColF);
-        int cq = r.nextInt(maxColQ);
+        int cf = r.nextInt(opts.maxColF);
+        int cq = r.nextInt(opts.maxColQ);
         
         firstColFams[index] = cf;
         firstColQuals[index] = cq;
         
-        Mutation m = genMutation(rowLong, cf, cq, cv, ingestInstanceId, count, null, r, checksum);
+        Mutation m = genMutation(rowLong, cf, cq, cv, ingestInstanceId, count, null, r, opts.checksum);
         count++;
         bw.addMutation(m);
       }
       
       lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
-      if (count >= num)
+      if (count >= opts.num)
         break out;
       
       // generate subsequent sets of nodes that link to previous set of nodes
       for (int depth = 1; depth < maxDepth; depth++) {
         for (int index = 0; index < flushInterval; index++) {
-          long rowLong = genLong(min, max, r);
+          long rowLong = genLong(opts.min, opts.max, r);
           byte[] prevRow = genRow(prevRows[index]);
           prevRows[index] = rowLong;
-          Mutation m = genMutation(rowLong, r.nextInt(maxColF), r.nextInt(maxColQ), cv, ingestInstanceId, count, prevRow, r, checksum);
+          Mutation m = genMutation(rowLong, r.nextInt(opts.maxColF), r.nextInt(opts.maxColQ), cv, ingestInstanceId, count, prevRow, r, opts.checksum);
           count++;
           bw.addMutation(m);
         }
         
         lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
-        if (count >= num)
+        if (count >= opts.num)
           break out;
       }
       
@@ -226,16 +221,17 @@ public class ContinuousIngest {
       // point to something
       for (int index = 0; index < flushInterval - 1; index++) {
         Mutation m = genMutation(firstRows[index], firstColFams[index], firstColQuals[index], cv, ingestInstanceId, count, genRow(prevRows[index + 1]), r,
-            checksum);
+            opts.checksum);
         count++;
         bw.addMutation(m);
       }
       lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
-      if (count >= num)
+      if (count >= opts.num)
         break out;
     }
     
     bw.close();
+    opts.stopTracing();
   }
 
   private static long flush(BatchWriter bw, long count, final int flushInterval, long lastFlushTime) throws MutationsRejectedException {

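Note on the Opts class above: it is part of the commit's new JCommander-based argument handling, where each @Parameter field is populated from the command line when parseArgs() runs, replacing the hand-rolled positional parsing removed in this file. The ShortConverter referenced by --maxColF and --maxColQ is defined elsewhere in this commit (not in this part of the diff); assuming it simply wraps Short.valueOf, a minimal sketch would be:

    import com.beust.jcommander.IStringConverter;

    // Hypothetical sketch; the ShortConverter shipped with this commit is not
    // shown here. An explicit converter is needed because short is not among
    // JCommander's built-in parameter types.
    public class ShortConverter implements IStringConverter<Short> {
      @Override
      public Short convert(String value) {
        return Short.valueOf(value);  // bad input surfaces as a NumberFormatException
      }
    }
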
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousMoru.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousMoru.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousMoru.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousMoru.java Mon Jan 14 22:03:24 2013
@@ -18,18 +18,19 @@ package org.apache.accumulo.server.test.
 
 import java.io.IOException;
 import java.util.Random;
+import java.util.Set;
 import java.util.UUID;
 
-import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.server.test.continuous.ContinuousIngest.BaseOpts;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.Text;
@@ -39,8 +40,11 @@ import org.apache.hadoop.mapreduce.Mappe
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+import com.beust.jcommander.validators.PositiveInteger;
+
 /**
- * A map only job that reads a table created by continuous ingest and creates doubly linked list. This map reduce job test ability of a map only job to read and
+ * A map only job that reads a table created by continuous ingest and creates a doubly linked list. This map reduce job tests the ability of a map only job to read and
  * write to accumulo at the same time. This map reduce job mutates the table in such a way that it should not create any undefined nodes.
  * 
  */
@@ -107,38 +111,33 @@ public class ContinuousMoru extends Conf
     }
   }
   
+  static class Opts extends BaseOpts {
+    @Parameter(names="--maxColF", description="maximum column family value to use")
+    short maxColF = Short.MAX_VALUE;
+    
+    @Parameter(names="--maxColQ", description="maximum column qualifier value to use")
+    short maxColQ = Short.MAX_VALUE;
+ 
+    @Parameter(names="--maxMappers", description="the maximum number of mappers to use", required=true, validateWith=PositiveInteger.class)
+    int maxMaps = 0;
+  }
+  
   @Override
   public int run(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
-    if (args.length != 13) {
-      throw new IllegalArgumentException("Usage : " + ContinuousMoru.class.getName()
-          + " <instance name> <zookeepers> <user> <pass> <table> <min> <max> <max cf> <max cq> <max mem> <max latency> <num threads> <max maps>");
-    }
-    
-    String instance = args[0];
-    String zookeepers = args[1];
-    String user = args[2];
-    String pass = args[3];
-    String table = args[4];
-    String min = args[5];
-    String max = args[6];
-    String max_cf = args[7];
-    String max_cq = args[8];
-    String maxMem = args[9];
-    String maxLatency = args[10];
-    String numThreads = args[11];
-    String maxMaps = args[12];
+    Opts opts = new Opts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(ContinuousMoru.class.getName(), args, bwOpts);
     
     Job job = new Job(getConf(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
     job.setJarByClass(this.getClass());
     
     job.setInputFormatClass(AccumuloInputFormat.class);
-    AccumuloInputFormat.setInputInfo(job.getConfiguration(), user, pass.getBytes(), table, new Authorizations());
-    AccumuloInputFormat.setZooKeeperInstance(job.getConfiguration(), instance, zookeepers);
+    opts.setAccumuloConfigs(job);
     
     // set up ranges
     try {
-      AccumuloInputFormat.setRanges(job.getConfiguration(), new ZooKeeperInstance(instance, zookeepers).getConnector(user, pass.getBytes()).tableOperations()
-          .splitRangeByTablets(table, new Range(), Integer.parseInt(maxMaps)));
+      Set<Range> ranges = opts.getConnector().tableOperations().splitRangeByTablets(opts.getTableName(), new Range(), opts.maxMaps);
+      AccumuloInputFormat.setRanges(job.getConfiguration(), ranges);
       AccumuloInputFormat.disableAutoAdjustRanges(job.getConfiguration());
     } catch (Exception e) {
       throw new IOException(e);
@@ -149,20 +148,19 @@ public class ContinuousMoru extends Conf
     job.setNumReduceTasks(0);
     
     job.setOutputFormatClass(AccumuloOutputFormat.class);
-    AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), user, pass.getBytes(), false, table);
-    AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(), instance, zookeepers);
-    AccumuloOutputFormat.setMaxLatency(job.getConfiguration(), (int) (Integer.parseInt(maxLatency) / 1000.0));
-    AccumuloOutputFormat.setMaxMutationBufferSize(job.getConfiguration(), Long.parseLong(maxMem));
-    AccumuloOutputFormat.setMaxWriteThreads(job.getConfiguration(), Integer.parseInt(numThreads));
+    AccumuloOutputFormat.setMaxLatency(job.getConfiguration(), (int) (bwOpts.batchLatency / 1000.0));
+    AccumuloOutputFormat.setMaxMutationBufferSize(job.getConfiguration(), bwOpts.batchMemory);
+    AccumuloOutputFormat.setMaxWriteThreads(job.getConfiguration(), bwOpts.batchThreads);
     
     Configuration conf = job.getConfiguration();
-    conf.setLong(MIN, Long.parseLong(min));
-    conf.setLong(MAX, Long.parseLong(max));
-    conf.setInt(MAX_CF, Integer.parseInt(max_cf));
-    conf.setInt(MAX_CQ, Integer.parseInt(max_cq));
+    conf.setLong(MIN, opts.min);
+    conf.setLong(MAX, opts.max);
+    conf.setInt(MAX_CF, opts.maxColF);
+    conf.setInt(MAX_CQ, opts.maxColQ);
     conf.set(CI_ID, UUID.randomUUID().toString());
     
     job.waitForCompletion(true);
+    opts.stopTracing();
     return job.isSuccessful() ? 0 : 1;
   }
   

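As in ContinuousIngest, run() now hands both the tool-specific Opts and the shared BatchWriterOpts to parseArgs(), and the batch writer settings (bwOpts.batchLatency, batchMemory, batchThreads) feed AccumuloOutputFormat directly. parseArgs() belongs to the new cli helper classes and is not part of this hunk; assuming it is a thin wrapper over JCommander's multi-object registration, it would look roughly like:

    import com.beust.jcommander.JCommander;
    import com.beust.jcommander.ParameterException;

    // Hypothetical sketch of the parseArgs() helper; the real implementation
    // lives in the new cli classes elsewhere in this commit.
    public class CliSketch {
      public static void parseArgs(String programName, String[] args, Object... optObjects) {
        JCommander jc = new JCommander();
        for (Object o : optObjects)
          jc.addObject(o);            // every @Parameter field in o becomes a flag
        jc.setProgramName(programName);
        try {
          jc.parse(args);
        } catch (ParameterException e) {
          jc.usage();                 // print the generated usage message before failing
          throw e;
        }
      }
    }
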
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousQuery.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousQuery.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousQuery.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousQuery.java Mon Jan 14 22:03:24 2013
@@ -19,43 +19,37 @@ package org.apache.accumulo.server.test.
 import java.util.Map.Entry;
 import java.util.Random;
 
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.server.test.continuous.ContinuousIngest.BaseOpts;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 public class ContinuousQuery {
   
+  public static class Opts extends BaseOpts {
+    @Parameter(names="--sleep", description="the time to wait between queries", converter=TimeConverter.class)
+    long sleepTime = 100;
+  }
+  
   public static void main(String[] args) throws Exception {
-    if (args.length != 8) {
-      throw new IllegalArgumentException("usage : " + ContinuousIngest.class.getName()
-          + " <instance name> <zookeepers> <user> <pass> <table> <min> <max> <sleep time>");
-    }
-    
-    String instanceName = args[0];
-    String zooKeepers = args[1];
-    
-    String user = args[2];
-    String password = args[3];
-    
-    String table = args[4];
-    
-    long min = Long.parseLong(args[5]);
-    long max = Long.parseLong(args[6]);
-    
-    long sleepTime = Long.parseLong(args[7]);
-    
-    Connector conn = new ZooKeeperInstance(instanceName, zooKeepers).getConnector(user, password.getBytes());
-    Scanner scanner = conn.createScanner(table, new Authorizations());
+    Opts opts = new Opts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    opts.parseArgs(ContinuousQuery.class.getName(), args, scanOpts);
+    
+    Connector conn = opts.getConnector();
+    Scanner scanner = conn.createScanner(opts.getTableName(), opts.auths);
+    scanner.setBatchSize(scanOpts.scanBatchSize);
     
     Random r = new Random();
     
     while (true) {
-      byte[] row = ContinuousIngest.genRow(min, max, r);
+      byte[] row = ContinuousIngest.genRow(opts.min, opts.max, r);
       
       int count = 0;
       
@@ -69,11 +63,8 @@ public class ContinuousQuery {
       
       System.out.printf("SRQ %d %s %d %d%n", t1, new String(row), (t2 - t1), count);
       
-      if (sleepTime > 0)
-        Thread.sleep(sleepTime);
-      
+      if (opts.sleepTime > 0)
+        Thread.sleep(opts.sleepTime);
     }
-    
   }
-  
 }

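The query loop above picks a uniformly random row in [min, max) and times a lookup of exactly that row; the scan itself sits in context lines elided from the hunk. A minimal sketch of that elided middle, assuming the scanner, opts, and r set up earlier in main():

    // Timed single-row lookup; Key, Value, Range, Text, and Map.Entry are
    // imported at the top of the file.
    byte[] row = ContinuousIngest.genRow(opts.min, opts.max, r);
    scanner.setRange(new Range(new Text(row)));   // restrict the scan to one row
    long t1 = System.currentTimeMillis();
    int count = 0;
    for (Entry<Key,Value> entry : scanner)
      count++;                                    // entries returned for this row
    long t2 = System.currentTimeMillis();
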
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousScanner.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousScanner.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousScanner.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousScanner.java Mon Jan 14 22:03:24 2013
@@ -16,94 +16,48 @@
  */
 package org.apache.accumulo.server.test.continuous;
 
-import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Map.Entry;
 import java.util.Random;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.server.test.continuous.ContinuousWalk.RandomAuths;
 import org.apache.hadoop.io.Text;
-import org.apache.log4j.FileAppender;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
+
+import com.beust.jcommander.Parameter;
+import com.beust.jcommander.validators.PositiveInteger;
 
 public class ContinuousScanner {
-  private static String debugLog = null;
-  private static String authsFile = null;
   
-  private static String[] processOptions(String[] args) {
-    ArrayList<String> al = new ArrayList<String>();
-    
-    for (int i = 0; i < args.length; i++) {
-      if (args[i].equals("--debug")) {
-        debugLog = args[++i];
-      } else if (args[i].equals("--auths")) {
-        authsFile = args[++i];
-      } else {
-        al.add(args[i]);
-      }
-    }
-    
-    return al.toArray(new String[al.size()]);
+  static class Opts extends ContinuousWalk.Opts {
+    @Parameter(names="--numToScan", description="Number rows to scan between sleeps", required=true, validateWith=PositiveInteger.class)
+    long numToScan = 0;
   }
   
   public static void main(String[] args) throws Exception {
-    
-    args = processOptions(args);
-    
-    if (args.length != 9) {
-      throw new IllegalArgumentException("usage : " + ContinuousScanner.class.getName()
-          + " [--debug <debug log>] [--auths <file>] <instance name> <zookeepers> <user> <pass> <table> <min> <max> <sleep time> <num to scan>");
-    }
-    
-    if (debugLog != null) {
-      Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
-      logger.setLevel(Level.TRACE);
-      logger.setAdditivity(false);
-      logger.addAppender(new FileAppender(new PatternLayout("%d{dd HH:mm:ss,SSS} [%-8c{2}] %-5p: %m%n"), debugLog, true));
-    }
+    Opts opts = new Opts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    opts.parseArgs(ContinuousScanner.class.getName(), args, scanOpts);
     
     Random r = new Random();
 
-    String instanceName = args[0];
-    String zooKeepers = args[1];
-    
-    String user = args[2];
-    String password = args[3];
-    
-    String table = args[4];
-    
-    long min = Long.parseLong(args[5]);
-    long max = Long.parseLong(args[6]);
     long distance = 1000000000000l;
     
-    long sleepTime = Long.parseLong(args[7]);
-    
-    int numToScan = Integer.parseInt(args[8]);
-    
-    RandomAuths randomAuths = new RandomAuths(authsFile);
-
-    Instance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-    Connector conn = instance.getConnector(user, password.getBytes());
-    Authorizations auths = randomAuths.getAuths(r);
-    Scanner scanner = conn.createScanner(table, auths);
+    Connector conn = opts.getConnector();
+    Authorizations auths = opts.randomAuths.getAuths(r);
+    Scanner scanner = conn.createScanner(opts.getTableName(), auths);
+    scanner.setBatchSize(scanOpts.scanBatchSize);
     
-    double delta = Math.min(.05, .05 / (numToScan / 1000.0));
-    // System.out.println("Delta "+delta);
+    double delta = Math.min(.05, .05 / (opts.numToScan / 1000.0));
     
     while (true) {
-      long startRow = ContinuousIngest.genLong(min, max - distance, r);
+      long startRow = ContinuousIngest.genLong(opts.min, opts.max - distance, r);
       byte[] scanStart = ContinuousIngest.genRow(startRow);
       byte[] scanStop = ContinuousIngest.genRow(startRow + distance);
       
@@ -124,13 +78,13 @@ public class ContinuousScanner {
       
       // System.out.println("P1 " +count +" "+((1-delta) * numToScan)+" "+((1+delta) * numToScan)+" "+numToScan);
       
-      if (count < (1 - delta) * numToScan || count > (1 + delta) * numToScan) {
+      if (count < (1 - delta) * opts.numToScan || count > (1 + delta) * opts.numToScan) {
         if (count == 0) {
           distance = distance * 10;
           if (distance < 0)
             distance = 1000000000000l;
         } else {
-          double ratio = (double) numToScan / count;
+          double ratio = (double) opts.numToScan / count;
           // move ratio closer to 1 to make change slower
           ratio = ratio - (ratio - 1.0) * (2.0 / 3.0);
           distance = (long) (ratio * distance);
@@ -141,8 +95,8 @@ public class ContinuousScanner {
       
       System.out.printf("SCN %d %s %d %d%n", t1, new String(scanStart), (t2 - t1), count);
       
-      if (sleepTime > 0)
-        UtilWaitThread.sleep(sleepTime);
+      if (opts.sleepTime > 0)
+        UtilWaitThread.sleep(opts.sleepTime);
     }
     
   }

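The loop above keeps the number of entries returned per scan near --numToScan by resizing the row-range width (distance) whenever the observed count drifts more than delta from the target; the correction is deliberately damped so the window changes by only a third of the raw ratio per iteration. A worked example of that arithmetic:

    // Standalone illustration of the damping above; the constants mirror the
    // committed code, the sample counts are made up.
    long distance = 1000000000000L;
    long numToScan = 1000;                           // the --numToScan target
    long count = 2000;                               // scan returned twice the target
    double ratio = (double) numToScan / count;       // 0.5: raw correction would halve the window
    ratio = ratio - (ratio - 1.0) * (2.0 / 3.0);     // 0.8333...: only 1/3 of the correction applied
    distance = (long) (ratio * distance);            // 833333333333: the window shrinks gently
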
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousStatsCollector.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousStatsCollector.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousStatsCollector.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousStatsCollector.java Mon Jan 14 22:03:24 2013
@@ -24,10 +24,11 @@ import java.util.TimerTask;
 
 import org.apache.accumulo.cloudtrace.instrument.Tracer;
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOnRequiredTable;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.impl.MasterClient;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.data.Key;
@@ -41,7 +42,6 @@ import org.apache.accumulo.core.master.t
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Stat;
 import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.monitor.Monitor;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.hadoop.conf.Configuration;
@@ -57,15 +57,13 @@ public class ContinuousStatsCollector {
   static class StatsCollectionTask extends TimerTask {
     
     private final String tableId;
-    private ZooKeeperInstance instance;
-    private String user;
-    private String pass;
-    
-    public StatsCollectionTask(String tableName, String instanceName, String zooHosts, String user, String pass) {
-      this.instance = new ZooKeeperInstance(instanceName, zooHosts);
-      this.tableId = Tables.getNameToIdMap(instance).get(tableName);
-      this.user = user;
-      this.pass = pass;
+    private final Opts opts;
+    private final int scanBatchSize;
+    
+    public StatsCollectionTask(Opts opts, int scanBatchSize) {
+      this.opts = opts;
+      this.scanBatchSize = scanBatchSize;
+      this.tableId = Tables.getNameToIdMap(opts.getInstance()).get(opts.tableName);
       System.out
           .println("TIME TABLET_SERVERS TOTAL_ENTRIES TOTAL_INGEST TOTAL_QUERY TABLE_RECS TABLE_RECS_IN_MEM TABLE_INGEST TABLE_QUERY TABLE_TABLETS TABLE_TABLETS_ONLINE"
               + " ACCUMULO_DU ACCUMULO_DIRS ACCUMULO_FILES TABLE_DU TABLE_DIRS TABLE_FILES"
@@ -88,9 +86,10 @@ public class ContinuousStatsCollector {
     }
     
     private String getTabletStats() throws Exception {
-      Connector conn = instance.getConnector(user, pass);
       
-      Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+      Connector conn = opts.getConnector();
+      Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, opts.auths);
+      scanner.setBatchSize(scanBatchSize);
       scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
       scanner.addScanIterator(new IteratorSetting(1000, "cfc", ColumnFamilyCounter.class.getName()));
       scanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
@@ -126,7 +125,7 @@ public class ContinuousStatsCollector {
       
       MasterClientService.Iface client = null;
       try {
-        client = MasterClient.getConnectionWithRetry(HdfsZooInstance.getInstance());
+        client = MasterClient.getConnectionWithRetry(opts.getInstance());
         MasterMonitorInfo stats = client.getMasterStats(Tracer.traceInfo(), SecurityConstants.getSystemCredentials());
         
         TableInfo all = new TableInfo();
@@ -170,10 +169,16 @@ public class ContinuousStatsCollector {
     
   }
   
+  static class Opts extends ClientOnRequiredTable {
+  }
+  
   public static void main(String[] args) {
+    Opts opts = new Opts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    opts.parseArgs(ContinuousStatsCollector.class.getName(), args, scanOpts);
     Timer jtimer = new Timer();
     
-    jtimer.schedule(new StatsCollectionTask(args[0], args[1], args[2], args[3], args[4]), 0, 30000);
+    jtimer.schedule(new StatsCollectionTask(opts, scanOpts.scanBatchSize), 0, 30000);
   }
   
 }

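getTabletStats() pushes the per-tablet aggregation server side: attaching ColumnFamilyCounter as a scan-time iterator means each tablet server counts its own metadata entries and only the totals cross the wire. The pattern in isolation, assuming a Connector and Authorizations as in the task above:

    // Scan-time iterator attachment, mirroring getTabletStats(); priority 1000
    // and the name "cfc" are arbitrary but must be unique on this scanner.
    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, auths);
    scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
    scanner.addScanIterator(new IteratorSetting(1000, "cfc", ColumnFamilyCounter.class.getName()));
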
Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousVerify.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousVerify.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousVerify.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/test/continuous/ContinuousVerify.java Mon Jan 14 22:03:24 2013
@@ -23,14 +23,12 @@ import java.util.HashSet;
 import java.util.Random;
 import java.util.Set;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.ClientOnDefaultTable;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.server.test.continuous.ContinuousWalk.BadChecksumException;
 import org.apache.hadoop.conf.Configured;
@@ -45,6 +43,9 @@ import org.apache.hadoop.mapreduce.lib.o
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+import com.beust.jcommander.validators.PositiveInteger;
+
 /**
  * A map reduce job that verifies a table created by continuous ingest. It verifies that all referenced nodes are defined.
  */
@@ -134,66 +135,48 @@ public class ContinuousVerify extends Co
     }
   }
   
-  @Override
-  public int run(String[] args) throws Exception {
+  static class Opts extends ClientOnDefaultTable {
+    @Parameter(names="--output", description="location in HDFS to store the results; must not exist", required=true)
+    String outputDir = "/tmp/continuousVerify";
     
-    String auths = "";
-    ArrayList<String> argsList = new ArrayList<String>();
+    @Parameter(names="--maxMappers", description="the maximum number of mappers to use", required=true, validateWith=PositiveInteger.class)
+    int maxMaps = 0;
     
-    for (int i = 0; i < args.length; i++) {
-      if (args[i].equals("--auths")) {
-        auths = args[++i];
-      } else {
-        argsList.add(args[i]);
-      }
-    }
+    @Parameter(names="--reducers", description="the number of reducers to use", required=true, validateWith=PositiveInteger.class)
+    int reducers = 0;
     
-    args = argsList.toArray(new String[0]);
-
-    if (args.length != 9) {
-      throw new IllegalArgumentException("Usage : " + ContinuousVerify.class.getName()
-          + " <instance name> <zookeepers> <user> <pass> <table> <output dir> <max mappers> <num reducers> <scan offline>");
-    }
+    @Parameter(names="--offline", description="perform the verification directly on the files while the table is offline")
+    boolean scanOffline = false;
     
-    String instance = args[0];
-    String zookeepers = args[1];
-    String user = args[2];
-    String pass = args[3];
-    String table = args[4];
-    String outputdir = args[5];
-    String maxMaps = args[6];
-    String reducers = args[7];
-    boolean scanOffline = Boolean.parseBoolean(args[8]);
+    public Opts() { super("ci"); }
+  }
+  
+  @Override
+  public int run(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(this.getClass().getName(), args);
     
     Job job = new Job(getConf(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
     job.setJarByClass(this.getClass());
     
-    String clone = table;
+    String clone = opts.getTableName();
     Connector conn = null;
-    if (scanOffline) {
+    if (opts.scanOffline) {
       Random random = new Random();
-      clone = table + "_" + String.format("%016x", Math.abs(random.nextLong()));
-      ZooKeeperInstance zki = new ZooKeeperInstance(instance, zookeepers);
-      conn = zki.getConnector(user, pass.getBytes());
-      conn.tableOperations().clone(table, clone, true, new HashMap<String,String>(), new HashSet<String>());
+      clone = opts.getTableName() + "_" + String.format("%016x", Math.abs(random.nextLong()));
+      conn = opts.getConnector();
+      conn.tableOperations().clone(opts.getTableName(), clone, true, new HashMap<String,String>(), new HashSet<String>());
       conn.tableOperations().offline(clone);
     }
 
     job.setInputFormatClass(AccumuloInputFormat.class);
-    Authorizations authorizations;
-    if (auths == null || auths.trim().equals(""))
-      authorizations = Constants.NO_AUTHS;
-    else
-      authorizations = new Authorizations(auths.split(","));
-
-    AccumuloInputFormat.setInputInfo(job.getConfiguration(), user, pass.getBytes(), clone, authorizations);
-    AccumuloInputFormat.setZooKeeperInstance(job.getConfiguration(), instance, zookeepers);
-    AccumuloInputFormat.setScanOffline(job.getConfiguration(), scanOffline);
+
+    opts.setAccumuloConfigs(job);
+    AccumuloInputFormat.setScanOffline(job.getConfiguration(), opts.scanOffline);
 
     // set up ranges
     try {
-      Set<Range> ranges = new ZooKeeperInstance(instance, zookeepers).getConnector(user, pass.getBytes()).tableOperations()
-          .splitRangeByTablets(table, new Range(), Integer.parseInt(maxMaps));
+      Set<Range> ranges = opts.getConnector().tableOperations().splitRangeByTablets(opts.getTableName(), new Range(), opts.maxMaps);
       AccumuloInputFormat.setRanges(job.getConfiguration(), ranges);
       AccumuloInputFormat.disableAutoAdjustRanges(job.getConfiguration());
     } catch (Exception e) {
@@ -205,20 +188,20 @@ public class ContinuousVerify extends Co
     job.setMapOutputValueClass(VLongWritable.class);
     
     job.setReducerClass(CReducer.class);
-    job.setNumReduceTasks(Integer.parseInt(reducers));
+    job.setNumReduceTasks(opts.reducers);
     
     job.setOutputFormatClass(TextOutputFormat.class);
     
-    job.getConfiguration().setBoolean("mapred.map.tasks.speculative.execution", scanOffline);
+    job.getConfiguration().setBoolean("mapred.map.tasks.speculative.execution", opts.scanOffline);
 
-    TextOutputFormat.setOutputPath(job, new Path(outputdir));
+    TextOutputFormat.setOutputPath(job, new Path(opts.outputDir));
     
     job.waitForCompletion(true);
     
-    if (scanOffline) {
+    if (opts.scanOffline) {
       conn.tableOperations().delete(clone);
     }
-
+    opts.stopTracing();
     return job.isSuccessful() ? 0 : 1;
   }
   

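With --offline, run() never scans the live table: it clones it, takes the clone offline, lets the mappers read the clone's files directly, and drops the clone afterwards. The table lifecycle in isolation, assuming conn, table, and random as in run(); the try/finally cleanup is a suggestion, whereas the committed code deletes the clone only after a completed job:

    // Offline-clone verification lifecycle (sketch).
    String clone = table + "_" + String.format("%016x", Math.abs(random.nextLong()));
    conn.tableOperations().clone(table, clone, true,
        new HashMap<String,String>(), new HashSet<String>());  // flush first, no property overrides
    conn.tableOperations().offline(clone);
    try {
      // ... run the verify MapReduce job against the offline clone ...
    } finally {
      conn.tableOperations().delete(clone);
    }
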

