accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From vi...@apache.org
Subject svn commit: r1433166 [11/20] - in /accumulo/branches/ACCUMULO-259: ./ assemble/ assemble/platform/ assemble/scripts/ assemble/scripts/init.d/ bin/ conf/examples/1GB/native-standalone/ conf/examples/1GB/standalone/ conf/examples/2GB/native-standalone/ c...
Date Mon, 14 Jan 2013 22:03:34 GMT
Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java Mon Jan 14 22:03:24 2013
@@ -20,10 +20,10 @@ import java.io.IOException;
 import java.util.HashSet;
 import java.util.Map;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.format.DefaultFormatter;
@@ -37,11 +37,21 @@ import org.apache.hadoop.mapreduce.lib.o
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Takes a table and outputs the specified column to a set of part files on hdfs accumulo accumulo.examples.mapreduce.TableToFile <username> <password>
  * <tablename> <column> <hdfs-output-path>
  */
 public class TableToFile extends Configured implements Tool {
+  
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--output", description="output directory", required=true)
+    String output;
+    @Parameter(names="--columns", description="columns to extract, in cf:cq{,cf:cq,...} form")
+    String columns;
+  }
+  
   /**
    * The Mapper class that given a row number, will generate the appropriate output line.
    */
@@ -74,13 +84,14 @@ public class TableToFile extends Configu
   public int run(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
     Job job = new Job(getConf(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
     job.setJarByClass(this.getClass());
+    Opts opts = new Opts();
+    opts.parseArgs(getClass().getName(), args);
     
     job.setInputFormatClass(AccumuloInputFormat.class);
-    AccumuloInputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
-    AccumuloInputFormat.setInputInfo(job.getConfiguration(), args[2], args[3].getBytes(), args[4], new Authorizations());
+    opts.setAccumuloConfigs(job);
     
     HashSet<Pair<Text,Text>> columnsToFetch = new HashSet<Pair<Text,Text>>();
-    for (String col : args[5].split(",")) {
+    for (String col : opts.columns.split(",")) {
       int idx = col.indexOf(":");
       Text cf = new Text(idx < 0 ? col : col.substring(0, idx));
       Text cq = idx < 0 ? null : new Text(col.substring(idx + 1));
@@ -97,7 +108,7 @@ public class TableToFile extends Configu
     job.setNumReduceTasks(0);
     
     job.setOutputFormatClass(TextOutputFormat.class);
-    TextOutputFormat.setOutputPath(job, new Path(args[6]));
+    TextOutputFormat.setOutputPath(job, new Path(opts.output));
     
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;

Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java Mon Jan 14 22:03:24 2013
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
@@ -46,6 +47,8 @@ import org.apache.hadoop.mapreduce.TaskA
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Generate the *almost* official terasort input data set. (See below) The user specifies the number of rows and the output directory and this class runs a
  * map/reduce program to generate the data. The format of the data is:
@@ -60,10 +63,7 @@ import org.apache.hadoop.util.ToolRunner
  * the same way TeraSort does use 10000000000 rows and 10/10 byte key length and 78/78 byte value length. Along with the 10 byte row id and \r\n this gives you
  * 100 byte row * 10000000000 rows = 1tb. Min/Max ranges for key and value parameters are inclusive/inclusive respectively.
  * 
- * Params <numrows> <minkeylength> <maxkeylength> <minvaluelength> <maxvaluelength> <tablename> <instance> <zoohosts> <username> <password> [numsplits]
- * numsplits allows you specify how many splits, and therefore mappers, to use
- * 
- * 
+ *  
  */
 public class TeraSortIngest extends Configured implements Tool {
   /**
@@ -343,10 +343,27 @@ public class TeraSortIngest extends Conf
     System.exit(res);
   }
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--count", description="number of rows to ingest", required=true)
+    long numRows;
+    @Parameter(names={"-nk", "--minKeySize"}, description="minimum key size", required=true)
+    int minKeyLength;
+    @Parameter(names={"-xk", "--maxKeySize"}, description="maximum key size", required=true)
+    int maxKeyLength;
+    @Parameter(names={"-nv", "--minValueSize"}, description="minimum value size", required=true)
+    int minValueLength;
+    @Parameter(names={"-xv", "--maxValueSize"}, description="maximum value size", required=true)
+    int maxValueLength;
+    @Parameter(names="--splits", description="number of splits to create in the table")
+    int splits = 0;
+  }
+  
   @Override
   public int run(String[] args) throws Exception {
     Job job = new Job(getConf(), "TeraSortCloud");
     job.setJarByClass(this.getClass());
+    Opts opts = new Opts();
+    opts.parseArgs(TeraSortIngest.class.getName(), args);
     
     job.setInputFormatClass(RangeInputFormat.class);
     job.setMapperClass(SortGenMapper.class);
@@ -356,20 +373,19 @@ public class TeraSortIngest extends Conf
     job.setNumReduceTasks(0);
     
     job.setOutputFormatClass(AccumuloOutputFormat.class);
-    AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(), args[6], args[7]);
-    AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), args[8], args[9].getBytes(), true, null);
+    opts.setAccumuloConfigs(job);
     AccumuloOutputFormat.setMaxMutationBufferSize(job.getConfiguration(), 10L * 1000 * 1000);
     
     Configuration conf = job.getConfiguration();
-    conf.setLong(NUMROWS, Long.parseLong(args[0]));
-    conf.setInt("cloudgen.minkeylength", Integer.parseInt(args[1]));
-    conf.setInt("cloudgen.maxkeylength", Integer.parseInt(args[2]));
-    conf.setInt("cloudgen.minvaluelength", Integer.parseInt(args[3]));
-    conf.setInt("cloudgen.maxvaluelength", Integer.parseInt(args[4]));
-    conf.set("cloudgen.tablename", args[5]);
+    conf.setLong(NUMROWS, opts.numRows);
+    conf.setInt("cloudgen.minkeylength", opts.minKeyLength);
+    conf.setInt("cloudgen.maxkeylength", opts.maxKeyLength);
+    conf.setInt("cloudgen.minvaluelength", opts.minValueLength);
+    conf.setInt("cloudgen.maxvaluelength", opts.maxValueLength);
+    conf.set("cloudgen.tablename", opts.tableName);
     
     if (args.length > 10)
-      conf.setInt(NUMSPLITS, Integer.parseInt(args[10]));
+      conf.setInt(NUMSPLITS, opts.splits);
     
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;

Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java Mon Jan 14 22:03:24 2013
@@ -4,13 +4,12 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.HashSet;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
@@ -22,6 +21,8 @@ import org.apache.hadoop.mapreduce.lib.o
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -70,34 +71,37 @@ public class UniqueColumns extends Confi
     }
   }
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--output", description="output directory")
+    String output;
+    @Parameter(names="--reducers", description="number of reducers to use", required=true)
+    int reducers;
+    @Parameter(names="--offline", description="run against an offline table")
+    boolean offline = false;
+  }
+  
   
   @Override
   public int run(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(UniqueColumns.class.getName(), args);
     
-    if (args.length != 8) {
-      throw new IllegalArgumentException("Usage : " + UniqueColumns.class.getSimpleName()
-          + " <instance name> <zookeepers> <user> <password> <table> <output directory> <num reducers> offline|online");
-    }
-
-    boolean scanOffline = args[7].equals("offline");
-    String table = args[4];
     String jobName = this.getClass().getSimpleName() + "_" + System.currentTimeMillis();
     
     Job job = new Job(getConf(), jobName);
     job.setJarByClass(this.getClass());
 
-    String clone = table;
+    String clone = opts.tableName;
     Connector conn = null;
-    if (scanOffline) {
+    if (opts.offline) {
       /*
        * this example clones the table and takes it offline. If you plan to run map reduce jobs over a table many times, it may be more efficient to compact the
        * table, clone it, and then keep using the same clone as input for map reduce.
        */
       
-      ZooKeeperInstance zki = new ZooKeeperInstance(args[0], args[1]);
-      conn = zki.getConnector(args[2], args[3].getBytes());
-      clone = table + "_" + jobName;
-      conn.tableOperations().clone(table, clone, true, new HashMap<String,String>(), new HashSet<String>());
+      conn = opts.getConnector();
+      clone = opts.tableName + "_" + jobName;
+      conn.tableOperations().clone(opts.tableName, clone, true, new HashMap<String,String>(), new HashSet<String>());
       conn.tableOperations().offline(clone);
       
       AccumuloInputFormat.setScanOffline(job.getConfiguration(), true);
@@ -106,9 +110,7 @@ public class UniqueColumns extends Confi
 
     
     job.setInputFormatClass(AccumuloInputFormat.class);
-    AccumuloInputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
-    AccumuloInputFormat.setInputInfo(job.getConfiguration(), args[2], args[3].getBytes(), clone, new Authorizations());
-    
+    opts.setAccumuloConfigs(job);
     
     job.setMapperClass(UMapper.class);
     job.setMapOutputKeyClass(Text.class);
@@ -117,14 +119,14 @@ public class UniqueColumns extends Confi
     job.setCombinerClass(UReducer.class);
     job.setReducerClass(UReducer.class);
 
-    job.setNumReduceTasks(Integer.parseInt(args[6]));
+    job.setNumReduceTasks(opts.reducers);
 
     job.setOutputFormatClass(TextOutputFormat.class);
-    TextOutputFormat.setOutputPath(job, new Path(args[5]));
+    TextOutputFormat.setOutputPath(job, new Path(opts.output));
     
     job.waitForCompletion(true);
     
-    if (scanOffline) {
+    if (opts.offline) {
       conn.tableOperations().delete(clone);
     }
 

Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java Mon Jan 14 22:03:24 2013
@@ -18,16 +18,11 @@ package org.apache.accumulo.examples.sim
 
 import java.io.IOException;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.Parser;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.LongWritable;
@@ -38,24 +33,17 @@ import org.apache.hadoop.mapreduce.lib.i
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * A simple map reduce job that inserts word counts into accumulo. See the README for instructions on how to run this.
  * 
  */
 public class WordCount extends Configured implements Tool {
-  private static Options opts;
-  private static Option passwordOpt;
-  private static Option usernameOpt;
-  private static String USAGE = "wordCount <instance name> <zoo keepers> <input dir> <output table>";
   
-  static {
-    usernameOpt = new Option("u", "username", true, "username");
-    passwordOpt = new Option("p", "password", true, "password");
-    
-    opts = new Options();
-    
-    opts.addOption(usernameOpt);
-    opts.addOption(passwordOpt);
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--input", description="input directory")
+    String inputDirectory;
   }
   
   public static class MapClass extends Mapper<LongWritable,Text,Text,Mutation> {
@@ -77,25 +65,15 @@ public class WordCount extends Configure
     }
   }
   
-  public int run(String[] unprocessed_args) throws Exception {
-    Parser p = new BasicParser();
-    
-    CommandLine cl = p.parse(opts, unprocessed_args);
-    String[] args = cl.getArgs();
-    
-    String username = cl.getOptionValue(usernameOpt.getOpt(), "root");
-    String password = cl.getOptionValue(passwordOpt.getOpt(), "secret");
-    
-    if (args.length != 4) {
-      System.out.println("ERROR: Wrong number of parameters: " + args.length + " instead of 4.");
-      return printUsage();
-    }
+  public int run(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(WordCount.class.getName(), args);
     
     Job job = new Job(getConf(), WordCount.class.getName());
     job.setJarByClass(this.getClass());
     
     job.setInputFormatClass(TextInputFormat.class);
-    TextInputFormat.setInputPaths(job, new Path(args[2]));
+    TextInputFormat.setInputPaths(job, new Path(opts.inputDirectory));
     
     job.setMapperClass(MapClass.class);
     
@@ -104,18 +82,11 @@ public class WordCount extends Configure
     job.setOutputFormatClass(AccumuloOutputFormat.class);
     job.setOutputKeyClass(Text.class);
     job.setOutputValueClass(Mutation.class);
-    AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), username, password.getBytes(), true, args[3]);
-    AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
+    opts.setAccumuloConfigs(job);
     job.waitForCompletion(true);
     return 0;
   }
   
-  private int printUsage() {
-    HelpFormatter hf = new HelpFormatter();
-    hf.printHelp(USAGE, opts);
-    return 0;
-  }
-  
   public static void main(String[] args) throws Exception {
     int res = ToolRunner.run(CachedConfiguration.getInstance(), new WordCount(), args);
     System.exit(res);

Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java Mon Jan 14 22:03:24 2013
@@ -21,9 +21,8 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.util.Collection;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.mapreduce.AccumuloFileOutputFormat;
 import org.apache.accumulo.core.client.mapreduce.lib.partition.RangePartitioner;
 import org.apache.accumulo.core.data.Key;
@@ -44,6 +43,8 @@ import org.apache.hadoop.mapreduce.lib.i
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Example map reduce job that bulk ingest data into an accumulo table. The expected input is text files containing tab separated key value pairs on each line.
  */
@@ -92,11 +93,16 @@ public class BulkIngestExample extends C
     }
   }
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--inputDir", required=true)
+    String inputDir;
+    @Parameter(names="--workDir", required=true)
+    String workDir;
+  }
+  
   public int run(String[] args) {
-    if (args.length != 7) {
-      System.out.println("ERROR: Wrong number of parameters: " + args.length + " instead of 7.");
-      return printUsage();
-    }
+    Opts opts = new Opts();
+    opts.parseArgs(BulkIngestExample.class.getName(), args);
     
     Configuration conf = getConf();
     PrintStream out = null;
@@ -112,23 +118,17 @@ public class BulkIngestExample extends C
       
       job.setReducerClass(ReduceClass.class);
       job.setOutputFormatClass(AccumuloFileOutputFormat.class);
+      opts.setAccumuloConfigs(job);
       
-      Instance instance = new ZooKeeperInstance(args[0], args[1]);
-      String user = args[2];
-      byte[] pass = args[3].getBytes();
-      String tableName = args[4];
-      String inputDir = args[5];
-      String workDir = args[6];
+      Connector connector = opts.getConnector();
       
-      Connector connector = instance.getConnector(user, pass);
-      
-      TextInputFormat.setInputPaths(job, new Path(inputDir));
-      AccumuloFileOutputFormat.setOutputPath(job, new Path(workDir + "/files"));
+      TextInputFormat.setInputPaths(job, new Path(opts.inputDir));
+      AccumuloFileOutputFormat.setOutputPath(job, new Path(opts.workDir + "/files"));
       
       FileSystem fs = FileSystem.get(conf);
-      out = new PrintStream(new BufferedOutputStream(fs.create(new Path(workDir + "/splits.txt"))));
+      out = new PrintStream(new BufferedOutputStream(fs.create(new Path(opts.workDir + "/splits.txt"))));
       
-      Collection<Text> splits = connector.tableOperations().getSplits(tableName, 100);
+      Collection<Text> splits = connector.tableOperations().getSplits(opts.tableName, 100);
       for (Text split : splits)
         out.println(new String(Base64.encodeBase64(TextUtil.getBytes(split))));
       
@@ -136,13 +136,13 @@ public class BulkIngestExample extends C
       out.close();
       
       job.setPartitionerClass(RangePartitioner.class);
-      RangePartitioner.setSplitFile(job, workDir + "/splits.txt");
+      RangePartitioner.setSplitFile(job, opts.workDir + "/splits.txt");
       
       job.waitForCompletion(true);
-      Path failures = new Path(workDir, "failures");
+      Path failures = new Path(opts.workDir, "failures");
       fs.delete(failures, true);
-      fs.mkdirs(new Path(workDir, "failures"));
-      connector.tableOperations().importDirectory(tableName, workDir + "/files", workDir + "/failures", false);
+      fs.mkdirs(new Path(opts.workDir, "failures"));
+      connector.tableOperations().importDirectory(opts.tableName, opts.workDir + "/files", opts.workDir + "/failures", false);
       
     } catch (Exception e) {
       throw new RuntimeException(e);
@@ -154,11 +154,6 @@ public class BulkIngestExample extends C
     return 0;
   }
   
-  private int printUsage() {
-    System.out.println("accumulo " + this.getClass().getName() + " <instanceName> <zooKeepers> <username> <password> <table> <input dir> <work dir> ");
-    return 0;
-  }
-  
   public static void main(String[] args) throws Exception {
     int res = ToolRunner.run(CachedConfiguration.getInstance(), new BulkIngestExample(), args);
     System.exit(res);

Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java Mon Jan 14 22:03:24 2013
@@ -21,26 +21,32 @@ import java.io.IOException;
 import java.io.PrintStream;
 
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
+import com.beust.jcommander.Parameter;
+
 public class GenerateTestData {
   
+  static class Opts extends org.apache.accumulo.core.cli.Help {
+    @Parameter(names="--start-row", required=true)
+    int startRow = 0;
+    @Parameter(names="--count", required=true)
+    int numRows = 0;
+    @Parameter(names="--output", required=true)
+    String outputFile;
+  }
+  
   public static void main(String[] args) throws IOException {
-    int startRow = Integer.parseInt(args[0]);
-    int numRows = Integer.parseInt(args[1]);
-    String outputFile = args[2];
+    Opts opts = new Opts();
+    opts.parseArgs(GenerateTestData.class.getName(), args);
     
-    Configuration conf = CachedConfiguration.getInstance();
-    FileSystem fs = FileSystem.get(conf);
+    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
+    PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(new Path(opts.outputFile))));
     
-    PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(new Path(outputFile))));
-    
-    for (int i = 0; i < numRows; i++) {
-      out.println(String.format("row_%08d\tvalue_%08d", i + startRow, i + startRow));
+    for (int i = 0; i < opts.numRows; i++) {
+      out.println(String.format("row_%08d\tvalue_%08d", i + opts.startRow, i + opts.startRow));
     }
-    
     out.close();
   }
   

Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java Mon Jan 14 22:03:24 2013
@@ -16,38 +16,35 @@
  */
 package org.apache.accumulo.examples.simple.mapreduce.bulk;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 public class SetupTable {
   
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableExistsException {
-    Connector conn = new ZooKeeperInstance(args[0], args[1]).getConnector(args[2], args[3].getBytes());
-    if (args.length == 5) {
-      // create a basic table
-      conn.tableOperations().create(args[4]);
-    } else if (args.length > 5) {
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(description="<split> { <split> ... } ")
+    List<String> splits = new ArrayList<String>();
+  }
+  
+  public static void main(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(SetupTable.class.getName(), args);
+    Connector conn = opts.getConnector();
+    conn.tableOperations().create(opts.tableName);
+    if (!opts.splits.isEmpty()) {
       // create a table with initial partitions
       TreeSet<Text> intialPartitions = new TreeSet<Text>();
-      for (int i = 5; i < args.length; ++i)
-        intialPartitions.add(new Text(args[i]));
-      conn.tableOperations().create(args[4]);
-      
-      try {
-        conn.tableOperations().addSplits(args[4], intialPartitions);
-      } catch (TableNotFoundException e) {
-        // unlikely
-        throw new RuntimeException(e);
+      for (String split : opts.splits) {
+        intialPartitions.add(new Text(split));
       }
-    } else {
-      System.err.println("Usage : SetupTable <instance> <zookeepers> <username> <password> <table name> [<split point>{ <split point}]");
-    }
+      conn.tableOperations().addSplits(opts.tableName, intialPartitions);
+    } 
   }
 }

Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java Mon Jan 14 22:03:24 2013
@@ -19,49 +19,44 @@ package org.apache.accumulo.examples.sim
 import java.util.Iterator;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
+import com.beust.jcommander.Parameter;
+
 public class VerifyIngest {
   private static final Logger log = Logger.getLogger(VerifyIngest.class);
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--start-row")
+    int startRow = 0;
+    @Parameter(names="--count", required=true, description="number of rows to verify")
+    int numRows = 0;
+  }
+  
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    if (args.length != 7) {
-      System.err.println("VerifyIngest <instance name> <zoo keepers> <username> <password> <table> <startRow> <numRows> ");
-      return;
-    }
+    Opts opts = new Opts();
+    opts.parseArgs(VerifyIngest.class.getName(), args);
     
-    String instanceName = args[0];
-    String zooKeepers = args[1];
-    String user = args[2];
-    byte[] pass = args[3].getBytes();
-    String table = args[4];
-    
-    int startRow = Integer.parseInt(args[5]);
-    int numRows = Integer.parseInt(args[6]);
-    
-    Instance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-    Connector connector = instance.getConnector(user, pass);
-    Scanner scanner = connector.createScanner(table, Constants.NO_AUTHS);
+    Connector connector = opts.getConnector();
+    Scanner scanner = connector.createScanner(opts.tableName, opts.auths);
     
-    scanner.setRange(new Range(new Text(String.format("row_%08d", startRow)), null));
+    scanner.setRange(new Range(new Text(String.format("row_%08d", opts.startRow)), null));
     
     Iterator<Entry<Key,Value>> si = scanner.iterator();
     
     boolean ok = true;
     
-    for (int i = startRow; i < numRows; i++) {
+    for (int i = opts.startRow; i < opts.numRows; i++) {
       
       if (si.hasNext()) {
         Entry<Key,Value> entry = si.next();

Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java Mon Jan 14 22:03:24 2013
@@ -20,20 +20,23 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
 import java.util.Random;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.BatchScannerOpts;
+import org.apache.accumulo.core.cli.ClientOpts;
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.IntersectingIterator;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Using the doc2word table created by Reverse.java, this program randomly selects N words per document. Then it continually queries a random set of words in
  * the shard table (created by {@link Index}) using the {@link IntersectingIterator}.
@@ -42,35 +45,33 @@ import org.apache.hadoop.io.Text;
  */
 
 public class ContinuousQuery {
-  public static void main(String[] args) throws Exception {
-    
-    if (args.length != 7 && args.length != 8) {
-      System.err.println("Usage : " + ContinuousQuery.class.getName()
-          + " <instance> <zoo keepers> <shard table> <doc2word table> <user> <pass> <num query terms> [iterations]");
-      System.exit(-1);
-    }
-    
-    String instance = args[0];
-    String zooKeepers = args[1];
-    String table = args[2];
-    String docTable = args[3];
-    String user = args[4];
-    String pass = args[5];
-    int numTerms = Integer.parseInt(args[6]);
+  
+  static class Opts extends ClientOpts {
+    @Parameter(names="--shardTable", required=true, description="name of the shard table")
+    String table = null;
+    @Parameter(names="--doc2Term", required=true, description="name of the doc2Term table")
+    String doc2Term;
+    @Parameter(names="--terms", required=true, description="the number of terms in the query")
+    int numTerms;
+    @Parameter(names="--count", description="the number of queries to run")
     long iterations = Long.MAX_VALUE;
-    if (args.length > 7)
-      iterations = Long.parseLong(args[7]);
+  }
+  
+  public static void main(String[] args) throws Exception {
+    Opts opts = new Opts();
+    BatchScannerOpts bsOpts = new BatchScannerOpts();
+    opts.parseArgs(ContinuousQuery.class.getName(), args, bsOpts);
     
-    ZooKeeperInstance zki = new ZooKeeperInstance(instance, zooKeepers);
-    Connector conn = zki.getConnector(user, pass.getBytes());
+    Connector conn = opts.getConnector();
     
-    ArrayList<Text[]> randTerms = findRandomTerms(conn.createScanner(docTable, Constants.NO_AUTHS), numTerms);
+    ArrayList<Text[]> randTerms = findRandomTerms(conn.createScanner(opts.doc2Term, opts.auths), opts.numTerms);
     
     Random rand = new Random();
     
-    BatchScanner bs = conn.createBatchScanner(table, Constants.NO_AUTHS, 20);
+    BatchScanner bs = conn.createBatchScanner(opts.table, opts.auths, bsOpts.scanThreads);
+    bs.setTimeout(bsOpts.scanTimeout, TimeUnit.MILLISECONDS);
     
-    for (long i = 0; i < iterations; i += 1) {
+    for (long i = 0; i < opts.iterations; i += 1) {
       Text[] columns = randTerms.get(rand.nextInt(randTerms.size()));
       
       bs.clearScanIterators();

Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java Mon Jan 14 22:03:24 2013
@@ -18,15 +18,19 @@ package org.apache.accumulo.examples.sim
 
 import java.io.File;
 import java.io.FileReader;
+import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.List;
 
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * This program indexes a set of documents given on the command line into a shard table.
  * 
@@ -88,37 +92,25 @@ public class Index {
     
   }
   
-  private static BatchWriter setupBatchWriter(String instance, String zooKeepers, String table, String user, String pass) throws Exception {
-    ZooKeeperInstance zinstance = new ZooKeeperInstance(instance, zooKeepers);
-    Connector conn = zinstance.getConnector(user, pass.getBytes());
-    return conn.createBatchWriter(table, 50000000, 300000l, 4);
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--partitions", required=true, description="the number of shards to create")
+    int partitions;
+    @Parameter(required=true, description="<file> { <file> ... }")
+    List<String> files = new ArrayList<String>();
   }
   
   public static void main(String[] args) throws Exception {
-    
-    if (args.length < 7) {
-      System.err.println("Usage : " + Index.class.getName() + " <instance> <zoo keepers> <table> <user> <pass> <num partitions> <file>{ <file>}");
-      System.exit(-1);
-    }
-    
-    String instance = args[0];
-    String zooKeepers = args[1];
-    String table = args[2];
-    String user = args[3];
-    String pass = args[4];
-    
-    int numPartitions = Integer.parseInt(args[5]);
+    Opts opts = new Opts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(Index.class.getName(), args, bwOpts);
     
     String splitRegex = "\\W+";
     
-    BatchWriter bw = setupBatchWriter(instance, zooKeepers, table, user, pass);
-    
-    for (int i = 6; i < args.length; i++) {
-      index(numPartitions, new File(args[i]), splitRegex, bw);
+    BatchWriter bw = opts.getConnector().createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());    
+    for (String filename : opts.files) {
+      index(opts.partitions, new File(filename), splitRegex, bw);
     }
-    
     bw.close();
-    
   }
   
 }

Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java Mon Jan 14 22:03:24 2013
@@ -16,20 +16,25 @@
  */
 package org.apache.accumulo.examples.simple.shard;
 
+import java.util.ArrayList;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.BatchScannerOpts;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.IntersectingIterator;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * This program queries a set of terms in the shard table (populated by {@link Index}) using the {@link IntersectingIterator}.
  * 
@@ -38,30 +43,27 @@ import org.apache.hadoop.io.Text;
 
 public class Query {
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(description="<term> { <term> ... }")
+    List<String> terms = new ArrayList<String>();
+  }
+  
   /**
    * @param args
    */
   public static void main(String[] args) throws Exception {
-    
-    if (args.length < 6) {
-      System.err.println("Usage : " + Query.class.getName() + " <instance> <zoo keepers> <table> <user> <pass> <term>{ <term>}");
-      System.exit(-1);
-    }
-    
-    String instance = args[0];
-    String zooKeepers = args[1];
-    String table = args[2];
-    String user = args[3];
-    String pass = args[4];
-    
-    ZooKeeperInstance zki = new ZooKeeperInstance(instance, zooKeepers);
-    Connector conn = zki.getConnector(user, pass.getBytes());
-    
-    BatchScanner bs = conn.createBatchScanner(table, Constants.NO_AUTHS, 20);
-    
-    Text columns[] = new Text[args.length - 5];
-    for (int i = 5; i < args.length; i++) {
-      columns[i - 5] = new Text(args[i]);
+    Opts opts = new Opts();
+    BatchScannerOpts bsOpts = new BatchScannerOpts();
+    opts.parseArgs(Query.class.getName(), args, bsOpts);
+    
+    Connector conn = opts.getConnector();
+    BatchScanner bs = conn.createBatchScanner(opts.tableName, opts.auths, bsOpts.scanThreads);
+    bs.setTimeout(bsOpts.scanTimeout, TimeUnit.MILLISECONDS);
+    
+    Text columns[] = new Text[opts.terms.size()];
+    int i = 0;
+    for (String term : opts.terms) {
+      columns[i++] = new Text(term);
     }
     IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
     IntersectingIterator.setColumnFamilies(ii, columns);

Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java Mon Jan 14 22:03:24 2013
@@ -18,16 +18,19 @@ package org.apache.accumulo.examples.sim
 
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ClientOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * The program reads an accumulo table written by {@link Index} and writes out to another table. It writes out a mapping of documents to terms. The document to
  * term mapping is used by {@link ContinuousQuery}.
@@ -36,25 +39,25 @@ import org.apache.hadoop.io.Text;
  */
 
 public class Reverse {
+  
+  static class Opts extends ClientOpts {
+    @Parameter(names="--shardTable")
+    String shardTable = "shard";
+    @Parameter(names="--doc2Term")
+    String doc2TermTable = "doc2Term";
+  }
+  
   public static void main(String[] args) throws Exception {
-    
-    if (args.length != 6) {
-      System.err.println("Usage : " + Reverse.class.getName() + " <instance> <zoo keepers> <shard table> <doc2word table> <user> <pass>");
-      System.exit(-1);
-    }
-    
-    String instance = args[0];
-    String zooKeepers = args[1];
-    String inTable = args[2];
-    String outTable = args[3];
-    String user = args[4];
-    String pass = args[5];
-    
-    ZooKeeperInstance zki = new ZooKeeperInstance(instance, zooKeepers);
-    Connector conn = zki.getConnector(user, pass.getBytes());
-    
-    Scanner scanner = conn.createScanner(inTable, Constants.NO_AUTHS);
-    BatchWriter bw = conn.createBatchWriter(outTable, 50000000, 600000l, 4);
+    Opts opts = new Opts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.parseArgs(Reverse.class.getName(), args, scanOpts, bwOpts);
+    
+    Connector conn = opts.getConnector();
+    
+    Scanner scanner = conn.createScanner(opts.shardTable, opts.auths);
+    scanner.setBatchSize(scanOpts.scanBatchSize);
+    BatchWriter bw = conn.createBatchWriter(opts.doc2TermTable, bwOpts.getBatchWriterConfig());
     
     for (Entry<Key,Value> entry : scanner) {
       Key key = entry.getKey();

Modified: accumulo/branches/ACCUMULO-259/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java Mon Jan 14 22:03:24 2013
@@ -21,6 +21,8 @@ import java.util.Map.Entry;
 
 import junit.framework.TestCase;
 
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
@@ -30,9 +32,7 @@ import org.apache.accumulo.core.data.Val
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.examples.simple.dirlist.FileCount;
-import org.apache.accumulo.examples.simple.dirlist.Ingest;
-import org.apache.accumulo.examples.simple.dirlist.QueryUtil;
+import org.apache.accumulo.examples.simple.dirlist.FileCount.Opts;
 import org.apache.hadoop.io.Text;
 
 public class CountTest extends TestCase {
@@ -63,7 +63,13 @@ public class CountTest extends TestCase 
     scanner.fetchColumn(new Text("dir"), new Text("counts"));
     assertFalse(scanner.iterator().hasNext());
     
-    FileCount fc = new FileCount("counttest", null, "root", "", "dirlisttable", "", "", true);
+    Opts opts = new Opts();
+    ScannerOpts scanOpts = new ScannerOpts();
+    BatchWriterOpts bwOpts = new BatchWriterOpts();
+    opts.instance = "counttest";
+    opts.tableName = "dirlisttable";
+    opts.mock = true;
+    FileCount fc = new FileCount(opts, scanOpts, bwOpts);
     fc.run();
     
     ArrayList<Pair<String,String>> expected = new ArrayList<Pair<String,String>>();

Propchange: accumulo/branches/ACCUMULO-259/fate/
------------------------------------------------------------------------------
--- svn:ignore (original)
+++ svn:ignore Mon Jan 14 22:03:24 2013
@@ -2,3 +2,4 @@
 .classpath
 .project
 target
+accumulo-fate.iml

Modified: accumulo/branches/ACCUMULO-259/fate/pom.xml
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/fate/pom.xml?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/fate/pom.xml (original)
+++ accumulo/branches/ACCUMULO-259/fate/pom.xml Mon Jan 14 22:03:24 2013
@@ -51,7 +51,7 @@
     </dependency>
 
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
+      <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
     </dependency>
   </dependencies>

Modified: accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java (original)
+++ accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java Mon Jan 14 22:03:24 2013
@@ -129,8 +129,8 @@ public class AdminUtil<T> {
     }
   }
   
-  public void prepDelete(ZooStore<T> zs, String path, String txidStr) {
-    checkGlobalLock(path);
+  public void prepDelete(ZooStore<T> zs, IZooReaderWriter zk, String path, String txidStr) {
+    checkGlobalLock(zk, path);
     
     long txid = Long.parseLong(txidStr, 16);
     zs.reserve(txid);
@@ -138,8 +138,8 @@ public class AdminUtil<T> {
     zs.unreserve(txid, 0);
   }
   
-  public void prepFail(ZooStore<T> zs, String path, String txidStr) {
-    checkGlobalLock(path);
+  public void prepFail(ZooStore<T> zs, IZooReaderWriter zk, String path, String txidStr) {
+    checkGlobalLock(zk, path);
     
     long txid = Long.parseLong(txidStr, 16);
     zs.reserve(txid);
@@ -163,9 +163,17 @@ public class AdminUtil<T> {
     }
   }
   
-  public void checkGlobalLock(String path) {
-    if (ZooLock.getLockData(path) != null) {
-      System.err.println("ERROR: Master lock is held, not running");
+  public void checkGlobalLock(IZooReaderWriter zk, String path) {
+    try {
+      if (ZooLock.getLockData(zk.getZooKeeper(), path) != null) {
+        System.err.println("ERROR: Master lock is held, not running");
+        System.exit(-1);
+      }
+    } catch (KeeperException e) {
+      System.err.println("ERROR: Could not read master lock, not running " + e.getMessage());
+      System.exit(-1);
+    } catch (InterruptedException e) {
+      System.err.println("ERROR: Could not read master lock, not running " + e.getMessage());
       System.exit(-1);
     }
   }

Modified: accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/TStore.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/TStore.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/TStore.java (original)
+++ accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/TStore.java Mon Jan 14 22:03:24 2013
@@ -18,6 +18,7 @@ package org.apache.accumulo.fate;
 
 import java.io.Serializable;
 import java.util.EnumSet;
+import java.util.List;
 
 /**
  * Transaction Store: a place to save transactions
@@ -38,7 +39,9 @@ public interface TStore<T> {
     /** Transaction has failed and has been fully rolled back */
     FAILED,
     /** Transaction has succeeded */
-    SUCCESSFUL, UNKNOWN
+    SUCCESSFUL,
+    /** Unrecognized or unknown transaction state */
+    UNKNOWN
   }
   
   /**
@@ -129,4 +132,12 @@ public interface TStore<T> {
    */
   public void delete(long tid);
   
+  /**
+   * List all transaction ids in the store.
+   * 
+   * @return a list of transaction ids for all transactions currently in the store
+   */
+  public List<Long> list();
+  
 }

Modified: accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/ZooStore.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/ZooStore.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/ZooStore.java (original)
+++ accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/ZooStore.java Mon Jan 14 22:03:24 2013
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
 import java.io.Serializable;
-import java.nio.charset.Charset;
 import java.security.SecureRandom;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -54,8 +53,6 @@ public class ZooStore<T> implements TSto
   private SecureRandom idgenerator;
   private long statusChangeEvents = 0;
   private int reservationsWaiting = 0;
-
-  private static final Charset utf8 = Charset.forName("UTF8");
   
   private byte[] serialize(Object o) {
     
@@ -107,7 +104,7 @@ public class ZooStore<T> implements TSto
       try {
         // looking at the code for SecureRandom, it appears to be thread safe
         long tid = Math.abs(idgenerator.nextLong());
-        zk.putPersistentData(getTXPath(tid), TStatus.NEW.name().getBytes(utf8), NodeExistsPolicy.FAIL);
+        zk.putPersistentData(getTXPath(tid), TStatus.NEW.name().getBytes(), NodeExistsPolicy.FAIL);
         return tid;
       } catch (NodeExistsException nee) {
         // exist, so just try another random #
@@ -362,7 +359,7 @@ public class ZooStore<T> implements TSto
     verifyReserved(tid);
     
     try {
-      zk.putPersistentData(getTXPath(tid), status.name().getBytes(utf8), NodeExistsPolicy.OVERWRITE);
+      zk.putPersistentData(getTXPath(tid), status.name().getBytes(), NodeExistsPolicy.OVERWRITE);
     } catch (Exception e) {
       throw new RuntimeException(e);
     }
@@ -390,7 +387,7 @@ public class ZooStore<T> implements TSto
     
     try {
       if (so instanceof String) {
-        zk.putPersistentData(getTXPath(tid) + "/prop_" + prop, ("S " + so).getBytes(utf8), NodeExistsPolicy.OVERWRITE);
+        zk.putPersistentData(getTXPath(tid) + "/prop_" + prop, ("S " + so).getBytes(), NodeExistsPolicy.OVERWRITE);
       } else {
         byte[] sera = serialize(so);
         byte[] data = new byte[sera.length + 2];
@@ -427,6 +424,7 @@ public class ZooStore<T> implements TSto
     }
   }
   
+  @Override
   public List<Long> list() {
     try {
       ArrayList<Long> l = new ArrayList<Long>();

Propchange: accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/ZooStore.java
------------------------------------------------------------------------------
  Merged /accumulo/branches/1.4/fate/src/main/java/org/apache/accumulo/fate/ZooStore.java:r1407301,1423032,1427864
  Merged /accumulo/trunk/fate/src/main/java/org/apache/accumulo/fate/ZooStore.java:r1404663-1432174,1432271-1433134
  Merged /accumulo/branches/1.4/src/fate/src/main/java/org/apache/accumulo/fate/ZooStore.java:r1407157,1423032,1427919,1428054

Modified: accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/util/LoggingRunnable.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/util/LoggingRunnable.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/util/LoggingRunnable.java (original)
+++ accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/util/LoggingRunnable.java Mon Jan 14 22:03:24 2013
@@ -46,20 +46,5 @@ public class LoggingRunnable implements 
       }
     }
   }
-  
-  public static void main(String[] args) {
-    Runnable r = new Runnable() {
-      @Override
-      public void run() {
-        int x[] = new int[0];
-        
-        x[0]++;
-      }
-    };
-    
-    LoggingRunnable lr = new LoggingRunnable(null, r);
-    lr.run();
-    
-  }
-  
+
 }

Modified: accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLock.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLock.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLock.java (original)
+++ accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLock.java Mon Jan 14 22:03:24 2013
@@ -16,7 +16,6 @@
  */
 package org.apache.accumulo.fate.zookeeper;
 
-import java.nio.charset.Charset;
 import java.util.Arrays;
 import java.util.Iterator;
 import java.util.Map.Entry;
@@ -32,8 +31,6 @@ import org.apache.log4j.Logger;
 // A ReadWriteLock that can be implemented in ZooKeeper.  Features the ability to store data
 // with the lock, and recover the lock using that data to find the lock.
 public class DistributedReadWriteLock implements java.util.concurrent.locks.ReadWriteLock {
-
-  private static final Charset utf8 = Charset.forName("UTF8");
   
   static enum LockType {
     READ, WRITE,
@@ -74,7 +71,7 @@ public class DistributedReadWriteLock im
     }
     
     public byte[] getLockData() {
-      byte typeBytes[] = type.name().getBytes(utf8);
+      byte typeBytes[] = type.name().getBytes();
       byte[] result = new byte[userData.length + 1 + typeBytes.length];
       System.arraycopy(typeBytes, 0, result, 0, typeBytes.length);
       result[typeBytes.length] = ':';

Modified: accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/IZooReader.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/IZooReader.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/IZooReader.java (original)
+++ accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/IZooReader.java Mon Jan 14 22:03:24 2013
@@ -38,4 +38,6 @@ public interface IZooReader {
   
   public abstract boolean exists(String zPath, Watcher watcher) throws KeeperException, InterruptedException;
   
+  public abstract void sync(final String path) throws KeeperException, InterruptedException;
+  
 }

Modified: accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReader.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReader.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReader.java (original)
+++ accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReader.java Mon Jan 14 22:03:24 2013
@@ -17,10 +17,14 @@
 package org.apache.accumulo.fate.zookeeper;
 
 import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.AsyncCallback.VoidCallback;
+import org.apache.zookeeper.KeeperException.Code;
 import org.apache.zookeeper.data.Stat;
 
 public class ZooReader implements IZooReader {
@@ -71,6 +75,29 @@ public class ZooReader implements IZooRe
     return getZooKeeper().exists(zPath, watcher) != null;
   }
   
+  @Override
+  public void sync(final String path) throws KeeperException, InterruptedException {
+    final AtomicInteger rc = new AtomicInteger();
+    final AtomicBoolean waiter = new AtomicBoolean(false);
+    getZooKeeper().sync(path, new VoidCallback() {
+      @Override
+      public void processResult(int code, String arg1, Object arg2) {
+        rc.set(code);
+        synchronized (waiter) {
+          waiter.set(true);
+          waiter.notifyAll();
+        }
+      }}, null);
+    synchronized (waiter) {
+      while (!waiter.get())
+        waiter.wait();
+    }
+    Code code = Code.get(rc.get());
+    if (code != KeeperException.Code.OK) {
+      throw KeeperException.create(code);
+    }
+  }  
+  
   public ZooReader(String keepers, int timeout) {
     this.keepers = keepers;
     this.timeout = timeout;

Modified: accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReaderWriter.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReaderWriter.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReaderWriter.java (original)
+++ accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReaderWriter.java Mon Jan 14 22:03:24 2013
@@ -22,13 +22,11 @@ import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.security.SecurityPermission;
 import java.util.List;
-import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.accumulo.fate.util.UtilWaitThread;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.log4j.Logger;
-import org.apache.zookeeper.AsyncCallback.VoidCallback;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException.BadVersionException;
@@ -205,21 +203,5 @@ public class ZooReaderWriter extends Zoo
     putPersistentData(path, new byte[] {}, NodeExistsPolicy.SKIP);
   }
 
-  @Override
-  public void sync(final String path) throws KeeperException, InterruptedException {
-    final AtomicBoolean waiter = new AtomicBoolean(false);
-    getZooKeeper().sync(path, new VoidCallback() {
-      @Override
-      public void processResult(int arg0, String arg1, Object arg2) {
-        synchronized (waiter) {
-          waiter.set(true);
-          waiter.notifyAll();
-        }
-      }}, null);
-    synchronized (waiter) {
-      if (!waiter.get())
-        waiter.wait();
-    }
-  }
-  
+
 }

Modified: accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReservation.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReservation.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReservation.java (original)
+++ accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooReservation.java Mon Jan 14 22:03:24 2013
@@ -16,8 +16,6 @@
  */
 package org.apache.accumulo.fate.zookeeper;
 
-import java.nio.charset.Charset;
-
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.log4j.Logger;
@@ -27,8 +25,6 @@ import org.apache.zookeeper.KeeperExcept
 import org.apache.zookeeper.data.Stat;
 
 public class ZooReservation {
-
-  private static final Charset utf8 = Charset.forName("UTF8");
   
   public static boolean attempt(IZooReaderWriter zk, String path, String reservationID, String debugInfo) throws KeeperException, InterruptedException {
     if (reservationID.contains(":"))
@@ -36,7 +32,7 @@ public class ZooReservation {
     
     while (true) {
       try {
-        zk.putPersistentData(path, (reservationID + ":" + debugInfo).getBytes(utf8), NodeExistsPolicy.FAIL);
+        zk.putPersistentData(path, (reservationID + ":" + debugInfo).getBytes(), NodeExistsPolicy.FAIL);
         return true;
       } catch (NodeExistsException nee) {
         Stat stat = new Stat();

Propchange: accumulo/branches/ACCUMULO-259/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java
------------------------------------------------------------------------------
  Merged /accumulo/branches/1.4/src/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java:r1407157,1423032,1427919,1428054
  Merged /accumulo/trunk/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java:r1404663-1432174,1432271-1433134
  Merged /accumulo/branches/1.4/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooSession.java:r1407301,1423032,1427864

Modified: accumulo/branches/ACCUMULO-259/fate/src/test/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLockTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/fate/src/test/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLockTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/fate/src/test/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLockTest.java (original)
+++ accumulo/branches/ACCUMULO-259/fate/src/test/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLockTest.java Mon Jan 14 22:03:24 2013
@@ -21,7 +21,7 @@ import java.util.TreeMap;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 
-import junit.framework.Assert;
+import static org.junit.Assert.*;
 
 import org.apache.accumulo.fate.zookeeper.DistributedReadWriteLock.QueueLock;
 import org.junit.Test;
@@ -32,7 +32,7 @@ public class DistributedReadWriteLockTes
   public static class MockQueueLock implements QueueLock {
     
     long next = 0L;
-    SortedMap<Long,byte[]> locks = new TreeMap<Long,byte[]>();
+    final SortedMap<Long,byte[]> locks = new TreeMap<Long,byte[]>();
     
     @Override
     synchronized public SortedMap<Long,byte[]> getEarlierEntries(long entry) {
@@ -67,7 +67,7 @@ public class DistributedReadWriteLockTes
     
     void read() {
       for (int i = 0; i < data.length; i++)
-        Assert.assertEquals(counter, data[i]);
+        assertEquals(counter, data[i]);
     }
     
     void write() {
@@ -85,8 +85,8 @@ public class DistributedReadWriteLockTes
     QueueLock qlock = new MockQueueLock();
     
     final ReadWriteLock locker = new DistributedReadWriteLock(qlock, "locker1".getBytes());
-    Lock readLock = locker.readLock();
-    Lock writeLock = locker.writeLock();
+    final Lock readLock = locker.readLock();
+    final Lock writeLock = locker.writeLock();
     readLock.lock();
     readLock.unlock();
     writeLock.lock();
@@ -101,7 +101,7 @@ public class DistributedReadWriteLockTes
       threads[i] = new Thread() {
         public void run() {
           if (which % 2 == 0) {
-            Lock wl = locker.writeLock();
+            final Lock wl = locker.writeLock();
             wl.lock();
             try {
               data.write();
@@ -109,7 +109,7 @@ public class DistributedReadWriteLockTes
               wl.unlock();
             }
           } else {
-            Lock rl = locker.readLock();
+            final Lock rl = locker.readLock();
             rl.lock();
             data.read();
             try {

Modified: accumulo/branches/ACCUMULO-259/fate/src/test/java/org/apache/accumulo/fate/zookeeper/TransactionWatcherTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/fate/src/test/java/org/apache/accumulo/fate/zookeeper/TransactionWatcherTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/fate/src/test/java/org/apache/accumulo/fate/zookeeper/TransactionWatcherTest.java (original)
+++ accumulo/branches/ACCUMULO-259/fate/src/test/java/org/apache/accumulo/fate/zookeeper/TransactionWatcherTest.java Mon Jan 14 22:03:24 2013
@@ -22,7 +22,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Callable;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 
 import org.junit.Test;
 

Propchange: accumulo/branches/ACCUMULO-259/packages/
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Mon Jan 14 22:03:24 2013
@@ -0,0 +1,18 @@
+/accumulo/branches/1.3/packages:1309369,1328076,1330246,1330264,1330944,1349971,1354669
+/accumulo/branches/1.3/src/packages:1309369,1328076,1330246,1349971,1354669
+/accumulo/branches/1.4/packages:1305403-1382577,1382613,1388120,1388629,1393868,1396065,1396572,1396616,1396758,1396772,1397048,1397113,1397117,1397176,1397189,1397383,1397700,1397921,1398286,1398308,1398359,1398393,1398399,1398438,1398514,1398801,1399211,1399717,1400976,1402571,1402682,1407301,1423032,1423629,1427864
+/accumulo/branches/1.4/src/packages:1305403-1356900,1358206,1363430,1364778,1365213,1382566,1382923,1388120,1396772,1397048,1397113,1397117,1397176,1397189,1397383,1397700,1397921,1398286,1398308,1398359,1398393,1398399,1398438,1399211,1400976,1402571,1402682,1407157,1423032,1423624,1427919,1428054,1428098
+/accumulo/branches/1.4.2/packages:1399210,1402681
+/accumulo/branches/1.4.2/src/packages:1399210,1402681
+/accumulo/branches/ACCUMULO-672/packages:1357826,1357829,1357842,1357858,1358236,1359163
+/accumulo/trunk/packages:1343822-1391624,1391755-1398536,1398540-1403334,1404663-1432174,1432271-1433134
+/accumulo/trunk/src/packages:1329425,1332224,1332278,1332347,1333047,1333070,1341000,1342373,1350779,1351691,1356400,1359721
+/incubator/accumulo/branches/1.3/packages:1190280,1190413,1190420,1190427,1190500,1195622,1195625,1195629,1195635,1196044,1196054,1196057,1196071-1196072,1196106,1197066,1198935,1199383,1203683,1204625,1205547,1205880,1206169,1208031,1209124,1209526,1209532,1209539,1209541,1209587,1209657,1210518,1210571,1210596,1210598,1213424,1214320,1225006,1227215,1227231,1227611,1228195,1230180,1230736,1231043,1236873,1245632
+/incubator/accumulo/branches/1.3/src/packages:1190280,1190413,1190420,1190427,1190500,1195622,1195625,1195629,1195635,1196044,1196054,1196057,1196071-1196072,1196106,1197066,1198935,1199383,1203683,1204625,1205547,1205880,1206169,1208031,1209124,1209526,1209532,1209539,1209541,1209587,1209657,1210518,1210571,1210596,1210598,1213424,1214320,1225006,1227215,1227231,1227611,1228195,1230180,1230736,1231043,1236873,1245632
+/incubator/accumulo/branches/1.3.5rc/packages:1209938
+/incubator/accumulo/branches/1.3.5rc/src/packages:1209938
+/incubator/accumulo/branches/1.4/packages:1201902-1305402
+/incubator/accumulo/branches/1.4/src/packages:1201899-1305402
+/incubator/accumulo/branches/1.4.0rc/packages:1304025,1305326
+/incubator/accumulo/branches/1.4.0rc/src/packages:1304025,1305326
+/incubator/accumulo/trunk/src/packages:1178656-1201898,1205476,1205570,1208726,1222413,1222719,1222725,1222733-1222734,1296160-1296495

Modified: accumulo/branches/ACCUMULO-259/packages/deb/accumulo-native/postinst
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/packages/deb/accumulo-native/postinst?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/packages/deb/accumulo-native/postinst (original)
+++ accumulo/branches/ACCUMULO-259/packages/deb/accumulo-native/postinst Mon Jan 14 22:03:24 2013
@@ -16,11 +16,19 @@
 # limitations under the License.
 
 # postinst script for Apache Accumulo native libraries
-if [ -z $JAVA_HOME -o ! -f $JAVA_HOME/jre/bin/ ] ; then
+echo JAVA HOME IS $JAVA_HOME
+if [ -z "$JAVA_HOME" -o ! -f "$JAVA_HOME/jre/bin/" ] ; then
   JAVA_HOME=`readlink -f /usr/bin/java`
   JAVA_HOME=`dirname $JAVA_HOME`
-  JAVA_HOME=`cd $JAVA_HOME/../../; pwd`
+  JAVA_HOME=`cd $JAVA_HOME/../; pwd`
 fi
 export JAVA_HOME
+export PATH=$PATH:$JAVA_HOME/bin
+echo $JAVA_HOME
+echo $PATH
 cd /usr/lib/accumulo/src/server/src/main/c++
 make
+
+if [ $? -ne 0 ]; then
+  echo "Issues building native libraries. Make sure root has javah on its classpath"
+fi

Modified: accumulo/branches/ACCUMULO-259/packages/deb/accumulo/control
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/packages/deb/accumulo/control?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/packages/deb/accumulo/control (original)
+++ accumulo/branches/ACCUMULO-259/packages/deb/accumulo/control Mon Jan 14 22:03:24 2013
@@ -21,7 +21,7 @@ Package: accumulo
 Version: [[version]]
 Section: misc
 Architecture: all
-Depends: java6-runtime, hadoop, hadoop-zookeeper, coreutils, ssh-server
+Depends: java6-runtime, hadoop, zookeeper, coreutils, ssh-server
 Recommends: accumulo-native
 Description: Accumulo is a large distributed structured store based on Google's BigTable design.
 Distribution: development

Modified: accumulo/branches/ACCUMULO-259/packages/deb/accumulo/postinst
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/packages/deb/accumulo/postinst?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/packages/deb/accumulo/postinst (original)
+++ accumulo/branches/ACCUMULO-259/packages/deb/accumulo/postinst Mon Jan 14 22:03:24 2013
@@ -26,7 +26,7 @@ mkdir -p /var/lib/accumulo/walogs
 ln -fs /var/log/accumulo/logs /usr/lib/accumulo/logs 
 ln -fs /var/lib/accumulo/walogs /usr/lib/accumulo/walogs 
 
-if [ -z $JAVA_HOME -o ! -f $JAVA_HOME/jre/bin/ ] ; then
+if [ -z "$JAVA_HOME" -o ! -f "$JAVA_HOME/jre/bin/" ] ; then
   JAVA_HOME=`readlink -f /usr/bin/java`
   JAVA_HOME=`dirname $JAVA_HOME`
   JAVA_HOME=`cd $JAVA_HOME/../../; pwd`
@@ -37,7 +37,7 @@ fi
 if [ -z $HADOOP_HOME ]; then
   HADOOP_HOME="/usr/lib/hadoop"
 fi
-for files in `ls /etc/accumulo/conf/accumulo-env.sh.*`; do
+for files in `ls /etc/accumulo/conf/examples/*/*/accumulo-env.sh`; do
   sed -i -e "s:/path/to/java:$JAVA_HOME:" $files 
   sed -i -e "s:/path/to/zookeeper:$ZOOKEEPER_HOME:" $files 
   sed -i -e "s:/path/to/hadoop:$HADOOP_HOME:" $files 

Modified: accumulo/branches/ACCUMULO-259/pom.xml
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/pom.xml?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/pom.xml (original)
+++ accumulo/branches/ACCUMULO-259/pom.xml Mon Jan 14 22:03:24 2013
@@ -21,7 +21,7 @@
   <parent>
     <groupId>org.apache</groupId>
     <artifactId>apache</artifactId>
-    <version>10</version>
+    <version>11</version>
   </parent>
   <groupId>org.apache.accumulo</groupId>
   <artifactId>accumulo</artifactId>
@@ -53,6 +53,7 @@
     <module>start</module>
     <module>examples</module>
     <module>assemble</module>
+    <module>test</module>
   </modules>
 
   <build>
@@ -143,7 +144,7 @@
             <configuration>
               <outputDirectory>../lib</outputDirectory>
               <!-- just grab the non-provided runtime dependencies -->
-              <includeArtifactIds>commons-collections,commons-configuration,commons-io,commons-lang,jline,log4j,libthrift,commons-jci-core,commons-jci-fam,commons-logging,commons-logging-api</includeArtifactIds>
+              <includeArtifactIds>commons-collections,commons-configuration,commons-io,commons-lang,jline,log4j,libthrift,commons-logging,commons-logging-api,commons-vfs2,gson,jcommander</includeArtifactIds>
               <excludeTransitive>true</excludeTransitive>
             </configuration>
           </execution>
@@ -194,11 +195,6 @@
       </plugin>
       <plugin>
         <artifactId>maven-surefire-plugin</artifactId>
-        <configuration>
-          <environmentVariables>
-            <ACCUMULO_HOME>..</ACCUMULO_HOME>
-          </environmentVariables>
-        </configuration>
       </plugin>
     </plugins>
 
@@ -241,7 +237,7 @@
         </plugin>
         <plugin>
           <artifactId>maven-javadoc-plugin</artifactId>
-          <version>2.8.1</version>
+          <version>2.9</version>
         </plugin>
         <plugin>
           <artifactId>maven-surefire-plugin</artifactId>
@@ -337,7 +333,7 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-site-plugin</artifactId>
-          <version>3.0</version>
+          <version>3.2</version>
           <executions>
             <execution>
               <id>attach-descriptor</id>
@@ -356,7 +352,7 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-changes-plugin</artifactId>
-        <version>2.7.1</version>
+        <version>2.8</version>
         <configuration>
           <onlyCurrentVersion>true</onlyCurrentVersion>
           <statusIds>closed,resolved</statusIds>
@@ -387,19 +383,8 @@
       </plugin>
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
-        <artifactId>cobertura-maven-plugin</artifactId>
-        <version>2.5.2</version>
-        <configuration>
-          <formats>
-            <format>xml</format>
-            <format>html</format>
-          </formats>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
         <artifactId>findbugs-maven-plugin</artifactId>
-        <version>2.4.0</version>
+        <version>2.5.2</version>
         <configuration>
           <findbugsXmlOutput>true</findbugsXmlOutput>
           <findbugsXmlWithMessages>true</findbugsXmlWithMessages>
@@ -411,12 +396,19 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-javadoc-plugin</artifactId>
-        <version>2.8.1</version>
+        <version>2.9</version>
+        <reportSets>
+          <reportSet>
+            <reports>
+              <report>javadoc</report>
+            </reports>
+          </reportSet>
+        </reportSets>
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-project-info-reports-plugin</artifactId>
-        <version>2.4</version>
+        <version>2.6</version>
         <configuration>
           <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
         </configuration>
@@ -483,6 +475,80 @@
         </plugins>
       </build>
     </profile>
+    <profile>
+      <id>cobertura</id>
+      <reporting>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>cobertura-maven-plugin</artifactId>
+            <version>2.5.2</version>
+            <configuration>
+              <formats>
+                <format>xml</format>
+                <format>html</format>
+              </formats>
+            </configuration>
+          </plugin>
+        </plugins>
+      </reporting>
+    </profile>
+    <!-- profile for building against Hadoop 1.0.x
+    Activate by not specifying hadoop.profile -->
+    <profile>
+      <id>hadoop-1.0</id>
+      <activation>
+        <property>
+          <name>!hadoop.profile</name>
+        </property>
+      </activation>
+      <properties>
+        <hadoop.version>1.0.4</hadoop.version>
+        <slf4j.version>1.4.3</slf4j.version>
+      </properties>
+      <dependencyManagement>
+        <dependencies>
+          <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-core</artifactId>
+            <version>${hadoop.version}</version>
+            <scope>provided</scope>
+          </dependency>
+        </dependencies>
+      </dependencyManagement>
+    </profile>
+    <!-- profile for building against Hadoop 2.0.x
+    Activate using: mvn -Dhadoop.profile=2.0 -->
+    <profile>
+      <id>hadoop-2.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>2.0</value>
+        </property>
+      </activation>
+      <properties>
+        <slf4j.version>1.6.1</slf4j.version>
+        <hadoop.version>2.0.2-alpha</hadoop.version>
+        <avro.version>1.5.3</avro.version>
+      </properties>
+      <dependencyManagement>
+        <dependencies>
+          <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-client</artifactId>
+            <version>${hadoop.version}</version>
+            <scope>provided</scope>
+          </dependency>
+          <dependency>
+            <groupId>org.apache.avro</groupId>
+            <artifactId>avro</artifactId>
+            <version>${avro.version}</version>
+            <scope>provided</scope>
+          </dependency>
+        </dependencies>
+      </dependencyManagement>
+    </profile>
   </profiles>
 
   <dependencyManagement>
@@ -491,21 +557,15 @@
       <dependency>
         <groupId>junit</groupId>
         <artifactId>junit</artifactId>
-        <version>4.4</version>
+        <version>4.11</version>
         <scope>test</scope>
       </dependency>
 
       <!-- provided dependencies needed at runtime -->
       <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-core</artifactId>
-        <version>0.20.205.0</version>
-        <scope>provided</scope>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
+        <groupId>org.apache.zookeeper</groupId>
         <artifactId>zookeeper</artifactId>
-        <version>3.3.1</version>
+        <version>${zookeeper.version}</version>
         <scope>provided</scope>
       </dependency>
       <dependency>
@@ -517,7 +577,7 @@
       <dependency>
         <groupId>org.mortbay.jetty</groupId>
         <artifactId>jetty</artifactId>
-        <version>[5.1,7.0)</version>
+        <version>[6.1,7.0)</version>
         <scope>provided</scope>
       </dependency>
 
@@ -549,6 +609,11 @@
       </dependency>
       <dependency>
         <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-test</artifactId>
+        <version>1.5.0-SNAPSHOT</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.accumulo</groupId>
         <artifactId>examples-simple</artifactId>
         <version>1.5.0-SNAPSHOT</version>
       </dependency>
@@ -625,12 +690,12 @@
       <dependency>
         <groupId>org.slf4j</groupId>
         <artifactId>slf4j-api</artifactId>
-        <version>1.4.3</version>
+        <version>${slf4j.version}</version>
       </dependency>
       <dependency>
         <groupId>org.slf4j</groupId>
         <artifactId>slf4j-log4j12</artifactId>
-        <version>1.4.3</version>
+        <version>${slf4j.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.commons</groupId>
@@ -642,6 +707,16 @@
         <artifactId>commons-jci-fam</artifactId>
         <version>1.0</version>
       </dependency>
+      <dependency>
+      	<groupId>com.beust</groupId>
+      	<artifactId>jcommander</artifactId>
+      	<version>1.30</version>
+      </dependency>
+      <dependency>
+        <groupId>com.google.code.gson</groupId>
+        <artifactId>gson</artifactId>
+        <version>2.2.2</version>
+      </dependency>
     </dependencies>
   </dependencyManagement>
 
@@ -675,6 +750,7 @@
 
   <properties>
     <targetJdk>1.6</targetJdk>
+    <zookeeper.version>3.3.1</zookeeper.version>
   </properties>
 
 </project>

Propchange: accumulo/branches/ACCUMULO-259/server/
------------------------------------------------------------------------------
--- svn:ignore (original)
+++ svn:ignore Mon Jan 14 22:03:24 2013
@@ -1,7 +1,5 @@
 target
-
 .project
-
 .settings
-
 .classpath
+accumulo-server.iml

Propchange: accumulo/branches/ACCUMULO-259/server/
------------------------------------------------------------------------------
  Merged /accumulo/branches/1.4/server:r1407301,1423032,1423629,1427864
  Merged /accumulo/branches/1.4/src/server:r1407157,1423032,1423624,1427919,1428054
  Merged /accumulo/trunk/server:r1404663-1432174,1432271-1433134

Modified: accumulo/branches/ACCUMULO-259/server/pom.xml
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/pom.xml?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/pom.xml (original)
+++ accumulo/branches/ACCUMULO-259/server/pom.xml Mon Jan 14 22:03:24 2013
@@ -66,11 +66,7 @@
       <artifactId>libthrift</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
+      <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
     </dependency>
     <dependency>
@@ -109,6 +105,14 @@
       <groupId>commons-lang</groupId>
       <artifactId>commons-lang</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.code.gson</groupId>
+      <artifactId>gson</artifactId>
+     </dependency>
   </dependencies>
 
   <profiles>
@@ -136,6 +140,43 @@
         </plugins>
       </build>
     </profile>
+    <!-- profile for building against Hadoop 1.0.x
+    Activate by not specifying hadoop.profile -->
+    <profile>
+      <id>hadoop-1.0</id>
+      <activation>
+        <property>
+          <name>!hadoop.profile</name>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+    <!-- profile for building against Hadoop 2.0.x
+    Activate using: mvn -Dhadoop.profile=2.0 -->
+    <profile>
+      <id>hadoop-2.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>2.0</value>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-client</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.avro</groupId>
+          <artifactId>avro</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
   </profiles>
 
 </project>

Propchange: accumulo/branches/ACCUMULO-259/server/src/main/c++/nativeMap/
------------------------------------------------------------------------------
--- svn:ignore (original)
+++ svn:ignore Mon Jan 14 22:03:24 2013
@@ -1 +1,2 @@
 *.jnilib
+libNativeMap-Linux-amd64-64.so

Modified: accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java (original)
+++ accumulo/branches/ACCUMULO-259/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java Mon Jan 14 22:03:24 2013
@@ -431,7 +431,7 @@ public class BulkImporter {
   }
   
   private class AssignmentTask implements Runnable {
-    Map<Path,List<KeyExtent>> assignmentFailures;
+    final Map<Path,List<KeyExtent>> assignmentFailures;
     String location;
     AuthInfo credentials;
     private Map<KeyExtent,List<PathSize>> assignmentsPerTablet;
@@ -578,7 +578,8 @@ public class BulkImporter {
   private List<KeyExtent> assignMapFiles(AuthInfo credentials, String location, Map<KeyExtent,List<PathSize>> assignmentsPerTablet) throws AccumuloException,
       AccumuloSecurityException {
     try {
-      TabletClientService.Iface client = ThriftUtil.getTServerClient(location, instance.getConfiguration());
+      long timeInMillis = instance.getConfiguration().getTimeInMillis(Property.TSERV_BULK_TIMEOUT);
+      TabletClientService.Iface client = ThriftUtil.getTServerClient(location, instance.getConfiguration(), timeInMillis);
       try {
         HashMap<KeyExtent,Map<String,org.apache.accumulo.core.data.thrift.MapFileInfo>> files = new HashMap<KeyExtent,Map<String,org.apache.accumulo.core.data.thrift.MapFileInfo>>();
         for (Entry<KeyExtent,List<PathSize>> entry : assignmentsPerTablet.entrySet()) {



Mime
View raw message