accumulo-commits mailing list archives

From ctubb...@apache.org
Subject [24/66] [abbrv] accumulo git commit: ACCUMULO-3451 Format master branch (1.7.0-SNAPSHOT)
Date Fri, 09 Jan 2015 02:44:28 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java
index 52ea0bd..e899ff5 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java
@@ -49,13 +49,13 @@ public class FileDataIngest {
   public static final String REFS_FILE_EXT = "filext";
   public static final ByteSequence CHUNK_CF_BS = new ArrayByteSequence(CHUNK_CF.getBytes(), 0, CHUNK_CF.getLength());
   public static final ByteSequence REFS_CF_BS = new ArrayByteSequence(REFS_CF.getBytes(), 0, REFS_CF.getLength());
-  
+
   int chunkSize;
   byte[] chunkSizeBytes;
   byte[] buf;
   MessageDigest md5digest;
   ColumnVisibility cv;
-  
+
   public FileDataIngest(int chunkSize, ColumnVisibility colvis) {
     this.chunkSize = chunkSize;
     chunkSizeBytes = intToBytes(chunkSize);
@@ -67,35 +67,35 @@ public class FileDataIngest {
     }
     cv = colvis;
   }
-  
+
   public String insertFileData(String filename, BatchWriter bw) throws MutationsRejectedException, IOException {
     if (chunkSize == 0)
       return "";
     md5digest.reset();
     String uid = hexString(md5digest.digest(filename.getBytes()));
-    
+
     // read through file once, calculating hashes
     md5digest.reset();
     InputStream fis = null;
     int numRead = 0;
     try {
-	    fis = new FileInputStream(filename);
-	    numRead = fis.read(buf);
-	    while (numRead >= 0) {
-	      if (numRead > 0) {
-	        md5digest.update(buf, 0, numRead);
-	      }
-	      numRead = fis.read(buf);
-	    }
+      fis = new FileInputStream(filename);
+      numRead = fis.read(buf);
+      while (numRead >= 0) {
+        if (numRead > 0) {
+          md5digest.update(buf, 0, numRead);
+        }
+        numRead = fis.read(buf);
+      }
     } finally {
       if (fis != null) {
-    	  fis.close();
+        fis.close();
       }
     }
-    
+
     String hash = hexString(md5digest.digest());
     Text row = new Text(hash);
-    
+
     // write info to accumulo
     Mutation m = new Mutation(row);
     m.put(REFS_CF, KeyUtil.buildNullSepText(uid, REFS_ORIG_FILE), cv, new Value(filename.getBytes()));
@@ -103,34 +103,34 @@ public class FileDataIngest {
     if (fext != null)
       m.put(REFS_CF, KeyUtil.buildNullSepText(uid, REFS_FILE_EXT), cv, new Value(fext.getBytes()));
     bw.addMutation(m);
-    
+
     // read through file again, writing chunks to accumulo
     int chunkCount = 0;
     try {
-	    fis = new FileInputStream(filename);
-	    numRead = fis.read(buf);
-	    while (numRead >= 0) {
-	      while (numRead < buf.length) {
-	        int moreRead = fis.read(buf, numRead, buf.length - numRead);
-	        if (moreRead > 0)
-	          numRead += moreRead;
-	        else if (moreRead < 0)
-	          break;
-	      }
-	      m = new Mutation(row);
-	      Text chunkCQ = new Text(chunkSizeBytes);
-	      chunkCQ.append(intToBytes(chunkCount), 0, 4);
-	      m.put(CHUNK_CF, chunkCQ, cv, new Value(buf, 0, numRead));
-	      bw.addMutation(m);
-	      if (chunkCount == Integer.MAX_VALUE)
-	        throw new RuntimeException("too many chunks for file " + filename + ", try raising chunk size");
-	      chunkCount++;
-	      numRead = fis.read(buf);
-	    }
+      fis = new FileInputStream(filename);
+      numRead = fis.read(buf);
+      while (numRead >= 0) {
+        while (numRead < buf.length) {
+          int moreRead = fis.read(buf, numRead, buf.length - numRead);
+          if (moreRead > 0)
+            numRead += moreRead;
+          else if (moreRead < 0)
+            break;
+        }
+        m = new Mutation(row);
+        Text chunkCQ = new Text(chunkSizeBytes);
+        chunkCQ.append(intToBytes(chunkCount), 0, 4);
+        m.put(CHUNK_CF, chunkCQ, cv, new Value(buf, 0, numRead));
+        bw.addMutation(m);
+        if (chunkCount == Integer.MAX_VALUE)
+          throw new RuntimeException("too many chunks for file " + filename + ", try raising chunk size");
+        chunkCount++;
+        numRead = fis.read(buf);
+      }
     } finally {
-    	if (fis != null) {
-    		fis.close();
-    	}
+      if (fis != null) {
+        fis.close();
+      }
     }
     m = new Mutation(row);
     Text chunkCQ = new Text(chunkSizeBytes);
@@ -139,14 +139,14 @@ public class FileDataIngest {
     bw.addMutation(m);
     return hash;
   }
-  
+
   public static int bytesToInt(byte[] b, int offset) {
     if (b.length <= offset + 3)
       throw new NumberFormatException("couldn't pull integer from bytes at offset " + offset);
     int i = (((b[offset] & 255) << 24) + ((b[offset + 1] & 255) << 16) + ((b[offset + 2] & 255) << 8) + ((b[offset + 3] & 255) << 0));
     return i;
   }
-  
+
   public static byte[] intToBytes(int l) {
     byte[] b = new byte[4];
     b[0] = (byte) (l >>> 24);
@@ -155,13 +155,13 @@ public class FileDataIngest {
     b[3] = (byte) (l >>> 0);
     return b;
   }
-  
+
   private static String getExt(String filename) {
     if (filename.indexOf(".") == -1)
       return null;
     return filename.substring(filename.lastIndexOf(".") + 1);
   }
-  
+
   public String hexString(byte[] bytes) {
     StringBuilder sb = new StringBuilder();
     for (byte b : bytes) {
@@ -169,24 +169,23 @@ public class FileDataIngest {
     }
     return sb.toString();
   }
-  
+
   public static class Opts extends ClientOnRequiredTable {
-    @Parameter(names="--vis", description="use a given visibility for the new counts", converter=VisibilityConverter.class)
+    @Parameter(names = "--vis", description = "use a given visibility for the new counts", converter = VisibilityConverter.class)
     ColumnVisibility visibility = new ColumnVisibility();
-    
-    @Parameter(names="--chunk", description="size of the chunks used to store partial files")
-    int chunkSize = 64*1024;
-    
-    @Parameter(description="<file> { <file> ... }")
+
+    @Parameter(names = "--chunk", description = "size of the chunks used to store partial files")
+    int chunkSize = 64 * 1024;
+
+    @Parameter(description = "<file> { <file> ... }")
     List<String> files = new ArrayList<String>();
   }
-  
-  
+
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(FileDataIngest.class.getName(), args, bwOpts);
-    
+
     Connector conn = opts.getConnector();
     if (!conn.tableOperations().exists(opts.getTableName())) {
       conn.tableOperations().create(opts.getTableName());

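The chunking scheme above stores each chunk under a column qualifier built from two 4-byte big-endian integers: the chunk size (chunkSizeBytes) followed by the chunk index (chunkCount), encoded with intToBytes and decoded with bytesToInt. A minimal standalone round trip, assuming only the encoding shown in the diff (class name and values are illustrative):

  public class ChunkCqRoundTrip {
    // same big-endian encoding as FileDataIngest.intToBytes
    static byte[] intToBytes(int l) {
      return new byte[] {(byte) (l >>> 24), (byte) (l >>> 16), (byte) (l >>> 8), (byte) l};
    }

    // same decoding as FileDataIngest.bytesToInt
    static int bytesToInt(byte[] b, int offset) {
      return ((b[offset] & 255) << 24) + ((b[offset + 1] & 255) << 16) + ((b[offset + 2] & 255) << 8) + (b[offset + 3] & 255);
    }

    public static void main(String[] args) {
      byte[] cq = new byte[8];
      System.arraycopy(intToBytes(64 * 1024), 0, cq, 0, 4); // chunk size half of the qualifier
      System.arraycopy(intToBytes(7), 0, cq, 4, 4);         // chunk index half of the qualifier
      System.out.println(bytesToInt(cq, 0) + "/" + bytesToInt(cq, 4)); // prints 65536/7
    }
  }
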
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java
index ee55fcc..75e32ae 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java
@@ -44,20 +44,20 @@ public class FileDataQuery {
   List<Entry<Key,Value>> lastRefs;
   private ChunkInputStream cis;
   Scanner scanner;
-  
-  public FileDataQuery(String instanceName, String zooKeepers, String user, AuthenticationToken token, String tableName, Authorizations auths) throws AccumuloException,
-      AccumuloSecurityException, TableNotFoundException {
+
+  public FileDataQuery(String instanceName, String zooKeepers, String user, AuthenticationToken token, String tableName, Authorizations auths)
+      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     ZooKeeperInstance instance = new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(instanceName).withZkHosts(zooKeepers));
     conn = instance.getConnector(user, token);
     lastRefs = new ArrayList<Entry<Key,Value>>();
     cis = new ChunkInputStream();
     scanner = conn.createScanner(tableName, auths);
   }
-  
+
   public List<Entry<Key,Value>> getLastRefs() {
     return lastRefs;
   }
-  
+
   public ChunkInputStream getData(String hash) throws IOException {
     scanner.setRange(new Range(hash));
     scanner.setBatchSize(1);
@@ -73,7 +73,7 @@ public class FileDataQuery {
     cis.setSource(pi);
     return cis;
   }
-  
+
   public String getSomeData(String hash, int numBytes) throws IOException {
     ChunkInputStream is = getData(hash);
     byte[] buf = new byte[numBytes];

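A hypothetical caller for FileDataQuery; the instance name, ZooKeeper hosts, credentials, table name, and hash below are placeholders, not values from this commit:

  import org.apache.accumulo.core.client.security.tokens.PasswordToken;
  import org.apache.accumulo.core.security.Authorizations;

  FileDataQuery fdq = new FileDataQuery("instance", "zoo1:2181", "root",
      new PasswordToken("secret"), "dataTable", Authorizations.EMPTY);
  String head = fdq.getSomeData("d41d8cd98f00b204e9800998ecf8427e", 100); // first 100 bytes of the file
  System.out.println(fdq.getLastRefs().size() + " reference(s) for this hash");
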
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/KeyUtil.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/KeyUtil.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/KeyUtil.java
index d0ebcb0..2f09785 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/KeyUtil.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/KeyUtil.java
@@ -25,10 +25,10 @@ import org.apache.hadoop.io.Text;
  */
 public class KeyUtil {
   public static final byte[] nullbyte = new byte[] {0};
-  
+
   /**
    * Join some number of strings using a null byte separator into a text object.
-   * 
+   *
    * @param s
    *          strings
    * @return a text object containing the strings separated by null bytes
@@ -41,10 +41,10 @@ public class KeyUtil {
     }
     return t;
   }
-  
+
   /**
    * Split a text object using a null byte separator into an array of strings.
-   * 
+   *
    * @param t
    *          null-byte separated text object
    * @return an array of strings

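The null-byte key scheme above in miniature: parts are joined with a single 0 byte, and splitting walks the bytes looking for that separator. A standalone sketch using only the hadoop Text API the class already depends on (part values are illustrative):

  import org.apache.hadoop.io.Text;

  Text t = new Text("uid123");                  // first part
  t.append(new byte[] {0}, 0, 1);               // null-byte separator, as in buildNullSepText
  t.append("README.txt".getBytes(), 0, "README.txt".length());
  byte[] b = t.getBytes();
  for (int i = 0; i < t.getLength(); i++)
    if (b[i] == 0)
      System.out.println("parts split at byte " + i); // prints: parts split at byte 6
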
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/VisibilityCombiner.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/VisibilityCombiner.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/VisibilityCombiner.java
index a3e2bcc..ab2e7fc 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/VisibilityCombiner.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/VisibilityCombiner.java
@@ -24,16 +24,16 @@ import org.apache.accumulo.core.data.ByteSequence;
  * A utility for merging visibilities into the form {@code (VIS1)|(VIS2)|...|(VISN)}. Used by the {@link ChunkCombiner}.
  */
 public class VisibilityCombiner {
-  
+
   private TreeSet<String> visibilities = new TreeSet<String>();
-  
+
   void add(ByteSequence cv) {
     if (cv.length() == 0)
       return;
-    
+
     int depth = 0;
     int offset = 0;
-    
+
     for (int i = 0; i < cv.length(); i++) {
       switch (cv.byteAt(i)) {
         case '(':
@@ -49,25 +49,25 @@ public class VisibilityCombiner {
             insert(cv.subSequence(offset, i));
             offset = i + 1;
           }
-          
+
           break;
       }
     }
-    
+
     insert(cv.subSequence(offset, cv.length()));
-    
+
     if (depth != 0)
       throw new IllegalArgumentException("Invalid vis " + cv);
-    
+
   }
-  
+
   private void insert(ByteSequence cv) {
     for (int i = 0; i < cv.length(); i++) {
-      
+
     }
-    
+
     String cvs = cv.toString();
-    
+
     if (cvs.charAt(0) != '(')
       cvs = "(" + cvs + ")";
     else {
@@ -85,14 +85,14 @@ public class VisibilityCombiner {
             break;
         }
       }
-      
+
       if (depthZeroCloses > 1)
         cvs = "(" + cvs + ")";
     }
-    
+
     visibilities.add(cvs);
   }
-  
+
   byte[] get() {
     StringBuilder sb = new StringBuilder();
     String sep = "";
@@ -101,7 +101,7 @@ public class VisibilityCombiner {
       sep = "|";
       sb.append(cvs);
     }
-    
+
     return sb.toString().getBytes();
   }
 }

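add() and get() above are package-private, so a caller must sit in the same package; under that assumption, the merged form promised by the Javadoc comes out as:

  VisibilityCombiner vc = new VisibilityCombiner();
  vc.add(new ArrayByteSequence("A&B".getBytes(), 0, 3));
  vc.add(new ArrayByteSequence("C".getBytes(), 0, 1));
  System.out.println(new String(vc.get())); // prints (A&B)|(C)
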
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java
index 74d8548..0e60086 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java
@@ -34,20 +34,20 @@ import org.apache.hadoop.io.Text;
  * Inserts 10K rows (50K entries) into accumulo with each row having 5 entries.
  */
 public class InsertWithBatchWriter {
-  
+
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, MutationsRejectedException, TableExistsException,
       TableNotFoundException {
     ClientOnRequiredTable opts = new ClientOnRequiredTable();
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(InsertWithBatchWriter.class.getName(), args, bwOpts);
-    
+
     Connector connector = opts.getConnector();
     MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(bwOpts.getBatchWriterConfig());
-    
+
     if (!connector.tableOperations().exists(opts.getTableName()))
       connector.tableOperations().create(opts.getTableName());
     BatchWriter bw = mtbw.getBatchWriter(opts.getTableName());
-    
+
     Text colf = new Text("colfam");
     System.out.println("writing ...");
     for (int i = 0; i < 10000; i++) {
@@ -61,5 +61,5 @@ public class InsertWithBatchWriter {
     }
     mtbw.close();
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java
index 4eaa31f..041c15f 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java
@@ -38,13 +38,13 @@ import com.beust.jcommander.Parameter;
  * Reads all data between two rows; all data after a given row; or all data in a table, depending on the number of arguments given.
  */
 public class ReadData {
-  
+
   private static final Logger log = Logger.getLogger(ReadData.class);
-  
+
   static class Opts extends ClientOnRequiredTable {
-    @Parameter(names="--startKey")
+    @Parameter(names = "--startKey")
     String startKey;
-    @Parameter(names="--endKey")
+    @Parameter(names = "--endKey")
     String endKey;
   }
 
@@ -52,9 +52,9 @@ public class ReadData {
     Opts opts = new Opts();
     ScannerOpts scanOpts = new ScannerOpts();
     opts.parseArgs(ReadData.class.getName(), args, scanOpts);
-    
+
     Connector connector = opts.getConnector();
-    
+
     Scanner scan = connector.createScanner(opts.getTableName(), opts.auths);
     scan.setBatchSize(scanOpts.scanBatchSize);
     Key start = null;
@@ -65,7 +65,7 @@ public class ReadData {
       end = new Key(new Text(opts.endKey));
     scan.setRange(new Range(start, end));
     Iterator<Entry<Key,Value>> iter = scan.iterator();
-    
+
     while (iter.hasNext()) {
       Entry<Key,Value> e = iter.next();
       Text colf = e.getKey().getColumnFamily();

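The three cases in the Javadoc map onto Range as below (row values are illustrative); a null Key bound is unbounded, which is how the code above treats a missing --startKey or --endKey:

  scan.setRange(new Range(new Key(new Text("a")), new Key(new Text("m")))); // between two rows
  scan.setRange(new Range(new Key(new Text("m")), null));                   // everything from a row on
  scan.setRange(new Range((Key) null, (Key) null));                         // the whole table
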
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java
index fd6d159..bbfca48 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java
@@ -38,43 +38,43 @@ import com.beust.jcommander.Parameter;
 /**
  * This example shows how a concurrent reader and writer can interfere with each other. It creates two threads that run forever reading and writing to the same
  * table.
- * 
+ *
  * When the example is run with isolation enabled, no interference will be observed.
- * 
+ *
  * When the example is run with out isolation, the reader will see partial mutations of a row.
- * 
+ *
  */
 
 public class InterferenceTest {
-  
+
   private static final int NUM_ROWS = 500;
   private static final int NUM_COLUMNS = 113; // scanner batches 1000 by default, so make num columns not a multiple of 10
   private static final Logger log = Logger.getLogger(InterferenceTest.class);
-  
+
   static class Writer implements Runnable {
-    
+
     private final BatchWriter bw;
     private final long iterations;
-    
+
     Writer(BatchWriter bw, long iterations) {
       this.bw = bw;
-      this.iterations = iterations; 
+      this.iterations = iterations;
     }
-    
+
     @Override
     public void run() {
       int row = 0;
       int value = 0;
-      
+
       for (long i = 0; i < iterations; i++) {
         Mutation m = new Mutation(new Text(String.format("%03d", row)));
         row = (row + 1) % NUM_ROWS;
-        
+
         for (int cq = 0; cq < NUM_COLUMNS; cq++)
           m.put(new Text("000"), new Text(String.format("%04d", cq)), new Value(("" + value).getBytes()));
-        
+
         value++;
-        
+
         try {
           bw.addMutation(m);
         } catch (MutationsRejectedException e) {
@@ -89,80 +89,79 @@ public class InterferenceTest {
       }
     }
   }
-  
+
   static class Reader implements Runnable {
-    
+
     private Scanner scanner;
     volatile boolean stop = false;
-    
+
     Reader(Scanner scanner) {
       this.scanner = scanner;
     }
-    
+
     @Override
     public void run() {
       while (!stop) {
         ByteSequence row = null;
         int count = 0;
-        
+
         // all columns in a row should have the same value,
         // use this hash set to track that
         HashSet<String> values = new HashSet<String>();
-        
+
         for (Entry<Key,Value> entry : scanner) {
           if (row == null)
             row = entry.getKey().getRowData();
-          
+
           if (!row.equals(entry.getKey().getRowData())) {
             if (count != NUM_COLUMNS)
               System.err.println("ERROR Did not see " + NUM_COLUMNS + " columns in row " + row);
-            
+
             if (values.size() > 1)
               System.err.println("ERROR Columns in row " + row + " had multiple values " + values);
-            
+
             row = entry.getKey().getRowData();
             count = 0;
             values.clear();
           }
-          
+
           count++;
-          
+
           values.add(entry.getValue().toString());
         }
-        
+
         if (count > 0 && count != NUM_COLUMNS)
           System.err.println("ERROR Did not see " + NUM_COLUMNS + " columns in row " + row);
-        
+
         if (values.size() > 1)
           System.err.println("ERROR Columns in row " + row + " had multiple values " + values);
       }
     }
-    
+
     public void stopNow() {
       stop = true;
     }
   }
-  
+
   static class Opts extends ClientOnRequiredTable {
-    @Parameter(names="--iterations", description="number of times to run", required=true)
+    @Parameter(names = "--iterations", description = "number of times to run", required = true)
     long iterations = 0;
-    @Parameter(names="--isolated", description="use isolated scans")
+    @Parameter(names = "--isolated", description = "use isolated scans")
     boolean isolated = false;
   }
-  
-  
+
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(InterferenceTest.class.getName(), args, bwOpts);
-    
+
     if (opts.iterations < 1)
       opts.iterations = Long.MAX_VALUE;
-    
+
     Connector conn = opts.getConnector();
     if (!conn.tableOperations().exists(opts.getTableName()))
       conn.tableOperations().create(opts.getTableName());
-    
+
     Thread writer = new Thread(new Writer(conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig()), opts.iterations));
     writer.start();
     Reader r;

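The hunk ends just before the reader is built; presumably the --isolated flag selects between a plain Scanner and an IsolatedScanner wrapper, consistent with the IsolatedScanner usage in ARS.java further down. A sketch of that choice:

  Scanner scanner = conn.createScanner(opts.getTableName(), opts.auths);
  if (opts.isolated)
    r = new Reader(new IsolatedScanner(scanner)); // whole-row snapshots: no partial mutations seen
  else
    r = new Reader(scanner);                      // may observe partially written rows
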
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
index 351a51c..01fbb8f 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
@@ -102,7 +102,7 @@ public class TableToFile extends Configured implements Tool {
   }
 
   /**
-   * 
+   *
    * @param args
    *          instanceName zookeepers username password table columns outputpath
    */

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java
index ade6ce1..8c48877 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License. You may obtain a copy of the License at
- * 
+ *
  * http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -59,12 +59,12 @@ import com.beust.jcommander.Parameter;
  * <li>The rowid is the right justified row id as a int.
  * <li>The filler consists of 7 runs of 10 characters from 'A' to 'Z'.
  * </ul>
- * 
+ *
  * This TeraSort is slightly modified to allow for variable length key sizes and value sizes. The row length isn't variable. To generate a terabyte of data in
  * the same way TeraSort does use 10000000000 rows and 10/10 byte key length and 78/78 byte value length. Along with the 10 byte row id and \r\n this gives you
  * 100 byte row * 10000000000 rows = 1tb. Min/Max ranges for key and value parameters are inclusive/inclusive respectively.
- * 
- * 
+ *
+ *
  */
 public class TeraSortIngest extends Configured implements Tool {
   /**
@@ -202,7 +202,7 @@ public class TeraSortIngest extends Configured implements Tool {
 
     /**
      * Start the random number generator on the given iteration.
-     * 
+     *
      * @param initalIteration
      *          the iteration number to start on
      */
@@ -290,7 +290,7 @@ public class TeraSortIngest extends Configured implements Tool {
 
     /**
      * Add the required filler bytes. Each row consists of 7 blocks of 10 characters and 1 block of 8 characters.
-     * 
+     *
      * @param rowId
      *          the current row number
      */

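The sizing arithmetic in the Javadoc checks out: a 10-byte row id, a 10-byte key, a 78-byte value, and the two-byte \r\n terminator give 100 bytes per row.

  long bytesPerRow = 10 + 10 + 78 + 2;    // row id + key + value + "\r\n" = 100
  long rows = 10000000000L;               // ten billion rows
  System.out.println(bytesPerRow * rows); // 1000000000000 bytes = 1 TB
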
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java
index 7bb7e69..7822910 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java
@@ -36,10 +36,10 @@ import org.apache.log4j.Logger;
 /**
  * A simple map reduce job that inserts word counts into accumulo. See the README for instructions on how to run this. This version does not use the ClientOpts
  * class to parse arguments as an example of using AccumuloInputFormat and AccumuloOutputFormat directly. See README.mapred for more details.
- * 
+ *
  */
 public class TokenFileWordCount extends Configured implements Tool {
-  
+
   private static final Logger log = Logger.getLogger(TokenFileWordCount.class);
 
   public static class MapClass extends Mapper<LongWritable,Text,Text,Mutation> {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java
index 4f1f861..8ead101 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java
@@ -32,18 +32,18 @@ import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Logger;
 
 import com.beust.jcommander.Parameter;
-import org.apache.log4j.Logger;
 
 /**
  * A simple map reduce job that inserts word counts into accumulo. See the README for instructions on how to run this.
- * 
+ *
  */
 public class WordCount extends Configured implements Tool {
 
   private static final Logger log = Logger.getLogger(WordCount.class);
-  
+
   static class Opts extends MapReduceClientOnRequiredTable {
     @Parameter(names = "--input", description = "input directory")
     String inputDirectory;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java
index ac96e9d..8651c39 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java
@@ -27,12 +27,12 @@ import org.apache.hadoop.io.Text;
 import com.beust.jcommander.Parameter;
 
 public class SetupTable {
-  
+
   static class Opts extends ClientOnRequiredTable {
-    @Parameter(description="<split> { <split> ... } ")
+    @Parameter(description = "<split> { <split> ... } ")
     List<String> splits = new ArrayList<String>();
   }
-  
+
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     opts.parseArgs(SetupTable.class.getName(), args);
@@ -45,6 +45,6 @@ public class SetupTable {
         intialPartitions.add(new Text(split));
       }
       conn.tableOperations().addSplits(opts.getTableName(), intialPartitions);
-    } 
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java
index 61d3f7e..fd54058 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java
@@ -35,52 +35,52 @@ import com.beust.jcommander.Parameter;
 
 public class VerifyIngest {
   private static final Logger log = Logger.getLogger(VerifyIngest.class);
-  
+
   static class Opts extends ClientOnRequiredTable {
-    @Parameter(names="--start-row")
+    @Parameter(names = "--start-row")
     int startRow = 0;
-    @Parameter(names="--count", required=true, description="number of rows to verify")
+    @Parameter(names = "--count", required = true, description = "number of rows to verify")
     int numRows = 0;
   }
-  
+
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     Opts opts = new Opts();
     opts.parseArgs(VerifyIngest.class.getName(), args);
-    
+
     Connector connector = opts.getConnector();
     Scanner scanner = connector.createScanner(opts.getTableName(), opts.auths);
-    
+
     scanner.setRange(new Range(new Text(String.format("row_%08d", opts.startRow)), null));
-    
+
     Iterator<Entry<Key,Value>> si = scanner.iterator();
-    
+
     boolean ok = true;
-    
+
     for (int i = opts.startRow; i < opts.numRows; i++) {
-      
+
       if (si.hasNext()) {
         Entry<Key,Value> entry = si.next();
-        
+
         if (!entry.getKey().getRow().toString().equals(String.format("row_%08d", i))) {
           log.error("unexpected row key " + entry.getKey().getRow().toString() + " expected " + String.format("row_%08d", i));
           ok = false;
         }
-        
+
         if (!entry.getValue().toString().equals(String.format("value_%08d", i))) {
           log.error("unexpected value " + entry.getValue().toString() + " expected " + String.format("value_%08d", i));
           ok = false;
         }
-        
+
       } else {
         log.error("no more rows, expected " + String.format("row_%08d", i));
         ok = false;
         break;
       }
-      
+
     }
-    
+
     if (ok)
       System.out.println("OK");
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/reservations/ARS.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/reservations/ARS.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/reservations/ARS.java
index e9dc2aa..6f47abd 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/reservations/ARS.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/reservations/ARS.java
@@ -22,9 +22,9 @@ import java.util.Map.Entry;
 
 import jline.console.ConsoleReader;
 
+import org.apache.accumulo.core.client.ClientConfiguration;
 import org.apache.accumulo.core.client.ConditionalWriter;
 import org.apache.accumulo.core.client.ConditionalWriter.Status;
-import org.apache.accumulo.core.client.ClientConfiguration;
 import org.apache.accumulo.core.client.ConditionalWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IsolatedScanner;
@@ -52,43 +52,43 @@ import org.apache.log4j.Logger;
 // 10 threads per node.
 
 public class ARS {
-  
+
   private static final Logger log = Logger.getLogger(ARS.class);
-  
+
   private Connector conn;
   private String rTable;
-  
+
   public enum ReservationResult {
     RESERVED, WAIT_LISTED
   }
-  
+
   public ARS(Connector conn, String rTable) {
     this.conn = conn;
     this.rTable = rTable;
   }
-  
+
   public List<String> setCapacity(String what, String when, int count) {
     // EXCERCISE implement this method which atomically sets a capacity and returns anyone who was moved to the wait list if the capacity was decreased
-    
+
     throw new UnsupportedOperationException();
   }
-  
+
   public ReservationResult reserve(String what, String when, String who) throws Exception {
-    
+
     String row = what + ":" + when;
-    
+
     // EXCERCISE This code assumes there is no reservation and tries to create one. If a reservation exist then the update will fail. This is a good strategy
     // when it is expected there are usually no reservations. Could modify the code to scan first.
-    
+
     // The following mutation requires that the column tx:seq does not exist and will fail if it does.
     ConditionalMutation update = new ConditionalMutation(row, new Condition("tx", "seq"));
     update.put("tx", "seq", "0");
     update.put("res", String.format("%04d", 0), who);
-    
+
     ReservationResult result = ReservationResult.RESERVED;
-    
+
     ConditionalWriter cwriter = conn.createConditionalWriter(rTable, new ConditionalWriterConfig());
-    
+
     try {
       while (true) {
         Status status = cwriter.write(update).getStatus();
@@ -102,24 +102,24 @@ public class ARS {
           default:
             throw new RuntimeException("Unexpected status " + status);
         }
-        
+
         // EXCERCISE in the case of many threads trying to reserve a slot, this approach of immediately retrying is inefficient. Exponential back-off is good
         // general solution to solve contention problems like this. However in this particular case, exponential back-off could penalize the earliest threads
         // that attempted to make a reservation by putting them later in the list. A more complex solution could involve having independent sub-queues within
         // the row that approximately maintain arrival order and use exponential back off to fairly merge the sub-queues into the main queue.
-        
+
         // it is important to use an isolated scanner so that only whole mutations are seen
         Scanner scanner = new IsolatedScanner(conn.createScanner(rTable, Authorizations.EMPTY));
         scanner.setRange(new Range(row));
-        
+
         int seq = -1;
         int maxReservation = -1;
-        
+
         for (Entry<Key,Value> entry : scanner) {
           String cf = entry.getKey().getColumnFamilyData().toString();
           String cq = entry.getKey().getColumnQualifierData().toString();
           String val = entry.getValue().toString();
-          
+
           if (cf.equals("tx") && cq.equals("seq")) {
             seq = Integer.parseInt(val);
           } else if (cf.equals("res")) {
@@ -130,21 +130,21 @@ public class ARS {
                 return ReservationResult.RESERVED; // already have the first reservation
               else
                 return ReservationResult.WAIT_LISTED; // already on wait list
-                
+
             // EXCERCISE the way this code finds the max reservation is very inefficient.... it would be better if it did not have to scan the entire row.
             // One possibility is to just use the sequence number. Could also consider sorting the data in another way and/or using an iterator.
             maxReservation = Integer.parseInt(cq);
           }
         }
-        
+
         Condition condition = new Condition("tx", "seq");
         if (seq >= 0)
           condition.setValue(seq + ""); // only expect a seq # if one was seen
-          
+
         update = new ConditionalMutation(row, condition);
         update.put("tx", "seq", (seq + 1) + "");
         update.put("res", String.format("%04d", maxReservation + 1), who);
-        
+
         // EXCERCISE if set capacity is implemented, then result should take capacity into account
         if (maxReservation == -1)
           result = ReservationResult.RESERVED; // if successful, will be first reservation
@@ -154,48 +154,48 @@ public class ARS {
     } finally {
       cwriter.close();
     }
-    
+
   }
-  
+
   public void cancel(String what, String when, String who) throws Exception {
-    
+
     String row = what + ":" + when;
-    
+
     // Even though this method is only deleting a column, its important to use a conditional writer. By updating the seq # when deleting a reservation, it
     // will cause any concurrent reservations to retry. If this delete were done using a batch writer, then a concurrent reservation could report WAIT_LISTED
     // when it actually got the reservation.
-    
+
     ConditionalWriter cwriter = conn.createConditionalWriter(rTable, new ConditionalWriterConfig());
-    
+
     try {
       while (true) {
-        
+
         // its important to use an isolated scanner so that only whole mutations are seen
         Scanner scanner = new IsolatedScanner(conn.createScanner(rTable, Authorizations.EMPTY));
         scanner.setRange(new Range(row));
-        
+
         int seq = -1;
         String reservation = null;
-        
+
         for (Entry<Key,Value> entry : scanner) {
           String cf = entry.getKey().getColumnFamilyData().toString();
           String cq = entry.getKey().getColumnQualifierData().toString();
           String val = entry.getValue().toString();
-          
+
           // EXCERCISE avoid linear scan
-          
+
           if (cf.equals("tx") && cq.equals("seq")) {
             seq = Integer.parseInt(val);
           } else if (cf.equals("res") && val.equals(who)) {
             reservation = cq;
           }
         }
-        
+
         if (reservation != null) {
           ConditionalMutation update = new ConditionalMutation(row, new Condition("tx", "seq").setValue(seq + ""));
           update.putDelete("res", reservation);
           update.put("tx", "seq", (seq + 1) + "");
-          
+
           Status status = cwriter.write(update).getStatus();
           switch (status) {
             case ACCEPTED:
@@ -209,50 +209,50 @@ public class ARS {
             default:
               throw new RuntimeException("Unexpected status " + status);
           }
-          
+
         } else {
           // not reserved, nothing to do
           break;
         }
-        
+
       }
     } finally {
       cwriter.close();
     }
   }
-  
+
   public List<String> list(String what, String when) throws Exception {
     String row = what + ":" + when;
-    
+
     // its important to use an isolated scanner so that only whole mutations are seen
     Scanner scanner = new IsolatedScanner(conn.createScanner(rTable, Authorizations.EMPTY));
     scanner.setRange(new Range(row));
     scanner.fetchColumnFamily(new Text("res"));
-    
+
     List<String> reservations = new ArrayList<String>();
-    
+
     for (Entry<Key,Value> entry : scanner) {
       String val = entry.getValue().toString();
       reservations.add(val);
     }
-    
+
     return reservations;
   }
-  
+
   public static void main(String[] args) throws Exception {
     final ConsoleReader reader = new ConsoleReader();
     ARS ars = null;
-    
+
     while (true) {
       String line = reader.readLine(">");
       if (line == null)
         break;
-      
+
       final String[] tokens = line.split("\\s+");
-      
+
       if (tokens[0].equals("reserve") && tokens.length >= 4 && ars != null) {
         // start up multiple threads all trying to reserve the same resource, no more than one should succeed
-        
+
         final ARS fars = ars;
         ArrayList<Thread> threads = new ArrayList<Thread>();
         for (int i = 3; i < tokens.length; i++) {
@@ -267,16 +267,16 @@ public class ARS {
               }
             }
           };
-          
+
           threads.add(new Thread(reservationTask));
         }
-        
+
         for (Thread thread : threads)
           thread.start();
-        
+
         for (Thread thread : threads)
           thread.join();
-        
+
       } else if (tokens[0].equals("cancel") && tokens.length == 4 && ars != null) {
         ars.cancel(tokens[1], tokens[2], tokens[3]);
       } else if (tokens[0].equals("list") && tokens.length == 3 && ars != null) {

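Condensed, reserve() above is a compare-and-set loop on the tx:seq cell: the first write requires the cell to be absent, and each retry requires the sequence number observed on the previous read. A sketch of just that skeleton, handling ACCEPTED/REJECTED only (the full code also handles Status.UNKNOWN); readSeq is a hypothetical helper standing in for the IsolatedScanner re-read:

  ConditionalMutation update = new ConditionalMutation(row, new Condition("tx", "seq"));
  update.put("tx", "seq", "0");                       // claim the first slot
  while (cwriter.write(update).getStatus() == Status.REJECTED) {
    int seq = readSeq(row);                           // re-read tx:seq under isolation (hypothetical)
    update = new ConditionalMutation(row, new Condition("tx", "seq").setValue(seq + ""));
    update.put("tx", "seq", (seq + 1) + "");          // bump the sequence if accepted
  }
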
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java
index 5367a44..8d05922 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java
@@ -20,8 +20,8 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
 import java.util.Random;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.cli.BatchScannerOpts;
 import org.apache.accumulo.core.cli.ClientOpts;
@@ -40,48 +40,48 @@ import com.beust.jcommander.Parameter;
 /**
  * Using the doc2word table created by Reverse.java, this program randomly selects N words per document. Then it continually queries a random set of words in
  * the shard table (created by {@link Index}) using the {@link IntersectingIterator}.
- * 
+ *
  * See docs/examples/README.shard for instructions.
  */
 
 public class ContinuousQuery {
-  
+
   static class Opts extends ClientOpts {
-    @Parameter(names="--shardTable", required=true, description="name of the shard table")
+    @Parameter(names = "--shardTable", required = true, description = "name of the shard table")
     String table = null;
-    @Parameter(names="--doc2Term", required=true, description="name of the doc2Term table")
+    @Parameter(names = "--doc2Term", required = true, description = "name of the doc2Term table")
     String doc2Term;
-    @Parameter(names="--terms", required=true, description="the number of terms in the query")
+    @Parameter(names = "--terms", required = true, description = "the number of terms in the query")
     int numTerms;
-    @Parameter(names="--count", description="the number of queries to run")
+    @Parameter(names = "--count", description = "the number of queries to run")
     long iterations = Long.MAX_VALUE;
   }
-  
+
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     BatchScannerOpts bsOpts = new BatchScannerOpts();
     opts.parseArgs(ContinuousQuery.class.getName(), args, bsOpts);
-    
+
     Connector conn = opts.getConnector();
-    
+
     ArrayList<Text[]> randTerms = findRandomTerms(conn.createScanner(opts.doc2Term, opts.auths), opts.numTerms);
-    
+
     Random rand = new Random();
-    
+
     BatchScanner bs = conn.createBatchScanner(opts.table, opts.auths, bsOpts.scanThreads);
     bs.setTimeout(bsOpts.scanTimeout, TimeUnit.MILLISECONDS);
-    
+
     for (long i = 0; i < opts.iterations; i += 1) {
       Text[] columns = randTerms.get(rand.nextInt(randTerms.size()));
-      
+
       bs.clearScanIterators();
       bs.clearColumns();
-      
+
       IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
       IntersectingIterator.setColumnFamilies(ii, columns);
       bs.addScanIterator(ii);
       bs.setRanges(Collections.singleton(new Range()));
-      
+
       long t1 = System.currentTimeMillis();
       int count = 0;
       for (@SuppressWarnings("unused")
@@ -89,44 +89,44 @@ public class ContinuousQuery {
         count++;
       }
       long t2 = System.currentTimeMillis();
-      
+
       System.out.printf("  %s %,d %6.3f%n", Arrays.asList(columns), count, (t2 - t1) / 1000.0);
     }
-    
+
     bs.close();
-    
+
   }
-  
+
   private static ArrayList<Text[]> findRandomTerms(Scanner scanner, int numTerms) {
-    
+
     Text currentRow = null;
-    
+
     ArrayList<Text> words = new ArrayList<Text>();
     ArrayList<Text[]> ret = new ArrayList<Text[]>();
-    
+
     Random rand = new Random();
-    
+
     for (Entry<Key,Value> entry : scanner) {
       Key key = entry.getKey();
-      
+
       if (currentRow == null)
         currentRow = key.getRow();
-      
+
       if (!currentRow.equals(key.getRow())) {
         selectRandomWords(words, ret, rand, numTerms);
         words.clear();
         currentRow = key.getRow();
       }
-      
+
       words.add(key.getColumnFamily());
-      
+
     }
-    
+
     selectRandomWords(words, ret, rand, numTerms);
-    
+
     return ret;
   }
-  
+
   private static void selectRandomWords(ArrayList<Text> words, ArrayList<Text[]> ret, Random rand, int numTerms) {
     if (words.size() >= numTerms) {
       Collections.shuffle(words, rand);
@@ -134,7 +134,7 @@ public class ContinuousQuery {
       for (int i = 0; i < docWords.length; i++) {
         docWords[i] = words.get(i);
       }
-      
+
       ret.add(docWords);
     }
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java
index accb3a0..3564be4 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java
@@ -33,41 +33,41 @@ import com.beust.jcommander.Parameter;
 
 /**
  * This program indexes a set of documents given on the command line into a shard table.
- * 
+ *
  * What it writes to the table is row = partition id, column family = term, column qualifier = document id.
- * 
+ *
  * See docs/examples/README.shard for instructions.
  */
 
 public class Index {
-  
+
   static Text genPartition(int partition) {
     return new Text(String.format("%08x", Math.abs(partition)));
   }
-  
+
   public static void index(int numPartitions, Text docId, String doc, String splitRegex, BatchWriter bw) throws Exception {
-    
+
     String[] tokens = doc.split(splitRegex);
-    
+
     Text partition = genPartition(doc.hashCode() % numPartitions);
-    
+
     Mutation m = new Mutation(partition);
-    
+
     HashSet<String> tokensSeen = new HashSet<String>();
-    
+
     for (String token : tokens) {
       token = token.toLowerCase();
-      
+
       if (!tokensSeen.contains(token)) {
         tokensSeen.add(token);
         m.put(new Text(token), docId, new Value(new byte[0]));
       }
     }
-    
+
     if (m.size() > 0)
       bw.addMutation(m);
   }
-  
+
   public static void index(int numPartitions, File src, String splitRegex, BatchWriter bw) throws Exception {
     if (src.isDirectory()) {
       for (File child : src.listFiles()) {
@@ -75,41 +75,41 @@ public class Index {
       }
     } else {
       FileReader fr = new FileReader(src);
-      
+
       StringBuilder sb = new StringBuilder();
-      
+
       char data[] = new char[4096];
       int len;
       while ((len = fr.read(data)) != -1) {
         sb.append(data, 0, len);
       }
-      
+
       fr.close();
-      
+
       index(numPartitions, new Text(src.getAbsolutePath()), sb.toString(), splitRegex, bw);
     }
-    
+
   }
-  
+
   static class Opts extends ClientOnRequiredTable {
-    @Parameter(names="--partitions", required=true, description="the number of shards to create")
+    @Parameter(names = "--partitions", required = true, description = "the number of shards to create")
     int partitions;
-    @Parameter(required=true, description="<file> { <file> ... }")
+    @Parameter(required = true, description = "<file> { <file> ... }")
     List<String> files = new ArrayList<String>();
   }
-  
+
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(Index.class.getName(), args, bwOpts);
-    
+
     String splitRegex = "\\W+";
-    
+
     BatchWriter bw = opts.getConnector().createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
     for (String filename : opts.files) {
       index(opts.partitions, new File(filename), splitRegex, bw);
     }
     bw.close();
   }
-  
+
 }

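One concrete entry under the layout described in the Javadoc — row = partition id, column family = term, column qualifier = document id — with made-up values:

  Mutation m = new Mutation(new Text("0000001a"));  // partition id (row)
  m.put(new Text("accumulo"),                       // term (column family)
      new Text("/docs/src/README.txt"),             // document id (column qualifier)
      new Value(new byte[0]));                      // empty value
  bw.addMutation(m);
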
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
index b0502a7..41d5dc7 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
@@ -37,19 +37,19 @@ import com.beust.jcommander.Parameter;
 
 /**
  * This program queries a set of terms in the shard table (populated by {@link Index}) using the {@link IntersectingIterator}.
- * 
+ *
  * See docs/examples/README.shard for instructions.
  */
 
 public class Query {
-  
+
   static class Opts extends ClientOnRequiredTable {
-    @Parameter(description=" term { <term> ... }")
+    @Parameter(description = " term { <term> ... }")
     List<String> terms = new ArrayList<String>();
   }
-  
+
   public static List<String> query(BatchScanner bs, List<String> terms) {
-    
+
     Text columns[] = new Text[terms.size()];
     int i = 0;
     for (String term : terms) {
@@ -65,7 +65,7 @@ public class Query {
     }
     return result;
   }
-  
+
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     BatchScannerOpts bsOpts = new BatchScannerOpts();
@@ -77,5 +77,5 @@ public class Query {
     for (String entry : query(bs, opts.terms))
       System.out.println("  " + entry);
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java
index 4d03425..dbcbe5f 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java
@@ -34,39 +34,39 @@ import com.beust.jcommander.Parameter;
 /**
  * The program reads an accumulo table written by {@link Index} and writes out to another table. It writes out a mapping of documents to terms. The document to
  * term mapping is used by {@link ContinuousQuery}.
- * 
+ *
  * See docs/examples/README.shard for instructions.
  */
 
 public class Reverse {
-  
+
   static class Opts extends ClientOpts {
-    @Parameter(names="--shardTable")
+    @Parameter(names = "--shardTable")
     String shardTable = "shard";
-    @Parameter(names="--doc2Term")
+    @Parameter(names = "--doc2Term")
     String doc2TermTable = "doc2Term";
   }
-  
+
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     ScannerOpts scanOpts = new ScannerOpts();
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(Reverse.class.getName(), args, scanOpts, bwOpts);
-    
+
     Connector conn = opts.getConnector();
-    
+
     Scanner scanner = conn.createScanner(opts.shardTable, opts.auths);
     scanner.setBatchSize(scanOpts.scanBatchSize);
     BatchWriter bw = conn.createBatchWriter(opts.doc2TermTable, bwOpts.getBatchWriterConfig());
-    
+
     for (Entry<Key,Value> entry : scanner) {
       Key key = entry.getKey();
       Mutation m = new Mutation(key.getColumnQualifier());
       m.put(key.getColumnFamily(), new Text(), new Value(new byte[0]));
       bw.addMutation(m);
     }
-    
+
     bw.close();
-    
+
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/DebugCommand.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/DebugCommand.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/DebugCommand.java
index 6960898..728aa08 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/DebugCommand.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/DebugCommand.java
@@ -39,5 +39,5 @@ public class DebugCommand extends Command {
   public int numArgs() {
     return 0;
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/MyAppShellExtension.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/MyAppShellExtension.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/MyAppShellExtension.java
index 1ba5ad3..e37acbd 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/MyAppShellExtension.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/MyAppShellExtension.java
@@ -16,18 +16,18 @@
  */
 package org.apache.accumulo.examples.simple.shell;
 
-import org.apache.accumulo.shell.ShellExtension;
 import org.apache.accumulo.shell.Shell.Command;
+import org.apache.accumulo.shell.ShellExtension;
 
 public class MyAppShellExtension extends ShellExtension {
-  
+
   public String getExtensionName() {
     return "MyApp";
   }
-  
+
   @Override
   public Command[] getCommands() {
-    return new Command[] { new DebugCommand() };
+    return new Command[] {new DebugCommand()};
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java
index 0addc77..72ff442 100644
--- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java
+++ b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java
@@ -40,9 +40,9 @@ import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
 public class CountTest extends TestCase {
-  
+
   private static final Logger log = Logger.getLogger(CountTest.class);
-  
+
   {
     try {
       Connector conn = new MockInstance("counttest").getConnector("root", new PasswordToken(""));
@@ -64,12 +64,12 @@ public class CountTest extends TestCase {
       log.error("Could not add mutations in initializer.", e);
     }
   }
-  
+
   public void test() throws Exception {
     Scanner scanner = new MockInstance("counttest").getConnector("root", new PasswordToken("")).createScanner("dirlisttable", new Authorizations());
     scanner.fetchColumn(new Text("dir"), new Text("counts"));
     assertFalse(scanner.iterator().hasNext());
-    
+
     Opts opts = new Opts();
     ScannerOpts scanOpts = new ScannerOpts();
     BatchWriterOpts bwOpts = new BatchWriterOpts();
@@ -80,13 +80,13 @@ public class CountTest extends TestCase {
     opts.password = new Opts.Password("");
     FileCount fc = new FileCount(opts, scanOpts, bwOpts);
     fc.run();
-    
+
     ArrayList<Pair<String,String>> expected = new ArrayList<Pair<String,String>>();
     expected.add(new Pair<String,String>(QueryUtil.getRow("").toString(), "1,0,3,3"));
     expected.add(new Pair<String,String>(QueryUtil.getRow("/local").toString(), "2,1,2,3"));
     expected.add(new Pair<String,String>(QueryUtil.getRow("/local/user1").toString(), "0,2,0,2"));
     expected.add(new Pair<String,String>(QueryUtil.getRow("/local/user2").toString(), "0,0,0,0"));
-    
+
     int i = 0;
     for (Entry<Key,Value> e : scanner) {
       assertEquals(e.getKey().getRow().toString(), expected.get(i).getFirst());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkCombinerTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkCombinerTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkCombinerTest.java
index b380b2b..6d1467a 100644
--- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkCombinerTest.java
+++ b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkCombinerTest.java
@@ -22,9 +22,11 @@ import java.util.Collections;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.SortedMap;
 import java.util.TreeMap;
-import java.util.Map.Entry;
+
+import junit.framework.TestCase;
 
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
@@ -33,24 +35,20 @@ import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.examples.simple.filedata.ChunkCombiner;
-import org.apache.accumulo.examples.simple.filedata.FileDataIngest;
-
-import junit.framework.TestCase;
 
 public class ChunkCombinerTest extends TestCase {
-  
+
   public static class MapIterator implements SortedKeyValueIterator<Key,Value> {
     private Iterator<Entry<Key,Value>> iter;
     private Entry<Key,Value> entry;
     Collection<ByteSequence> columnFamilies;
     private SortedMap<Key,Value> map;
     private Range range;
-    
+
     public MapIterator deepCopy(IteratorEnvironment env) {
       return new MapIterator(map);
     }
-    
+
     private MapIterator(SortedMap<Key,Value> map) {
       this.map = map;
       iter = map.entrySet().iterator();
@@ -60,22 +58,22 @@ public class ChunkCombinerTest extends TestCase {
       else
         entry = null;
     }
-    
+
     @Override
     public Key getTopKey() {
       return entry.getKey();
     }
-    
+
     @Override
     public Value getTopValue() {
       return entry.getValue();
     }
-    
+
     @Override
     public boolean hasTop() {
       return entry != null;
     }
-    
+
     @Override
     public void next() throws IOException {
       entry = null;
@@ -90,7 +88,7 @@ public class ChunkCombinerTest extends TestCase {
         break;
       }
     }
-    
+
     @Override
     public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
       if (!inclusive) {
@@ -98,93 +96,93 @@ public class ChunkCombinerTest extends TestCase {
       }
       this.columnFamilies = columnFamilies;
       this.range = range;
-      
+
       Key key = range.getStartKey();
       if (key == null) {
         key = new Key();
       }
-      
+
       iter = map.tailMap(key).entrySet().iterator();
       next();
       while (hasTop() && range.beforeStartKey(getTopKey())) {
         next();
       }
     }
-    
+
     public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
       throw new UnsupportedOperationException();
     }
   }
-  
+
   private TreeMap<Key,Value> row1;
   private TreeMap<Key,Value> row2;
   private TreeMap<Key,Value> row3;
   private TreeMap<Key,Value> allRows;
-  
+
   private TreeMap<Key,Value> cRow1;
   private TreeMap<Key,Value> cRow2;
   private TreeMap<Key,Value> cRow3;
   private TreeMap<Key,Value> allCRows;
-  
+
   private TreeMap<Key,Value> cOnlyRow1;
   private TreeMap<Key,Value> cOnlyRow2;
   private TreeMap<Key,Value> cOnlyRow3;
   private TreeMap<Key,Value> allCOnlyRows;
-  
+
   private TreeMap<Key,Value> badrow;
-  
+
   @Override
   protected void setUp() {
     row1 = new TreeMap<Key,Value>();
     row2 = new TreeMap<Key,Value>();
     row3 = new TreeMap<Key,Value>();
     allRows = new TreeMap<Key,Value>();
-    
+
     cRow1 = new TreeMap<Key,Value>();
     cRow2 = new TreeMap<Key,Value>();
     cRow3 = new TreeMap<Key,Value>();
     allCRows = new TreeMap<Key,Value>();
-    
+
     cOnlyRow1 = new TreeMap<Key,Value>();
     cOnlyRow2 = new TreeMap<Key,Value>();
     cOnlyRow3 = new TreeMap<Key,Value>();
     allCOnlyRows = new TreeMap<Key,Value>();
-    
+
     badrow = new TreeMap<Key,Value>();
-    
+
     String refs = FileDataIngest.REFS_CF.toString();
     String fileext = FileDataIngest.REFS_FILE_EXT;
     String filename = FileDataIngest.REFS_ORIG_FILE;
     String chunk_cf = FileDataIngest.CHUNK_CF.toString();
-    
+
     row1.put(new Key("row1", refs, "hash1\0" + fileext, "C"), new Value("jpg".getBytes()));
     row1.put(new Key("row1", refs, "hash1\0" + filename, "D"), new Value("foo1.jpg".getBytes()));
     row1.put(new Key("row1", chunk_cf, "0000", "A"), new Value("V1".getBytes()));
     row1.put(new Key("row1", chunk_cf, "0000", "B"), new Value("V1".getBytes()));
     row1.put(new Key("row1", chunk_cf, "0001", "A"), new Value("V2".getBytes()));
     row1.put(new Key("row1", chunk_cf, "0001", "B"), new Value("V2".getBytes()));
-    
+
     cRow1.put(new Key("row1", refs, "hash1\0" + fileext, "C"), new Value("jpg".getBytes()));
     cRow1.put(new Key("row1", refs, "hash1\0" + filename, "D"), new Value("foo1.jpg".getBytes()));
     cRow1.put(new Key("row1", chunk_cf, "0000", "(C)|(D)"), new Value("V1".getBytes()));
     cRow1.put(new Key("row1", chunk_cf, "0001", "(C)|(D)"), new Value("V2".getBytes()));
-    
+
     cOnlyRow1.put(new Key("row1", chunk_cf, "0000", "(C)|(D)"), new Value("V1".getBytes()));
     cOnlyRow1.put(new Key("row1", chunk_cf, "0001", "(C)|(D)"), new Value("V2".getBytes()));
-    
+
     row2.put(new Key("row2", refs, "hash1\0" + fileext, "A"), new Value("jpg".getBytes()));
     row2.put(new Key("row2", refs, "hash1\0" + filename, "B"), new Value("foo1.jpg".getBytes()));
     row2.put(new Key("row2", chunk_cf, "0000", "A|B"), new Value("V1".getBytes()));
     row2.put(new Key("row2", chunk_cf, "0000", "A"), new Value("V1".getBytes()));
     row2.put(new Key("row2", chunk_cf, "0000", "(A)|(B)"), new Value("V1".getBytes()));
     row2.put(new Key("row2a", chunk_cf, "0000", "C"), new Value("V1".getBytes()));
-    
+
     cRow2.put(new Key("row2", refs, "hash1\0" + fileext, "A"), new Value("jpg".getBytes()));
     cRow2.put(new Key("row2", refs, "hash1\0" + filename, "B"), new Value("foo1.jpg".getBytes()));
     cRow2.put(new Key("row2", chunk_cf, "0000", "(A)|(B)"), new Value("V1".getBytes()));
-    
+
     cOnlyRow2.put(new Key("row2", chunk_cf, "0000", "(A)|(B)"), new Value("V1".getBytes()));
-    
+
     row3.put(new Key("row3", refs, "hash1\0w", "(A&B)|(C&(D|E))"), new Value("".getBytes()));
     row3.put(new Key("row3", refs, "hash1\0x", "A&B"), new Value("".getBytes()));
     row3.put(new Key("row3", refs, "hash1\0y", "(A&B)"), new Value("".getBytes()));
@@ -193,39 +191,39 @@ public class ChunkCombinerTest extends TestCase {
     row3.put(new Key("row3", chunk_cf, "0000", "A&B", 20), new Value("V1".getBytes()));
     row3.put(new Key("row3", chunk_cf, "0000", "(A&B)", 10), new Value("V1".getBytes()));
     row3.put(new Key("row3", chunk_cf, "0000", "(F|G)&(D|E)", 10), new Value("V1".getBytes()));
-    
+
     cRow3.put(new Key("row3", refs, "hash1\0w", "(A&B)|(C&(D|E))"), new Value("".getBytes()));
     cRow3.put(new Key("row3", refs, "hash1\0x", "A&B"), new Value("".getBytes()));
     cRow3.put(new Key("row3", refs, "hash1\0y", "(A&B)"), new Value("".getBytes()));
     cRow3.put(new Key("row3", refs, "hash1\0z", "(F|G)&(D|E)"), new Value("".getBytes()));
     cRow3.put(new Key("row3", chunk_cf, "0000", "((F|G)&(D|E))|(A&B)|(C&(D|E))", 20), new Value("V1".getBytes()));
-    
+
     cOnlyRow3.put(new Key("row3", chunk_cf, "0000", "((F|G)&(D|E))|(A&B)|(C&(D|E))", 20), new Value("V1".getBytes()));
-    
+
     badrow.put(new Key("row1", chunk_cf, "0000", "A"), new Value("V1".getBytes()));
     badrow.put(new Key("row1", chunk_cf, "0000", "B"), new Value("V2".getBytes()));
-    
+
     allRows.putAll(row1);
     allRows.putAll(row2);
     allRows.putAll(row3);
-    
+
     allCRows.putAll(cRow1);
     allCRows.putAll(cRow2);
     allCRows.putAll(cRow3);
-    
+
     allCOnlyRows.putAll(cOnlyRow1);
     allCOnlyRows.putAll(cOnlyRow2);
     allCOnlyRows.putAll(cOnlyRow3);
   }
-  
+
   private static final Collection<ByteSequence> emptyColfs = new HashSet<ByteSequence>();
-  
+
   public void test1() throws IOException {
     runTest(false, allRows, allCRows, emptyColfs);
     runTest(true, allRows, allCRows, emptyColfs);
     runTest(false, allRows, allCOnlyRows, Collections.singleton(FileDataIngest.CHUNK_CF_BS));
     runTest(true, allRows, allCOnlyRows, Collections.singleton(FileDataIngest.CHUNK_CF_BS));
-    
+
     try {
       runTest(true, badrow, null, emptyColfs);
       assertNotNull(null);
@@ -233,26 +231,26 @@ public class ChunkCombinerTest extends TestCase {
       assertNull(null);
     }
   }
-  
+
   private void runTest(boolean reseek, TreeMap<Key,Value> source, TreeMap<Key,Value> result, Collection<ByteSequence> cols) throws IOException {
     MapIterator src = new MapIterator(source);
     SortedKeyValueIterator<Key,Value> iter = new ChunkCombiner();
     iter.init(src, null, null);
     iter = iter.deepCopy(null);
     iter.seek(new Range(), cols, true);
-    
+
     TreeMap<Key,Value> seen = new TreeMap<Key,Value>();
-    
+
     while (iter.hasTop()) {
       assertFalse("already contains " + iter.getTopKey(), seen.containsKey(iter.getTopKey()));
       seen.put(new Key(iter.getTopKey()), new Value(iter.getTopValue()));
-      
+
       if (reseek)
         iter.seek(new Range(iter.getTopKey().followingKey(PartialKey.ROW_COLFAM_COLQUAL), true, null, true), cols, true);
       else
         iter.next();
     }
-    
+
     assertEquals(result, seen);
   }
 }
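
The test drives ChunkCombiner directly through its MapIterator stub; against a live table the combiner would normally be attached as a scan-time iterator instead. A minimal sketch, assuming an existing Connector and table name (the priority of 20 and the iterator name are arbitrary choices):

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.simple.filedata.ChunkCombiner;

public class ChunkScanSketch {
  // Returns a scanner whose chunk entries pass through ChunkCombiner,
  // which collapses duplicate chunks and merges their visibilities,
  // as the expected rows in the test above illustrate.
  static Scanner chunkScanner(Connector conn, String table) throws TableNotFoundException {
    Scanner scanner = conn.createScanner(table, new Authorizations("A", "B", "C", "D"));
    scanner.addScanIterator(new IteratorSetting(20, "chunkcombiner", ChunkCombiner.class));
    return scanner;
  }
}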

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStreamTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStreamTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStreamTest.java
index 54279e4..68af6d8 100644
--- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStreamTest.java
+++ b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStreamTest.java
@@ -48,7 +48,7 @@ public class ChunkInputStreamTest extends TestCase {
   List<Entry<Key,Value>> data;
   List<Entry<Key,Value>> baddata;
   List<Entry<Key,Value>> multidata;
-  
+
   {
     data = new ArrayList<Entry<Key,Value>>();
     addData(data, "a", "refs", "id\0ext", "A&B", "ext");
@@ -92,17 +92,17 @@ public class ChunkInputStreamTest extends TestCase {
     addData(multidata, "c", "~chunk", 100, 0, "A&B", "asdfjkl;");
     addData(multidata, "c", "~chunk", 100, 1, "B&C", "");
   }
-  
+
   public static void addData(List<Entry<Key,Value>> data, String row, String cf, String cq, String vis, String value) {
     data.add(new KeyValue(new Key(new Text(row), new Text(cf), new Text(cq), new Text(vis)), value.getBytes()));
   }
-  
+
   public static void addData(List<Entry<Key,Value>> data, String row, String cf, int chunkSize, int chunkCount, String vis, String value) {
     Text chunkCQ = new Text(FileDataIngest.intToBytes(chunkSize));
     chunkCQ.append(FileDataIngest.intToBytes(chunkCount), 0, 4);
     data.add(new KeyValue(new Key(new Text(row), new Text(cf), chunkCQ, new Text(vis)), value.getBytes()));
   }
-  
+
   public void testExceptionOnMultipleSetSourceWithoutClose() throws IOException {
     ChunkInputStream cis = new ChunkInputStream();
     PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<Entry<Key,Value>>(data.iterator());
@@ -116,11 +116,11 @@ public class ChunkInputStreamTest extends TestCase {
     }
     cis.close();
   }
-  
+
   public void testExceptionOnGetVisBeforeClose() throws IOException {
     ChunkInputStream cis = new ChunkInputStream();
     PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<Entry<Key,Value>>(data.iterator());
-    
+
     cis.setSource(pi);
     try {
       cis.getVisibilities();
@@ -131,13 +131,13 @@ public class ChunkInputStreamTest extends TestCase {
     cis.close();
     cis.getVisibilities();
   }
-  
+
   public void testReadIntoBufferSmallerThanChunks() throws IOException {
     ChunkInputStream cis = new ChunkInputStream();
     byte[] b = new byte[5];
-    
+
     PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<Entry<Key,Value>>(data.iterator());
-    
+
     cis.setSource(pi);
     int read;
     assertEquals(read = cis.read(b), 5);
@@ -145,7 +145,7 @@ public class ChunkInputStreamTest extends TestCase {
     assertEquals(read = cis.read(b), 3);
     assertEquals(new String(b, 0, read), "kl;");
     assertEquals(read = cis.read(b), -1);
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 5);
     assertEquals(new String(b, 0, read), "qwert");
@@ -154,7 +154,7 @@ public class ChunkInputStreamTest extends TestCase {
     assertEquals(read = cis.read(b), -1);
     assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
     cis.close();
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 5);
     assertEquals(new String(b, 0, read), "asdfj");
@@ -167,11 +167,11 @@ public class ChunkInputStreamTest extends TestCase {
     assertEquals(read = cis.read(b), -1);
     assertEquals(cis.getVisibilities().toString(), "[A&B]");
     cis.close();
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), -1);
     cis.close();
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 5);
     assertEquals(new String(b, 0, read), "asdfj");
@@ -179,53 +179,53 @@ public class ChunkInputStreamTest extends TestCase {
     assertEquals(new String(b, 0, read), "kl;");
     assertEquals(read = cis.read(b), -1);
     cis.close();
-    
+
     assertFalse(pi.hasNext());
   }
-  
+
   public void testReadIntoBufferLargerThanChunks() throws IOException {
     ChunkInputStream cis = new ChunkInputStream();
     byte[] b = new byte[20];
     int read;
     PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<Entry<Key,Value>>(data.iterator());
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 8);
     assertEquals(new String(b, 0, read), "asdfjkl;");
     assertEquals(read = cis.read(b), -1);
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 10);
     assertEquals(new String(b, 0, read), "qwertyuiop");
     assertEquals(read = cis.read(b), -1);
     assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
     cis.close();
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 16);
     assertEquals(new String(b, 0, read), "asdfjkl;asdfjkl;");
     assertEquals(read = cis.read(b), -1);
     assertEquals(cis.getVisibilities().toString(), "[A&B]");
     cis.close();
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), -1);
     cis.close();
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 8);
     assertEquals(new String(b, 0, read), "asdfjkl;");
     assertEquals(read = cis.read(b), -1);
     cis.close();
-    
+
     assertFalse(pi.hasNext());
   }
-  
+
   public void testWithAccumulo() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, IOException {
     Connector conn = new MockInstance().getConnector("root", new PasswordToken(""));
     conn.tableOperations().create("test");
     BatchWriter bw = conn.createBatchWriter("test", new BatchWriterConfig());
-    
+
     for (Entry<Key,Value> e : data) {
       Key k = e.getKey();
       Mutation m = new Mutation(k.getRow());
@@ -233,46 +233,46 @@ public class ChunkInputStreamTest extends TestCase {
       bw.addMutation(m);
     }
     bw.close();
-    
+
     Scanner scan = conn.createScanner("test", new Authorizations("A", "B", "C", "D"));
-    
+
     ChunkInputStream cis = new ChunkInputStream();
     byte[] b = new byte[20];
     int read;
     PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<Entry<Key,Value>>(scan.iterator());
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 8);
     assertEquals(new String(b, 0, read), "asdfjkl;");
     assertEquals(read = cis.read(b), -1);
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 10);
     assertEquals(new String(b, 0, read), "qwertyuiop");
     assertEquals(read = cis.read(b), -1);
     assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
     cis.close();
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 16);
     assertEquals(new String(b, 0, read), "asdfjkl;asdfjkl;");
     assertEquals(read = cis.read(b), -1);
     assertEquals(cis.getVisibilities().toString(), "[A&B]");
     cis.close();
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), -1);
     cis.close();
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 8);
     assertEquals(new String(b, 0, read), "asdfjkl;");
     assertEquals(read = cis.read(b), -1);
     cis.close();
-    
+
     assertFalse(pi.hasNext());
   }
-  
+
   private static void assumeExceptionOnRead(ChunkInputStream cis, byte[] b) {
     try {
       cis.read(b);
@@ -282,7 +282,7 @@ public class ChunkInputStreamTest extends TestCase {
       assertNull(null);
     }
   }
-  
+
   private static void assumeExceptionOnClose(ChunkInputStream cis) {
     try {
       cis.close();
@@ -292,41 +292,41 @@ public class ChunkInputStreamTest extends TestCase {
       assertNull(null);
     }
   }
-  
+
   public void testBadData() throws IOException {
     ChunkInputStream cis = new ChunkInputStream();
     byte[] b = new byte[20];
     int read;
     PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<Entry<Key,Value>>(baddata.iterator());
-    
+
     cis.setSource(pi);
     assumeExceptionOnRead(cis, b);
     assumeExceptionOnClose(cis);
     // can still get visibilities after exception -- bad?
     assertEquals(cis.getVisibilities().toString(), "[A]");
-    
+
     cis.setSource(pi);
     assumeExceptionOnRead(cis, b);
     assumeExceptionOnClose(cis);
     assertEquals(cis.getVisibilities().toString(), "[B, C]");
-    
+
     cis.setSource(pi);
     assumeExceptionOnRead(cis, b);
     assumeExceptionOnClose(cis);
     assertEquals(cis.getVisibilities().toString(), "[D, E]");
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 8);
     assertEquals(new String(b, 0, read), "asdfjkl;");
     assertEquals(read = cis.read(b), -1);
     assertEquals(cis.getVisibilities().toString(), "[F, G]");
     cis.close();
-    
+
     cis.setSource(pi);
     assumeExceptionOnRead(cis, b);
     cis.close();
     assertEquals(cis.getVisibilities().toString(), "[I, J]");
-    
+
     try {
       cis.setSource(pi);
       assertNotNull(null);
@@ -335,49 +335,49 @@ public class ChunkInputStreamTest extends TestCase {
     }
     assumeExceptionOnClose(cis);
     assertEquals(cis.getVisibilities().toString(), "[K]");
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), -1);
     assertEquals(cis.getVisibilities().toString(), "[L]");
     cis.close();
-    
+
     assertFalse(pi.hasNext());
-    
+
     pi = new PeekingIterator<Entry<Key,Value>>(baddata.iterator());
     cis.setSource(pi);
     assumeExceptionOnClose(cis);
   }
-  
+
   public void testBadDataWithoutClosing() throws IOException {
     ChunkInputStream cis = new ChunkInputStream();
     byte[] b = new byte[20];
     int read;
     PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<Entry<Key,Value>>(baddata.iterator());
-    
+
     cis.setSource(pi);
     assumeExceptionOnRead(cis, b);
     // can still get visibilities after exception -- bad?
     assertEquals(cis.getVisibilities().toString(), "[A]");
-    
+
     cis.setSource(pi);
     assumeExceptionOnRead(cis, b);
     assertEquals(cis.getVisibilities().toString(), "[B, C]");
-    
+
     cis.setSource(pi);
     assumeExceptionOnRead(cis, b);
     assertEquals(cis.getVisibilities().toString(), "[D, E]");
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 8);
     assertEquals(new String(b, 0, read), "asdfjkl;");
     assertEquals(read = cis.read(b), -1);
     assertEquals(cis.getVisibilities().toString(), "[F, G]");
     cis.close();
-    
+
     cis.setSource(pi);
     assumeExceptionOnRead(cis, b);
     assertEquals(cis.getVisibilities().toString(), "[I, J]");
-    
+
     try {
       cis.setSource(pi);
       assertNotNull(null);
@@ -385,51 +385,51 @@ public class ChunkInputStreamTest extends TestCase {
       assertNull(null);
     }
     assertEquals(cis.getVisibilities().toString(), "[K]");
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), -1);
     assertEquals(cis.getVisibilities().toString(), "[L]");
     cis.close();
-    
+
     assertFalse(pi.hasNext());
-    
+
     pi = new PeekingIterator<Entry<Key,Value>>(baddata.iterator());
     cis.setSource(pi);
     assumeExceptionOnClose(cis);
   }
-  
+
   public void testMultipleChunkSizes() throws IOException {
     ChunkInputStream cis = new ChunkInputStream();
     byte[] b = new byte[20];
     int read;
     PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<Entry<Key,Value>>(multidata.iterator());
-    
+
     b = new byte[20];
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 8);
     assertEquals(read = cis.read(b), -1);
     cis.close();
     assertEquals(cis.getVisibilities().toString(), "[A&B]");
-    
+
     cis.setSource(pi);
     assumeExceptionOnRead(cis, b);
     assertEquals(cis.getVisibilities().toString(), "[A&B]");
-    
+
     cis.setSource(pi);
     assertEquals(read = cis.read(b), 8);
     assertEquals(new String(b, 0, read), "asdfjkl;");
     assertEquals(read = cis.read(b), -1);
     cis.close();
     assertEquals(cis.getVisibilities().toString(), "[A&B, B&C]");
-    
+
     assertFalse(pi.hasNext());
   }
-  
+
   public void testSingleByteRead() throws IOException {
     ChunkInputStream cis = new ChunkInputStream();
     PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<Entry<Key,Value>>(data.iterator());
-    
+
     cis.setSource(pi);
     assertEquals((byte) 'a', (byte) cis.read());
     assertEquals((byte) 's', (byte) cis.read());
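
The pattern these tests exercise reads each stored file back as a stream: setSource() positions ChunkInputStream at the next file's chunks, read() returns -1 at the end of that file, and close() readies the stream for the next call. A minimal sketch of that loop, assuming a Scanner already positioned over a filedata table and an arbitrary buffer size:

import java.io.IOException;
import java.util.Map.Entry;

import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.util.PeekingIterator;
import org.apache.accumulo.examples.simple.filedata.ChunkInputStream;

public class ChunkReadSketch {
  // Streams every file's bytes out of a chunk-table scan, one file at a time.
  static void readAll(Scanner scanner) throws IOException {
    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<Entry<Key,Value>>(scanner.iterator());
    ChunkInputStream cis = new ChunkInputStream();
    byte[] buf = new byte[4096];
    while (pi.hasNext()) {
      cis.setSource(pi);
      int numRead = cis.read(buf);
      while (numRead >= 0) {
        // consume buf[0..numRead)
        numRead = cis.read(buf);
      }
      cis.close();
    }
  }
}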

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/KeyUtilTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/KeyUtilTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/KeyUtilTest.java
index 8307069..e93331a 100644
--- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/KeyUtilTest.java
+++ b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/KeyUtilTest.java
@@ -18,14 +18,13 @@ package org.apache.accumulo.examples.simple.filedata;
 
 import junit.framework.TestCase;
 
-import org.apache.accumulo.examples.simple.filedata.KeyUtil;
 import org.apache.hadoop.io.Text;
 
 public class KeyUtilTest extends TestCase {
   public static void checkSeps(String... s) {
     Text t = KeyUtil.buildNullSepText(s);
     String[] rets = KeyUtil.splitNullSepText(t);
-    
+
     int length = 0;
     for (String str : s)
       length += str.length();
@@ -34,7 +33,7 @@ public class KeyUtilTest extends TestCase {
     for (int i = 0; i < s.length; i++)
       assertEquals(s[i], rets[i]);
   }
-  
+
   public void testNullSep() {
     checkSeps("abc", "d", "", "efgh");
     checkSeps("ab", "");
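
The round trip under test is simple to show in isolation; a minimal usage sketch of KeyUtil (the component values are arbitrary):

import org.apache.accumulo.examples.simple.filedata.KeyUtil;
import org.apache.hadoop.io.Text;

public class KeyUtilSketch {
  public static void main(String[] args) {
    // Join components with null-byte separators, then split them back apart.
    Text t = KeyUtil.buildNullSepText("hash1", "filext");
    String[] parts = KeyUtil.splitNullSepText(t);
    System.out.println(parts.length); // prints 2
  }
}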

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java
----------------------------------------------------------------------
diff --git a/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java b/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java
index 60ff4b4..c12efb5 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/AdminUtil.java
@@ -40,19 +40,19 @@ import org.apache.zookeeper.KeeperException;
  */
 public class AdminUtil<T> {
   private static final Logger log = Logger.getLogger(AdminUtil.class);
-  
+
   private boolean exitOnError = false;
-  
+
   /**
    * Default constructor
    */
   public AdminUtil() {
     this(true);
   }
-  
+
   /**
    * Constructor
-   * 
+   *
    * @param exitOnError
    *          <code>System.exit(1)</code> on error if true
    */
@@ -60,37 +60,37 @@ public class AdminUtil<T> {
     super();
     this.exitOnError = exitOnError;
   }
-  
+
   public void print(ReadOnlyTStore<T> zs, IZooReaderWriter zk, String lockPath) throws KeeperException, InterruptedException {
     print(zs, zk, lockPath, new Formatter(System.out), null, null);
   }
-  
+
   public void print(ReadOnlyTStore<T> zs, IZooReaderWriter zk, String lockPath, Formatter fmt, Set<Long> filterTxid, EnumSet<TStatus> filterStatus)
       throws KeeperException, InterruptedException {
     Map<Long,List<String>> heldLocks = new HashMap<Long,List<String>>();
     Map<Long,List<String>> waitingLocks = new HashMap<Long,List<String>>();
-    
+
     List<String> lockedIds = zk.getChildren(lockPath);
-    
+
     for (String id : lockedIds) {
       try {
         List<String> lockNodes = zk.getChildren(lockPath + "/" + id);
         lockNodes = new ArrayList<String>(lockNodes);
         Collections.sort(lockNodes);
-        
+
         int pos = 0;
         boolean sawWriteLock = false;
-        
+
         for (String node : lockNodes) {
           try {
             byte[] data = zk.getData(lockPath + "/" + id + "/" + node, null);
             String lda[] = new String(data, UTF_8).split(":");
-            
+
             if (lda[0].charAt(0) == 'W')
               sawWriteLock = true;
-            
+
             Map<Long,List<String>> locks;
-            
+
             if (pos == 0) {
               locks = heldLocks;
             } else {
@@ -100,77 +100,77 @@ public class AdminUtil<T> {
                 locks = waitingLocks;
               }
             }
-            
+
             List<String> tables = locks.get(Long.parseLong(lda[1], 16));
             if (tables == null) {
               tables = new ArrayList<String>();
               locks.put(Long.parseLong(lda[1], 16), tables);
             }
-            
+
             tables.add(lda[0].charAt(0) + ":" + id);
-            
+
           } catch (Exception e) {
             log.error(e);
           }
           pos++;
         }
-        
+
       } catch (Exception e) {
-        log.error("Failed to read locks for "+id+" continuing.", e);
+        log.error("Failed to read locks for " + id + " continuing.", e);
         fmt.format("Failed to read locks for %s continuing", id);
       }
     }
-    
+
     List<Long> transactions = zs.list();
-    
+
     long txCount = 0;
     for (Long tid : transactions) {
-      
+
       zs.reserve(tid);
-      
+
       String debug = (String) zs.getProperty(tid, "debug");
-      
+
       List<String> hlocks = heldLocks.remove(tid);
       if (hlocks == null)
         hlocks = Collections.emptyList();
-      
+
       List<String> wlocks = waitingLocks.remove(tid);
       if (wlocks == null)
         wlocks = Collections.emptyList();
-      
+
       String top = null;
       ReadOnlyRepo<T> repo = zs.top(tid);
       if (repo != null)
         top = repo.getDescription();
-      
+
       TStatus status = null;
       status = zs.getStatus(tid);
-      
+
       zs.unreserve(tid, 0);
-      
+
       if ((filterTxid != null && !filterTxid.contains(tid)) || (filterStatus != null && !filterStatus.contains(status)))
         continue;
-      
+
       ++txCount;
       fmt.format("txid: %016x  status: %-18s  op: %-15s  locked: %-15s locking: %-15s top: %s%n", tid, status, debug, hlocks, wlocks, top);
     }
     fmt.format(" %s transactions", txCount);
-    
+
     if (heldLocks.size() != 0 || waitingLocks.size() != 0) {
       fmt.format("%nThe following locks did not have an associated FATE operation%n");
       for (Entry<Long,List<String>> entry : heldLocks.entrySet())
         fmt.format("txid: %016x  locked: %s%n", entry.getKey(), entry.getValue());
-      
+
       for (Entry<Long,List<String>> entry : waitingLocks.entrySet())
         fmt.format("txid: %016x  locking: %s%n", entry.getKey(), entry.getValue());
     }
   }
-  
+
   public boolean prepDelete(TStore<T> zs, IZooReaderWriter zk, String path, String txidStr) {
     if (!checkGlobalLock(zk, path)) {
       return false;
     }
-    
+
     long txid;
     try {
       txid = Long.parseLong(txidStr, 16);
@@ -185,7 +185,7 @@ public class AdminUtil<T> {
       case UNKNOWN:
         System.out.printf("Invalid transaction ID: %016x%n", txid);
         break;
-      
+
       case IN_PROGRESS:
       case NEW:
       case FAILED:
@@ -196,16 +196,16 @@ public class AdminUtil<T> {
         state = true;
         break;
     }
-    
+
     zs.unreserve(txid, 0);
     return state;
   }
-  
+
   public boolean prepFail(TStore<T> zs, IZooReaderWriter zk, String path, String txidStr) {
     if (!checkGlobalLock(zk, path)) {
       return false;
     }
-    
+
     long txid;
     try {
       txid = Long.parseLong(txidStr, 16);
@@ -220,33 +220,33 @@ public class AdminUtil<T> {
       case UNKNOWN:
         System.out.printf("Invalid transaction ID: %016x%n", txid);
         break;
-      
+
       case IN_PROGRESS:
       case NEW:
         System.out.printf("Failing transaction: %016x (%s)%n", txid, ts);
         zs.setStatus(txid, TStatus.FAILED_IN_PROGRESS);
         state = true;
         break;
-      
+
       case SUCCESSFUL:
         System.out.printf("Transaction already completed: %016x (%s)%n", txid, ts);
         break;
-      
+
       case FAILED:
       case FAILED_IN_PROGRESS:
         System.out.printf("Transaction already failed: %016x (%s)%n", txid, ts);
         state = true;
         break;
     }
-    
+
     zs.unreserve(txid, 0);
     return state;
   }
-  
+
   public void deleteLocks(TStore<T> zs, IZooReaderWriter zk, String path, String txidStr) throws KeeperException, InterruptedException {
     // delete any locks assoc w/ fate operation
     List<String> lockedIds = zk.getChildren(path);
-    
+
     for (String id : lockedIds) {
       List<String> lockNodes = zk.getChildren(path + "/" + id);
       for (String node : lockNodes) {
@@ -258,7 +258,7 @@ public class AdminUtil<T> {
       }
     }
   }
-  
+
   public boolean checkGlobalLock(IZooReaderWriter zk, String path) {
     try {
       if (ZooLock.getLockData(zk.getZooKeeper(), path) != null) {
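
Taken together, print(), prepDelete(), and prepFail() give operators a read/modify view of FATE transactions. A minimal sketch of the read-only path, assuming the caller already holds a ReadOnlyTStore, an IZooReaderWriter, and the lock path (package locations are inferred from the file paths in this diff):

import org.apache.accumulo.fate.AdminUtil;
import org.apache.accumulo.fate.ReadOnlyTStore;
import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;

public class FateStatusSketch {
  // Prints every FATE transaction with its status and held/waiting locks.
  static <T> void dump(ReadOnlyTStore<T> zs, IZooReaderWriter zk, String lockPath) throws Exception {
    AdminUtil<T> admin = new AdminUtil<T>(false); // false: do not System.exit(1) on error
    admin.print(zs, zk, lockPath);
  }
}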

