accumulo-commits mailing list archives

From ctubb...@apache.org
Subject [08/66] [abbrv] accumulo git commit: ACCUMULO-3451 Format master branch (1.7.0-SNAPSHOT)
Date Fri, 09 Jan 2015 02:44:12 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/start/src/main/java/org/apache/accumulo/start/classloader/vfs/providers/HdfsRandomAccessContent.java
----------------------------------------------------------------------
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/providers/HdfsRandomAccessContent.java b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/providers/HdfsRandomAccessContent.java
index aada2c7..a9bbe55 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/providers/HdfsRandomAccessContent.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/providers/HdfsRandomAccessContent.java
@@ -30,16 +30,16 @@ import org.apache.hadoop.fs.Path;
 /**
  * Provides random access to content in an HdfsFileObject. Currently this only supports read operations. All write operations throw an
  * {@link UnsupportedOperationException}.
- * 
+ *
  * @since 2.1
  */
 public class HdfsRandomAccessContent implements RandomAccessContent {
   private final FileSystem fs;
   private final Path path;
   private final FSDataInputStream fis;
-  
+
   /**
-   * 
+   *
    * @param path
    *          A Hadoop Path
    * @param fs
@@ -52,7 +52,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
     this.path = path;
     this.fis = this.fs.open(this.path);
   }
-  
+
   /**
    * @see org.apache.commons.vfs2.RandomAccessContent#close()
    */
@@ -60,7 +60,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void close() throws IOException {
     this.fis.close();
   }
-  
+
   /**
    * @see org.apache.commons.vfs2.RandomAccessContent#getFilePointer()
    */
@@ -68,7 +68,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public long getFilePointer() throws IOException {
     return this.fis.getPos();
   }
-  
+
   /**
    * @see org.apache.commons.vfs2.RandomAccessContent#getInputStream()
    */
@@ -76,7 +76,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public InputStream getInputStream() throws IOException {
     return this.fis;
   }
-  
+
   /**
    * @see org.apache.commons.vfs2.RandomAccessContent#length()
    */
@@ -84,7 +84,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public long length() throws IOException {
     return this.fs.getFileStatus(this.path).getLen();
   }
-  
+
   /**
    * @see java.io.DataInput#readBoolean()
    */
@@ -92,7 +92,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public boolean readBoolean() throws IOException {
     return this.fis.readBoolean();
   }
-  
+
   /**
    * @see java.io.DataInput#readByte()
    */
@@ -100,7 +100,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public byte readByte() throws IOException {
     return this.fis.readByte();
   }
-  
+
   /**
    * @see java.io.DataInput#readChar()
    */
@@ -108,7 +108,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public char readChar() throws IOException {
     return this.fis.readChar();
   }
-  
+
   /**
    * @see java.io.DataInput#readDouble()
    */
@@ -116,7 +116,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public double readDouble() throws IOException {
     return this.fis.readDouble();
   }
-  
+
   /**
    * @see java.io.DataInput#readFloat()
    */
@@ -124,7 +124,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public float readFloat() throws IOException {
     return this.fis.readFloat();
   }
-  
+
   /**
    * @see java.io.DataInput#readFully(byte[])
    */
@@ -132,7 +132,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void readFully(final byte[] b) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataInput#readFully(byte[], int, int)
    */
@@ -140,7 +140,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void readFully(final byte[] b, final int off, final int len) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataInput#readInt()
    */
@@ -148,7 +148,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public int readInt() throws IOException {
     return this.fis.readInt();
   }
-  
+
   /**
    * @see java.io.DataInput#readLine()
    */
@@ -157,7 +157,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
     BufferedReader d = new BufferedReader(new InputStreamReader(this.fis, Charset.forName("UTF-8")));
     return d.readLine();
   }
-  
+
   /**
    * @see java.io.DataInput#readLong()
    */
@@ -165,7 +165,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public long readLong() throws IOException {
     return this.fis.readLong();
   }
-  
+
   /**
    * @see java.io.DataInput#readShort()
    */
@@ -173,7 +173,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public short readShort() throws IOException {
     return this.fis.readShort();
   }
-  
+
   /**
    * @see java.io.DataInput#readUnsignedByte()
    */
@@ -181,7 +181,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public int readUnsignedByte() throws IOException {
     return this.fis.readUnsignedByte();
   }
-  
+
   /**
    * @see java.io.DataInput#readUnsignedShort()
    */
@@ -189,7 +189,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public int readUnsignedShort() throws IOException {
     return this.fis.readUnsignedShort();
   }
-  
+
   /**
    * @see java.io.DataInput#readUTF()
    */
@@ -197,7 +197,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public String readUTF() throws IOException {
     return this.fis.readUTF();
   }
-  
+
   /**
    * @see org.apache.commons.vfs2.RandomAccessContent#seek(long)
    */
@@ -205,7 +205,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void seek(final long pos) throws IOException {
     this.fis.seek(pos);
   }
-  
+
   /**
    * @see java.io.DataInput#skipBytes(int)
    */
@@ -213,7 +213,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public int skipBytes(final int n) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataOutput#write(byte[])
    */
@@ -221,7 +221,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void write(final byte[] b) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataOutput#write(byte[], int, int)
    */
@@ -229,7 +229,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void write(final byte[] b, final int off, final int len) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataOutput#write(int)
    */
@@ -237,7 +237,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void write(final int b) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataOutput#writeBoolean(boolean)
    */
@@ -245,7 +245,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void writeBoolean(final boolean v) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataOutput#writeByte(int)
    */
@@ -253,7 +253,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void writeByte(final int v) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataOutput#writeBytes(java.lang.String)
    */
@@ -261,7 +261,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void writeBytes(final String s) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataOutput#writeChar(int)
    */
@@ -269,7 +269,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void writeChar(final int v) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataOutput#writeChars(java.lang.String)
    */
@@ -277,7 +277,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void writeChars(final String s) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataOutput#writeDouble(double)
    */
@@ -285,7 +285,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void writeDouble(final double v) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataOutput#writeFloat(float)
    */
@@ -293,7 +293,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void writeFloat(final float v) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataOutput#writeInt(int)
    */
@@ -301,7 +301,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void writeInt(final int v) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataOutput#writeLong(long)
    */
@@ -309,7 +309,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void writeLong(final long v) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataOutput#writeShort(int)
    */
@@ -317,7 +317,7 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void writeShort(final int v) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   /**
    * @see java.io.DataOutput#writeUTF(java.lang.String)
    */
@@ -325,5 +325,5 @@ public class HdfsRandomAccessContent implements RandomAccessContent {
   public void writeUTF(final String s) throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
 }

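For readers skimming the reformat: HdfsRandomAccessContent adapts Hadoop's FSDataInputStream to the commons-vfs2 RandomAccessContent interface, supporting reads and seeks only. A minimal usage sketch, assuming an "hdfs" provider has been registered with VFS (the URI and surrounding method are illustrative, not from this commit):

    import org.apache.commons.vfs2.*;
    import org.apache.commons.vfs2.util.RandomAccessMode;

    // inside a method:
    FileSystemManager manager = VFS.getManager();
    FileObject file = manager.resolveFile("hdfs://localhost:8020/some/file");
    RandomAccessContent rac = file.getContent().getRandomAccessContent(RandomAccessMode.READ);
    try {
      rac.seek(128);               // reads and seeks are supported
      long header = rac.readLong();
      // rac.writeLong(0L);        // any write throws UnsupportedOperationException
    } finally {
      rac.close();
    }
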
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/start/src/test/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoaderTest.java
----------------------------------------------------------------------
diff --git a/start/src/test/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoaderTest.java b/start/src/test/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoaderTest.java
index 3d52832..5adab86 100644
--- a/start/src/test/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoaderTest.java
+++ b/start/src/test/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoaderTest.java
@@ -132,7 +132,8 @@ public class AccumuloReloadingVFSClassLoaderTest {
   //
   // This is caused by the file being deleted and then re-added in the same monitor tick. This causes the file to ultimately register the deletion over any
   // other events.
-  @Test @Ignore
+  @Test
+  @Ignore
   public void testFastDeleteAndReAdd() throws Exception {
     FileObject testDir = vfs.resolveFile(folder1.getRoot().toURI().toString());
     FileObject[] dirContents = testDir.getChildren();
@@ -161,10 +162,10 @@ public class AccumuloReloadingVFSClassLoaderTest {
     // Update the class
     FileUtils.copyURLToFile(this.getClass().getResource("/HelloWorld.jar"), folder1.newFile("HelloWorld.jar"));
 
-    //Wait for the monitor to notice
+    // Wait for the monitor to notice
     // VFS-487: wait a long time to avoid spurious failure
     Thread.sleep(7000);
-    
+
     Class<?> clazz2 = arvcl.getClassLoader().loadClass("test.HelloWorld");
     Object o2 = clazz2.newInstance();
     Assert.assertEquals("Hello World!", o2.toString());

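The fixed 7-second sleep above compensates for VFS-487, where the file monitor can coalesce a delete and re-add into a single delete event. A sketch of a polling alternative, with an assumed timeout (arvcl and the class name follow the test above):

    // Poll until the reloading class loader can serve the re-added class,
    // instead of sleeping a fixed 7 seconds.
    long deadline = System.currentTimeMillis() + 15000; // assumed upper bound
    Class<?> clazz2 = null;
    while (clazz2 == null && System.currentTimeMillis() < deadline) {
      try {
        clazz2 = arvcl.getClassLoader().loadClass("test.HelloWorld");
      } catch (ClassNotFoundException e) {
        Thread.sleep(500); // monitor has not processed the new jar yet
      }
    }
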
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/start/src/test/java/org/apache/accumulo/start/classloader/vfs/ContextManagerTest.java
----------------------------------------------------------------------
diff --git a/start/src/test/java/org/apache/accumulo/start/classloader/vfs/ContextManagerTest.java b/start/src/test/java/org/apache/accumulo/start/classloader/vfs/ContextManagerTest.java
index 65f0292..d1e6813 100644
--- a/start/src/test/java/org/apache/accumulo/start/classloader/vfs/ContextManagerTest.java
+++ b/start/src/test/java/org/apache/accumulo/start/classloader/vfs/ContextManagerTest.java
@@ -60,7 +60,7 @@ public class ContextManagerTest {
     FileUtils.copyURLToFile(this.getClass().getResource("/HelloWorld.jar"), folder2.newFile("HelloWorld.jar"));
 
     uri1 = new File(folder1.getRoot(), "HelloWorld.jar").toURI().toString();
-    uri2 = folder2.getRoot().toURI().toString()+".*";
+    uri2 = folder2.getRoot().toURI().toString() + ".*";
 
   }
 

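A note on the spacing fix above: uri1 names a single jar, while uri2 appends ".*" to the directory URI so it acts as a pattern covering every file under folder2, e.g. file:/tmp/folder2/.* matches the copied HelloWorld.jar.
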
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/start/src/test/java/org/apache/accumulo/start/classloader/vfs/providers/ReadOnlyHdfsFileProviderTest.java
----------------------------------------------------------------------
diff --git a/start/src/test/java/org/apache/accumulo/start/classloader/vfs/providers/ReadOnlyHdfsFileProviderTest.java b/start/src/test/java/org/apache/accumulo/start/classloader/vfs/providers/ReadOnlyHdfsFileProviderTest.java
index 8873d17..b5cec83 100644
--- a/start/src/test/java/org/apache/accumulo/start/classloader/vfs/providers/ReadOnlyHdfsFileProviderTest.java
+++ b/start/src/test/java/org/apache/accumulo/start/classloader/vfs/providers/ReadOnlyHdfsFileProviderTest.java
@@ -33,12 +33,12 @@ import org.junit.Before;
 import org.junit.Test;
 
 public class ReadOnlyHdfsFileProviderTest extends AccumuloDFSBase {
-  
+
   private static final String TEST_DIR1 = getHdfsUri() + "/test-dir";
   private static final Path DIR1_PATH = new Path("/test-dir");
   private static final String TEST_FILE1 = TEST_DIR1 + "/accumulo-test-1.jar";
   private static final Path FILE1_PATH = new Path(DIR1_PATH, "accumulo-test-1.jar");
-  
+
   private DefaultFileSystemManager manager = null;
   private FileSystem hdfs = null;
 
@@ -49,16 +49,16 @@ public class ReadOnlyHdfsFileProviderTest extends AccumuloDFSBase {
     manager.init();
     this.hdfs = cluster.getFileSystem();
   }
-    
+
   private FileObject createTestFile(FileSystem hdfs) throws IOException {
-    //Create the directory
+    // Create the directory
     hdfs.mkdirs(DIR1_PATH);
     FileObject dir = manager.resolveFile(TEST_DIR1);
     Assert.assertNotNull(dir);
     Assert.assertTrue(dir.exists());
     Assert.assertTrue(dir.getType().equals(FileType.FOLDER));
-    
-    //Create the file in the directory
+
+    // Create the file in the directory
     hdfs.create(FILE1_PATH).close();
     FileObject f = manager.resolveFile(TEST_FILE1);
     Assert.assertNotNull(f);
@@ -66,84 +66,84 @@ public class ReadOnlyHdfsFileProviderTest extends AccumuloDFSBase {
     Assert.assertTrue(f.getType().equals(FileType.FILE));
     return f;
   }
-  
+
   @Test
   public void testInit() throws Exception {
     FileObject fo = manager.resolveFile(TEST_FILE1);
     Assert.assertNotNull(fo);
   }
-  
+
   @Test
   public void testExistsFails() throws Exception {
     FileObject fo = manager.resolveFile(TEST_FILE1);
     Assert.assertNotNull(fo);
     Assert.assertFalse(fo.exists());
   }
-  
+
   @Test
   public void testExistsSucceeds() throws Exception {
     FileObject fo = manager.resolveFile(TEST_FILE1);
     Assert.assertNotNull(fo);
     Assert.assertFalse(fo.exists());
-    
-    //Create the file
+
+    // Create the file
     @SuppressWarnings("unused")
     FileObject f = createTestFile(hdfs);
-    
+
   }
 
-  @Test(expected=UnsupportedOperationException.class)
+  @Test(expected = UnsupportedOperationException.class)
   public void testCanRenameTo() throws Exception {
     FileObject fo = createTestFile(this.hdfs);
     Assert.assertNotNull(fo);
     fo.canRenameTo(fo);
   }
-  
+
   @Test
   public void testDoListChildren() throws Exception {
     FileObject fo = manager.resolveFile(TEST_DIR1);
     Assert.assertNotNull(fo);
     Assert.assertFalse(fo.exists());
 
-    //Create the test file
+    // Create the test file
     FileObject file = createTestFile(hdfs);
     FileObject dir = file.getParent();
-    
+
     FileObject[] children = dir.getChildren();
     Assert.assertTrue(children.length == 1);
     Assert.assertTrue(children[0].getName().equals(file.getName()));
-    
+
   }
-  
+
   @Test
   public void testGetContentSize() throws Exception {
     FileObject fo = manager.resolveFile(TEST_DIR1);
     Assert.assertNotNull(fo);
     Assert.assertFalse(fo.exists());
 
-    //Create the test file
+    // Create the test file
     FileObject file = createTestFile(hdfs);
     Assert.assertEquals(0, file.getContent().getSize());
   }
-  
+
   @Test
   public void testGetInputStream() throws Exception {
     FileObject fo = manager.resolveFile(TEST_DIR1);
     Assert.assertNotNull(fo);
     Assert.assertFalse(fo.exists());
 
-    //Create the test file
+    // Create the test file
     FileObject file = createTestFile(hdfs);
     file.getContent().getInputStream().close();
   }
-  
+
   @Test
   public void testIsHidden() throws Exception {
     FileObject fo = manager.resolveFile(TEST_DIR1);
     Assert.assertNotNull(fo);
     Assert.assertFalse(fo.exists());
 
-    //Create the test file
+    // Create the test file
     FileObject file = createTestFile(hdfs);
     Assert.assertFalse(file.isHidden());
   }
@@ -154,7 +154,7 @@ public class ReadOnlyHdfsFileProviderTest extends AccumuloDFSBase {
     Assert.assertNotNull(fo);
     Assert.assertFalse(fo.exists());
 
-    //Create the test file
+    // Create the test file
     FileObject file = createTestFile(hdfs);
     Assert.assertTrue(file.isReadable());
   }
@@ -165,29 +165,29 @@ public class ReadOnlyHdfsFileProviderTest extends AccumuloDFSBase {
     Assert.assertNotNull(fo);
     Assert.assertFalse(fo.exists());
 
-    //Create the test file
+    // Create the test file
     FileObject file = createTestFile(hdfs);
     Assert.assertFalse(file.isWriteable());
   }
-  
+
   @Test
   public void testLastModificationTime() throws Exception {
     FileObject fo = manager.resolveFile(TEST_DIR1);
     Assert.assertNotNull(fo);
     Assert.assertFalse(fo.exists());
 
-    //Create the test file
+    // Create the test file
     FileObject file = createTestFile(hdfs);
     Assert.assertFalse(-1 == file.getContent().getLastModifiedTime());
   }
-  
+
   @Test
   public void testGetAttributes() throws Exception {
     FileObject fo = manager.resolveFile(TEST_DIR1);
     Assert.assertNotNull(fo);
     Assert.assertFalse(fo.exists());
 
-    //Create the test file
+    // Create the test file
     FileObject file = createTestFile(hdfs);
     Map<String,Object> attributes = file.getContent().getAttributes();
     Assert.assertTrue(attributes.containsKey(HdfsFileAttributes.BLOCK_SIZE.toString()));
@@ -198,14 +198,14 @@ public class ReadOnlyHdfsFileProviderTest extends AccumuloDFSBase {
     Assert.assertTrue(attributes.containsKey(HdfsFileAttributes.OWNER.toString()));
     Assert.assertTrue(attributes.containsKey(HdfsFileAttributes.PERMISSIONS.toString()));
   }
-  
-  @Test(expected=FileSystemException.class)
+
+  @Test(expected = FileSystemException.class)
   public void testRandomAccessContent() throws Exception {
     FileObject fo = manager.resolveFile(TEST_DIR1);
     Assert.assertNotNull(fo);
     Assert.assertFalse(fo.exists());
 
-    //Create the test file
+    // Create the test file
     FileObject file = createTestFile(hdfs);
     file.getContent().getRandomAccessContent(RandomAccessMode.READWRITE).close();
   }
@@ -216,7 +216,7 @@ public class ReadOnlyHdfsFileProviderTest extends AccumuloDFSBase {
     Assert.assertNotNull(fo);
     Assert.assertFalse(fo.exists());
 
-    //Create the test file
+    // Create the test file
     FileObject file = createTestFile(hdfs);
     file.getContent().getRandomAccessContent(RandomAccessMode.READ).close();
   }
@@ -227,20 +227,20 @@ public class ReadOnlyHdfsFileProviderTest extends AccumuloDFSBase {
     Assert.assertNotNull(fo);
     Assert.assertFalse(fo.exists());
 
-    //Create the test file
+    // Create the test file
     FileObject file = createTestFile(hdfs);
-    //Get a handle to the same file
+    // Get a handle to the same file
     FileObject file2 = manager.resolveFile(TEST_FILE1);
     Assert.assertEquals(file, file2);
   }
-  
+
   @After
   public void tearDown() throws Exception {
     if (null != hdfs) {
       hdfs.delete(DIR1_PATH, true);
-      hdfs.close();  
+      hdfs.close();
     }
     manager.close();
   }
-    
+
 }

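For orientation, the tests above resolve hdfs:// URIs through a DefaultFileSystemManager backed by Accumulo's read-only provider. A sketch of the setup implied by setUp(), with an assumed namenode address:

    import org.apache.accumulo.start.classloader.vfs.providers.ReadOnlyHdfsFileProvider;
    import org.apache.commons.vfs2.FileObject;
    import org.apache.commons.vfs2.impl.DefaultFileSystemManager;

    // inside setUp():
    DefaultFileSystemManager manager = new DefaultFileSystemManager();
    manager.addProvider("hdfs", new ReadOnlyHdfsFileProvider()); // read-only: write paths throw
    manager.init();
    FileObject fo = manager.resolveFile("hdfs://localhost:8020/test-dir/accumulo-test-1.jar");
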
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
----------------------------------------------------------------------
diff --git a/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java b/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
index 8e2d534..feab493 100644
--- a/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
+++ b/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
@@ -41,7 +41,7 @@ public class AccumuloDFSBase {
   protected static Configuration conf = null;
   protected static DefaultFileSystemManager vfs = null;
   protected static MiniDFSCluster cluster = null;
- 
+
   private static URI HDFS_URI;
 
   protected static URI getHdfsUri() {
@@ -53,7 +53,7 @@ public class AccumuloDFSBase {
     System.setProperty("java.io.tmpdir", System.getProperty("user.dir") + "/target");
     // System.setProperty("org.apache.commons.logging.Log", "org.apache.commons.logging.impl.NoOpLog");
     // Logger.getRootLogger().setLevel(Level.ERROR);
-    
+
     // Put the MiniDFSCluster directory in the target directory
     System.setProperty("test.build.data", "target/build/test/data");
 
@@ -63,7 +63,7 @@ public class AccumuloDFSBase {
 
     conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 1024); // 1M blocksize
-    
+
     try {
       cluster = new MiniDFSCluster(conf, 1, true, null);
       cluster.waitClusterUp();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/start/src/test/java/test/Test.java
----------------------------------------------------------------------
diff --git a/start/src/test/java/test/Test.java b/start/src/test/java/test/Test.java
index da8cd49..849199f 100644
--- a/start/src/test/java/test/Test.java
+++ b/start/src/test/java/test/Test.java
@@ -17,9 +17,9 @@
 package test;
 
 public interface Test {
-  
+
   public String hello();
-  
+
   public int add();
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/test/src/main/java/org/apache/accumulo/test/BulkImportDirectory.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/BulkImportDirectory.java b/test/src/main/java/org/apache/accumulo/test/BulkImportDirectory.java
index 8693d03..a0cc26e 100644
--- a/test/src/main/java/org/apache/accumulo/test/BulkImportDirectory.java
+++ b/test/src/main/java/org/apache/accumulo/test/BulkImportDirectory.java
@@ -36,15 +36,14 @@ import com.beust.jcommander.Parameter;
 
 public class BulkImportDirectory {
   static class Opts extends ClientOnRequiredTable {
-    @Parameter(names={"-s","--source"}, description="directory to import from")
+    @Parameter(names = {"-s", "--source"}, description = "directory to import from")
     String source = null;
-    @Parameter(names={"-f","--failures"}, description="directory to copy failures into: will be deleted before the bulk import")
+    @Parameter(names = {"-f", "--failures"}, description = "directory to copy failures into: will be deleted before the bulk import")
     String failures = null;
-    @Parameter(description="<username> <password> <tablename> <sourcedir> <failuredir>")
+    @Parameter(description = "<username> <password> <tablename> <sourcedir> <failuredir>")
     List<String> args = new ArrayList<String>();
   }
-  
-  
+
   public static void main(String[] args) throws IOException, AccumuloException, AccumuloSecurityException, TableNotFoundException {
     final FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
     Opts opts = new Opts();

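The options above feed Accumulo's bulk import; a hedged sketch of the call they would typically drive (1.x-era API; connector acquisition and argument parsing elided, names mirror Opts above):

    // Assumes opts has been parsed and a Connector obtained; fs is the
    // FileSystem from main (Path is org.apache.hadoop.fs.Path).
    fs.delete(new Path(opts.failures), true);   // failures dir is cleared beforehand
    fs.mkdirs(new Path(opts.failures));
    connector.tableOperations().importDirectory(opts.getTableName(), opts.source, opts.failures, false);
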
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java b/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
index c113b87..9b99f39 100644
--- a/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
+++ b/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
@@ -21,68 +21,69 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.cli.Help;
+import org.apache.log4j.Logger;
 
 import com.beust.jcommander.Parameter;
-import org.apache.log4j.Logger;
 
 public class CreateRFiles {
-  
+
   private static final Logger log = Logger.getLogger(CreateRFiles.class);
-  
+
   static class Opts extends Help {
-    
-    @Parameter(names="--output", description="the destiation directory")
+
+    @Parameter(names = "--output", description = "the destiation directory")
     String outputDirectory;
-    
-    @Parameter(names="--numThreads", description="number of threads to use when generating files")
+
+    @Parameter(names = "--numThreads", description = "number of threads to use when generating files")
     int numThreads = 4;
-    
-    @Parameter(names="--start", description="the start number for test data")
+
+    @Parameter(names = "--start", description = "the start number for test data")
     long start = 0;
-    
-    @Parameter(names="--end", description="the maximum number for test data")
-    long end = 10*1000*1000;
-    
-    @Parameter(names="--splits", description="the number of splits in the data")
+
+    @Parameter(names = "--end", description = "the maximum number for test data")
+    long end = 10 * 1000 * 1000;
+
+    @Parameter(names = "--splits", description = "the number of splits in the data")
     long numsplits = 4;
   }
-  
+
   public static void main(String[] args) {
     Opts opts = new Opts();
     opts.parseArgs(CreateRFiles.class.getName(), args);
-    
+
     long splitSize = Math.round((opts.end - opts.start) / (double) opts.numsplits);
-    
+
     long currStart = opts.start;
     long currEnd = opts.start + splitSize;
-    
+
     ExecutorService threadPool = Executors.newFixedThreadPool(opts.numThreads);
-    
+
     int count = 0;
     while (currEnd <= opts.end && currStart < currEnd) {
-      
-      final String tia = String.format("--rfile %s/mf%05d --timestamp 1 --size 50 --random 56 --rows %d --start %d --user root", opts.outputDirectory, count, currEnd - currStart, currStart);
-      
+
+      final String tia = String.format("--rfile %s/mf%05d --timestamp 1 --size 50 --random 56 --rows %d --start %d --user root", opts.outputDirectory, count,
+          currEnd - currStart, currStart);
+
       Runnable r = new Runnable() {
-        
+
         @Override
         public void run() {
           try {
             TestIngest.main(tia.split(" "));
           } catch (Exception e) {
-            log.error("Could not run "+TestIngest.class.getName()+".main using the input '"+tia+"'", e);
+            log.error("Could not run " + TestIngest.class.getName() + ".main using the input '" + tia + "'", e);
           }
         }
-        
+
       };
-      
+
       threadPool.execute(r);
-      
+
       count++;
       currStart = currEnd;
       currEnd = Math.min(opts.end, currStart + splitSize);
     }
-    
+
     threadPool.shutdown();
     while (!threadPool.isTerminated())
       try {

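The hunk above ends inside the shutdown loop; for reference, the usual shape of that idiom (the timeout value is an assumption):

    threadPool.shutdown();
    while (!threadPool.isTerminated()) {
      try {
        threadPool.awaitTermination(1, TimeUnit.MINUTES); // returns early once the pool drains
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        break;
      }
    }
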
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/test/src/main/java/org/apache/accumulo/test/CreateRandomRFile.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/CreateRandomRFile.java b/test/src/main/java/org/apache/accumulo/test/CreateRandomRFile.java
index 5ef1681..ada8504 100644
--- a/test/src/main/java/org/apache/accumulo/test/CreateRandomRFile.java
+++ b/test/src/main/java/org/apache/accumulo/test/CreateRandomRFile.java
@@ -33,21 +33,21 @@ import org.apache.hadoop.io.Text;
 public class CreateRandomRFile {
   private static int num;
   private static String file;
-  
+
   public static byte[] createValue(long rowid, int dataSize) {
     Random r = new Random(rowid);
     byte value[] = new byte[dataSize];
-    
+
     r.nextBytes(value);
-    
+
     // transform to printable chars
     for (int j = 0; j < value.length; j++) {
       value[j] = (byte) (((0xff & value[j]) % 92) + ' ');
     }
-    
+
     return value;
   }
-  
+
   public static void main(String[] args) {
     if (args.length != 2) {
       System.err.println("Usage CreateRandomRFile <filename> <size>");
@@ -56,15 +56,15 @@ public class CreateRandomRFile {
     file = args[0];
     num = Integer.parseInt(args[1]);
     long rands[] = new long[num];
-    
+
     Random r = new Random();
-    
+
     for (int i = 0; i < rands.length; i++) {
       rands[i] = (r.nextLong() & 0x7fffffffffffffffl) % 10000000000l;
     }
-    
+
     Arrays.sort(rands);
-    
+
     Configuration conf = CachedConfiguration.getInstance();
     FileSKVWriter mfw;
     try {
@@ -73,25 +73,25 @@ public class CreateRandomRFile {
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
-    
+
     for (int i = 0; i < rands.length; i++) {
       Text row = new Text(String.format("row_%010d", rands[i]));
       Key key = new Key(row);
-      
+
       Value dv = new Value(createValue(rands[i], 40));
-      
+
       try {
         mfw.append(key, dv);
       } catch (IOException e) {
         throw new RuntimeException(e);
       }
     }
-    
+
     try {
       mfw.close();
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
   }
-  
+
 }

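A note on the createValue() transform above, which maps each random byte into printable ASCII:

    // 0xff & value[j]  -> 0..255   (unsigned view of the signed byte)
    // % 92             -> 0..91
    // + ' '            -> 32..123  (space through '{'), all printable
    value[j] = (byte) (((0xff & value[j]) % 92) + ' ');
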
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java b/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
index 5716311..1ab2a8a 100644
--- a/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
+++ b/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
@@ -27,17 +27,17 @@ import org.apache.hadoop.io.Text;
 
 abstract class MemoryUsageTest {
   abstract void addEntry(int i);
-  
+
   abstract int getEstimatedBytesPerEntry();
-  
+
   abstract void clear();
-  
+
   abstract int getNumPasses();
-  
+
   abstract String getName();
-  
+
   abstract void init();
-  
+
   public void run() {
     System.gc();
     long usedMem = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
@@ -47,53 +47,53 @@ abstract class MemoryUsageTest {
       usedMem = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
       count++;
     }
-    
+
     init();
-    
+
     for (int i = 0; i < getNumPasses(); i++) {
       addEntry(i);
     }
-    
+
     System.gc();
-    
+
     long memSize = (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) - usedMem;
-    
+
     double actualBytesPerEntry = memSize / (double) getNumPasses();
     double expectedBytesPerEntry = getEstimatedBytesPerEntry();
     double diff = actualBytesPerEntry - expectedBytesPerEntry;
     double ratio = actualBytesPerEntry / expectedBytesPerEntry * 100;
-    
+
     System.out.printf("%30s | %,10d | %6.2fGB | %6.2f | %6.2f | %6.2f | %6.2f%s%n", getName(), getNumPasses(), memSize / (1024 * 1024 * 1024.0),
         actualBytesPerEntry, expectedBytesPerEntry, diff, ratio, "%");
-    
+
     clear();
-    
+
   }
-  
+
 }
 
 class TextMemoryUsageTest extends MemoryUsageTest {
-  
+
   private int keyLen;
   private int colFamLen;
   private int colQualLen;
   private int dataLen;
   private TreeMap<Text,Value> map;
   private int passes;
-  
+
   TextMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int dataLen) {
     this.keyLen = keyLen;
     this.colFamLen = colFamLen;
     this.colQualLen = colQualLen;
     this.dataLen = dataLen;
     this.passes = passes;
-    
+
   }
-  
+
   void init() {
     map = new TreeMap<Text,Value>();
   }
-  
+
   public void addEntry(int i) {
     Text key = new Text(String.format("%0" + keyLen + "d:%0" + colFamLen + "d:%0" + colQualLen + "d", i, 0, 0).getBytes());
     //
@@ -102,45 +102,45 @@ class TextMemoryUsageTest extends MemoryUsageTest {
       data[j] = (byte) (j % 10 + 65);
     }
     Value value = new Value(data);
-    
+
     map.put(key, value);
-    
+
   }
-  
+
   public void clear() {
     map.clear();
     map = null;
   }
-  
+
   public int getEstimatedBytesPerEntry() {
     return keyLen + colFamLen + colQualLen + dataLen;
   }
-  
+
   int getNumPasses() {
     return passes;
   }
-  
+
   String getName() {
     return "Text " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
   }
-  
+
 }
 
 class InMemoryMapMemoryUsageTest extends MemoryUsageTest {
-  
+
   private int keyLen;
   private int colFamLen;
   private int colQualLen;
   private int colVisLen;
   private int dataLen;
-  
+
   private InMemoryMap imm;
   private Text key;
   private Text colf;
   private Text colq;
   private ColumnVisibility colv;
   private int passes;
-  
+
   InMemoryMapMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int colVisLen, int dataLen) {
     this.keyLen = keyLen;
     this.colFamLen = colFamLen;
@@ -148,68 +148,68 @@ class InMemoryMapMemoryUsageTest extends MemoryUsageTest {
     this.dataLen = dataLen;
     this.passes = passes;
     this.colVisLen = colVisLen;
-    
+
   }
-  
+
   void init() {
     imm = new InMemoryMap(false, "/tmp");
     key = new Text();
-    
+
     colf = new Text(String.format("%0" + colFamLen + "d", 0));
     colq = new Text(String.format("%0" + colQualLen + "d", 0));
     colv = new ColumnVisibility(String.format("%0" + colVisLen + "d", 0));
   }
-  
+
   public void addEntry(int i) {
     key.set(String.format("%0" + keyLen + "d", i));
-    
+
     Mutation m = new Mutation(key);
-    
+
     byte data[] = new byte[dataLen];
     for (int j = 0; j < data.length; j++) {
       data[j] = (byte) (j % 10 + 65);
     }
     Value idata = new Value(data);
-    
+
     m.put(colf, colq, colv, idata);
-    
+
     imm.mutate(Collections.singletonList(m));
-    
+
   }
-  
+
   public int getEstimatedBytesPerEntry() {
     return keyLen + colFamLen + colQualLen + dataLen + 4 + colVisLen;
   }
-  
+
   public void clear() {
     imm = null;
     key = null;
     colf = null;
     colq = null;
   }
-  
+
   int getNumPasses() {
     return passes;
   }
-  
+
   String getName() {
     return "IMM " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
   }
 }
 
 class MutationMemoryUsageTest extends MemoryUsageTest {
-  
+
   private int keyLen;
   private int colFamLen;
   private int colQualLen;
   private int dataLen;
-  
+
   private Mutation[] mutations;
   private Text key;
   private Text colf;
   private Text colq;
   private int passes;
-  
+
   MutationMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int dataLen) {
     this.keyLen = keyLen;
     this.colFamLen = colFamLen;
@@ -217,126 +217,126 @@ class MutationMemoryUsageTest extends MemoryUsageTest {
     this.dataLen = dataLen;
     this.passes = passes;
     mutations = new Mutation[passes];
-    
+
   }
-  
+
   void init() {
     key = new Text();
-    
+
     colf = new Text(String.format("%0" + colFamLen + "d", 0));
     colq = new Text(String.format("%0" + colQualLen + "d", 0));
-    
+
     byte data[] = new byte[dataLen];
     for (int i = 0; i < data.length; i++) {
       data[i] = (byte) (i % 10 + 65);
     }
   }
-  
+
   public void addEntry(int i) {
     key.set(String.format("%0" + keyLen + "d", i));
-    
+
     Mutation m = new Mutation(key);
-    
+
     byte data[] = new byte[dataLen];
     for (int j = 0; j < data.length; j++) {
       data[j] = (byte) (j % 10 + 65);
     }
     Value idata = new Value(data);
-    
+
     m.put(colf, colq, idata);
-    
+
     mutations[i] = m;
   }
-  
+
   public int getEstimatedBytesPerEntry() {
     return keyLen + colFamLen + colQualLen + dataLen;
   }
-  
+
   public void clear() {
     key = null;
     colf = null;
     colq = null;
     mutations = null;
   }
-  
+
   int getNumPasses() {
     return passes;
   }
-  
+
   String getName() {
     return "Mutation " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
   }
 }
 
 class IntObjectMemoryUsageTest extends MemoryUsageTest {
-  
+
   private int passes;
   private Object data[];
-  
+
   static class SimpleObject {
     int d;
-    
+
     SimpleObject(int d) {
       this.d = d;
     }
   }
-  
+
   IntObjectMemoryUsageTest(int numPasses) {
     this.passes = numPasses;
   }
-  
+
   void init() {
     data = new Object[passes];
   }
-  
+
   void addEntry(int i) {
     data[i] = new SimpleObject(i);
-    
+
   }
-  
+
   void clear() {}
-  
+
   int getEstimatedBytesPerEntry() {
     return 4;
   }
-  
+
   String getName() {
     return "int obj";
   }
-  
+
   int getNumPasses() {
     return passes;
   }
-  
+
 }
 
 public class EstimateInMemMapOverhead {
-  
+
   private static void runTest(int numEntries, int keyLen, int colFamLen, int colQualLen, int colVisLen, int dataLen) {
     new IntObjectMemoryUsageTest(numEntries).run();
     new InMemoryMapMemoryUsageTest(numEntries, keyLen, colFamLen, colQualLen, colVisLen, dataLen).run();
     new TextMemoryUsageTest(numEntries, keyLen, colFamLen, colQualLen, dataLen).run();
     new MutationMemoryUsageTest(numEntries, keyLen, colFamLen, colQualLen, dataLen).run();
   }
-  
+
   public static void main(String[] args) {
     runTest(10000, 10, 4, 4, 4, 20);
     runTest(100000, 10, 4, 4, 4, 20);
     runTest(500000, 10, 4, 4, 4, 20);
     runTest(1000000, 10, 4, 4, 4, 20);
     runTest(2000000, 10, 4, 4, 4, 20);
-    
+
     runTest(10000, 20, 5, 5, 5, 500);
     runTest(100000, 20, 5, 5, 5, 500);
     runTest(500000, 20, 5, 5, 5, 500);
     runTest(1000000, 20, 5, 5, 5, 500);
     runTest(2000000, 20, 5, 5, 5, 500);
-    
+
     runTest(10000, 40, 10, 10, 10, 1000);
     runTest(100000, 40, 10, 10, 10, 1000);
     runTest(500000, 40, 10, 10, 10, 1000);
     runTest(1000000, 40, 10, 10, 10, 1000);
     runTest(2000000, 40, 10, 10, 10, 1000);
   }
-  
+
 }

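The run() method above estimates per-entry overhead by differencing heap usage around the insert loop. Its core measurement, pulled out as a standalone sketch (the stabilization loop mirrors run(); System.gc() is only advisory, hence the repetition):

    static long usedHeap() {
      Runtime rt = Runtime.getRuntime();
      System.gc();
      long used = rt.totalMemory() - rt.freeMemory();
      int count = 0;
      while (used > 1024 * 1024 && count < 10) { // same bounds run() uses
        System.gc();
        used = rt.totalMemory() - rt.freeMemory();
        count++;
      }
      return used;
    }
    // bytesPerEntry = (usedHeapAfter - usedHeapBefore) / (double) numEntries
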
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/test/src/main/java/org/apache/accumulo/test/FaultyConditionalWriter.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/FaultyConditionalWriter.java b/test/src/main/java/org/apache/accumulo/test/FaultyConditionalWriter.java
index 7e7480f..673e61d 100644
--- a/test/src/main/java/org/apache/accumulo/test/FaultyConditionalWriter.java
+++ b/test/src/main/java/org/apache/accumulo/test/FaultyConditionalWriter.java
@@ -24,17 +24,16 @@ import java.util.Random;
 import org.apache.accumulo.core.client.ConditionalWriter;
 import org.apache.accumulo.core.data.ConditionalMutation;
 
-
 /**
  * A writer that will sometimes return unknown. When it returns unknown the condition may or may not have been written.
  */
 public class FaultyConditionalWriter implements ConditionalWriter {
-  
+
   private ConditionalWriter cw;
   private double up;
   private Random rand;
   private double wp;
-  
+
   public FaultyConditionalWriter(ConditionalWriter cw, double unknownProbability, double writeProbability) {
     this.cw = cw;
     this.up = unknownProbability;
@@ -46,7 +45,7 @@ public class FaultyConditionalWriter implements ConditionalWriter {
   public Iterator<Result> write(Iterator<ConditionalMutation> mutations) {
     ArrayList<Result> resultList = new ArrayList<Result>();
     ArrayList<ConditionalMutation> writes = new ArrayList<ConditionalMutation>();
-    
+
     while (mutations.hasNext()) {
       ConditionalMutation cm = mutations.next();
       if (rand.nextDouble() <= up && rand.nextDouble() > wp)
@@ -54,13 +53,13 @@ public class FaultyConditionalWriter implements ConditionalWriter {
       else
         writes.add(cm);
     }
-    
+
     if (writes.size() > 0) {
       Iterator<Result> results = cw.write(writes.iterator());
-      
+
       while (results.hasNext()) {
         Result result = results.next();
-        
+
         if (rand.nextDouble() <= up && rand.nextDouble() <= wp)
           result = new Result(Status.UNKNOWN, result.getMutation(), result.getTabletServer());
         resultList.add(result);
@@ -68,14 +67,14 @@ public class FaultyConditionalWriter implements ConditionalWriter {
     }
     return resultList.iterator();
   }
-  
+
   public Result write(ConditionalMutation mutation) {
     return write(Collections.singleton(mutation).iterator()).next();
   }
-  
+
   @Override
   public void close() {
     cw.close();
   }
-  
+
 }

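FaultyConditionalWriter wraps a real ConditionalWriter to inject UNKNOWN results for fault-injection tests. A hypothetical construction (the table name and probabilities are illustrative):

    ConditionalWriter real = connector.createConditionalWriter("table", new ConditionalWriterConfig());
    // up = 0.1: roughly 10% of results come back UNKNOWN;
    // wp = 0.8: of those, about 80% were actually written underneath.
    ConditionalWriter flaky = new FaultyConditionalWriter(real, 0.1, 0.8);
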
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/test/src/main/java/org/apache/accumulo/test/GetMasterStats.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/GetMasterStats.java b/test/src/main/java/org/apache/accumulo/test/GetMasterStats.java
index 5f9ea51..5c2cbf3 100644
--- a/test/src/main/java/org/apache/accumulo/test/GetMasterStats.java
+++ b/test/src/main/java/org/apache/accumulo/test/GetMasterStats.java
@@ -56,7 +56,7 @@ public class GetMasterStats {
     out(0, "Unassigned tablets: %d", stats.unassignedTablets);
     if (stats.badTServers != null && stats.badTServers.size() > 0) {
       out(0, "Bad servers");
-      
+
       for (Entry<String,Byte> entry : stats.badTServers.entrySet()) {
         out(1, "%s: %d", entry.getKey(), (int) entry.getValue());
       }
@@ -120,12 +120,12 @@ public class GetMasterStats {
       }
     }
   }
-  
+
   private static void out(int indent, String string, Object... args) {
     for (int i = 0; i < indent; i++) {
       System.out.print(" ");
     }
     System.out.println(String.format(string, args));
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/test/src/main/java/org/apache/accumulo/test/IMMLGBenchmark.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/IMMLGBenchmark.java b/test/src/main/java/org/apache/accumulo/test/IMMLGBenchmark.java
index f993ba1..3d86eab 100644
--- a/test/src/main/java/org/apache/accumulo/test/IMMLGBenchmark.java
+++ b/test/src/main/java/org/apache/accumulo/test/IMMLGBenchmark.java
@@ -48,21 +48,21 @@ import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 
 /**
- * 
+ *
  */
 public class IMMLGBenchmark {
   public static void main(String[] args) throws Exception {
     ZooKeeperInstance zki = new ZooKeeperInstance(new ClientConfiguration().withInstance("test16").withZkHosts("localhost"));
     Connector conn = zki.getConnector("root", new PasswordToken("secret"));
-    
+
     int numlg = Integer.parseInt(args[0]);
-    
+
     ArrayList<byte[]> cfset = new ArrayList<byte[]>();
-    
+
     for (int i = 0; i < 32; i++) {
       cfset.add(String.format("%04x", i).getBytes());
     }
-    
+
     Map<String,Stat> stats = new TreeMap<String,Stat>();
 
     for (int i = 0; i < 5; i++) {
@@ -78,14 +78,14 @@ public class IMMLGBenchmark {
 
   private static void runTest(Connector conn, int numlg, ArrayList<byte[]> cfset, Map<String,Stat> stats) throws Exception {
     String table = "immlgb";
-    
+
     try {
       conn.tableOperations().delete(table);
     } catch (TableNotFoundException tnfe) {}
 
     conn.tableOperations().create(table);
     conn.tableOperations().setProperty(table, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "snappy");
-    
+
     setupLocalityGroups(conn, numlg, cfset, table);
 
     addStat(stats, "write", write(conn, cfset, table));
@@ -96,13 +96,13 @@ public class IMMLGBenchmark {
     long t1 = System.currentTimeMillis();
     conn.tableOperations().flush(table, null, null, true);
     long t2 = System.currentTimeMillis();
-    
+
     addStat(stats, "flush", t2 - t1);
   }
-  
+
   private static void addStat(Map<String,Stat> stats, String s, long wt) {
     System.out.println(s + ":" + wt);
-    
+
     if (stats == null)
       return;
 
@@ -113,55 +113,56 @@ public class IMMLGBenchmark {
     }
     stat.addStat(wt);
   }
-  
+
   private static long scan(Connector conn, ArrayList<byte[]> cfset, String table, boolean cq) throws TableNotFoundException {
     Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
-    
+
     if (!cq)
       scanner.fetchColumnFamily(new Text(cfset.get(15)));
     else
       scanner.fetchColumn(new Text(cfset.get(15)), new Text(cfset.get(15)));
 
     long t1 = System.currentTimeMillis();
-    
+
     @SuppressWarnings("unused")
     int count = 0;
-    for (@SuppressWarnings("unused") Entry<Key,Value> entry : scanner) {
+    for (@SuppressWarnings("unused")
+    Entry<Key,Value> entry : scanner) {
       count++;
     }
-    
+
     long t2 = System.currentTimeMillis();
-    
+
     return t2 - t1;
-    
+
   }
-  
+
   private static long write(Connector conn, ArrayList<byte[]> cfset, String table) throws TableNotFoundException, MutationsRejectedException {
     Random rand = new Random();
-    
+
     byte val[] = new byte[50];
-    
+
     BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
-    
+
     long t1 = System.currentTimeMillis();
 
     for (int i = 0; i < 1 << 15; i++) {
       byte[] row = FastFormat.toZeroPaddedString(abs(rand.nextLong()), 16, 16, new byte[0]);
-      
+
       Mutation m = new Mutation(row);
       for (byte[] cf : cfset) {
         byte[] cq = FastFormat.toZeroPaddedString(rand.nextInt(1 << 16), 4, 16, new byte[0]);
         rand.nextBytes(val);
         m.put(cf, cq, val);
       }
-      
+
       bw.addMutation(m);
     }
-    
+
     bw.close();
-    
+
     long t2 = System.currentTimeMillis();
-    
+
     return t2 - t1;
   }
 
@@ -170,7 +171,7 @@ public class IMMLGBenchmark {
     if (numlg > 1) {
       int numCF = cfset.size() / numlg;
       int gNum = 0;
-      
+
       Iterator<byte[]> cfiter = cfset.iterator();
       Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
       while (cfiter.hasNext()) {
@@ -178,19 +179,19 @@ public class IMMLGBenchmark {
         for (int i = 0; i < numCF && cfiter.hasNext(); i++) {
           groupCols.add(new Text(cfiter.next()));
         }
-        
+
         groups.put("lg" + (gNum++), groupCols);
       }
-      
+
       conn.tableOperations().setLocalityGroups(table, groups);
       conn.tableOperations().offline(table);
       UtilWaitThread.sleep(1000);
       conn.tableOperations().online(table);
     }
   }
-  
+
   public static long abs(long l) {
-    l = Math.abs(l);  // abs(Long.MIN_VALUE) == Long.MIN_VALUE... 
+    l = Math.abs(l); // abs(Long.MIN_VALUE) == Long.MIN_VALUE...
     if (l < 0)
       return 0;
     return l;

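A note on the abs() helper above: in two's-complement arithmetic Long.MIN_VALUE (-2^63) has no positive counterpart, so Math.abs(Long.MIN_VALUE) returns Long.MIN_VALUE unchanged; the trailing "if (l < 0) return 0;" guard exists solely for that one input.
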
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/test/src/main/java/org/apache/accumulo/test/ListTables.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/ListTables.java b/test/src/main/java/org/apache/accumulo/test/ListTables.java
index 468b2d5..be8a7d3 100644
--- a/test/src/main/java/org/apache/accumulo/test/ListTables.java
+++ b/test/src/main/java/org/apache/accumulo/test/ListTables.java
@@ -18,8 +18,8 @@ package org.apache.accumulo.test;
 
 import java.util.Map.Entry;
 
-import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.server.cli.ClientOpts;
 
 /**
  * This little program is used by the functional test to get a list of table ids.

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/test/src/main/java/org/apache/accumulo/test/NativeMapConcurrencyTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/NativeMapConcurrencyTest.java b/test/src/main/java/org/apache/accumulo/test/NativeMapConcurrencyTest.java
index c9d18e1..015cda4 100644
--- a/test/src/main/java/org/apache/accumulo/test/NativeMapConcurrencyTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/NativeMapConcurrencyTest.java
@@ -27,38 +27,38 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.FastFormat;
 import org.apache.accumulo.tserver.NativeMap;
 import org.apache.hadoop.io.Text;
+import org.apache.log4j.Logger;
 
 import com.beust.jcommander.JCommander;
 import com.beust.jcommander.Parameter;
-import org.apache.log4j.Logger;
 
 public class NativeMapConcurrencyTest {
-  
+
   private static final Logger log = Logger.getLogger(NativeMapConcurrencyTest.class);
-  
+
   private static final byte ROW_PREFIX[] = new byte[] {'r'};
   private static final byte COL_PREFIX[] = new byte[] {'c'};
-  
+
   static Mutation nm(int r) {
     return new Mutation(new Text(FastFormat.toZeroPaddedString(r, 6, 10, ROW_PREFIX)));
   }
-  
+
   private static final Text ET = new Text();
-  
+
   private static void pc(Mutation m, int c, Value v) {
     m.put(new Text(FastFormat.toZeroPaddedString(c, 3, 10, COL_PREFIX)), ET, v);
   }
-  
+
   static NativeMap create(int numRows, int numCols) {
-    
+
     NativeMap nm = new NativeMap();
-    
+
     populate(0, numRows, numCols, nm);
-    
+
     return nm;
-    
+
   }
-  
+
   private static void populate(int start, int numRows, int numCols, NativeMap nm) {
     long t1 = System.currentTimeMillis();
     int mc = 1;
@@ -71,27 +71,27 @@ public class NativeMapConcurrencyTest {
       nm.mutate(m, mc++);
     }
     long t2 = System.currentTimeMillis();
-    
+
     System.out.printf("inserted %,d in %,d %,d %,6.2f%n", (numRows * numCols), (t2 - t1), nm.size(), rate((numRows * numCols), (t2 - t1)));
   }
-  
+
   private static double rate(int num, long ms) {
     return num / (ms / 1000.0);
   }
-  
+
   static class Opts {
-    @Parameter(names="--rows", description="rows", required = true)
+    @Parameter(names = "--rows", description = "rows", required = true)
     int rows = 0;
-    @Parameter(names="--cols", description="cols")
+    @Parameter(names = "--cols", description = "cols")
     int cols = 1;
-    @Parameter(names="--threads", description="threads")
+    @Parameter(names = "--threads", description = "threads")
     int threads = 1;
-    @Parameter(names="--writeThreads", description="write threads")
+    @Parameter(names = "--writeThreads", description = "write threads")
     int writeThreads = 1;
-    @Parameter(names="-help", help=true)
+    @Parameter(names = "-help", help = true)
     boolean help = false;
   }
-  
+
   public static void main(String[] args) {
     Opts opts = new Opts();
     JCommander jc = new JCommander(opts);
@@ -105,86 +105,86 @@ public class NativeMapConcurrencyTest {
     runTest(nm, opts.rows, opts.cols, opts.threads, opts.writeThreads);
     nm.delete();
   }
-  
+
   static class ScanTask implements Runnable {
-    
+
     private NativeMap nm;
-    
+
     ScanTask(NativeMap nm) {
       this.nm = nm;
     }
-    
+
     @Override
     public void run() {
-      
+
       for (int i = 0; i < 10; i++) {
-        
+
         Iterator<Entry<Key,Value>> iter = nm.iterator();
-        
+
         long t1 = System.currentTimeMillis();
-        
+
         int count = 0;
-        
+
         while (iter.hasNext()) {
           count++;
           iter.next();
         }
-        
+
         long t2 = System.currentTimeMillis();
-        
+
         System.out.printf("%d %,d %,d %,d %,d %,6.2f%n", Thread.currentThread().getId(), (t2 - t1), t1, t2, count, rate(count, (t2 - t1)));
       }
     }
-    
+
   }
-  
+
   static class WriteTask implements Runnable {
-    
+
     private int start;
     private int rows;
     private int cols;
     private NativeMap nm;
-    
+
     WriteTask(int start, int rows, int cols, NativeMap nm) {
       this.start = start;
       this.rows = rows;
       this.cols = cols;
       this.nm = nm;
     }
-    
+
     @Override
     public void run() {
       populate(start, rows, cols, nm);
     }
-    
+
   }
-  
+
   private static void runTest(NativeMap nm, int rows, int cols, int numReadThreads, int writeThreads) {
-    
+
     Thread threads[] = new Thread[numReadThreads + writeThreads];
-    
+
     for (int i = 0; i < numReadThreads; i++) {
       threads[i] = new Thread(new ScanTask(nm));
     }
-    
+
     int start = 0;
     for (int i = numReadThreads; i < writeThreads + numReadThreads; i++) {
       threads[i] = new Thread(new WriteTask(start, rows, cols, nm));
       // start += rows;
     }
-    
+
     for (Thread thread : threads) {
       thread.start();
     }
-    
+
     for (Thread thread : threads) {
       try {
         thread.join();
       } catch (InterruptedException e) {
-        log.error("Could not join thread '"+thread.getName()+"'", e);
+        log.error("Could not join thread '" + thread.getName() + "'", e);
       }
     }
-    
+
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/test/src/main/java/org/apache/accumulo/test/NativeMapPerformanceTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/NativeMapPerformanceTest.java b/test/src/main/java/org/apache/accumulo/test/NativeMapPerformanceTest.java
index 1e1006b..0285092 100644
--- a/test/src/main/java/org/apache/accumulo/test/NativeMapPerformanceTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/NativeMapPerformanceTest.java
@@ -35,29 +35,29 @@ import org.apache.accumulo.tserver.NativeMap;
 import org.apache.hadoop.io.Text;
 
 public class NativeMapPerformanceTest {
-  
+
   private static final byte ROW_PREFIX[] = new byte[] {'r'};
   private static final byte COL_PREFIX[] = new byte[] {'c'};
-  
+
   static Key nk(int r, int c) {
     return new Key(new Text(FastFormat.toZeroPaddedString(r, 9, 10, ROW_PREFIX)), new Text(FastFormat.toZeroPaddedString(c, 6, 10, COL_PREFIX)));
   }
-  
+
   static Mutation nm(int r) {
     return new Mutation(new Text(FastFormat.toZeroPaddedString(r, 9, 10, ROW_PREFIX)));
   }
-  
+
   static Text ET = new Text();
-  
+
   private static void pc(Mutation m, int c, Value v) {
     m.put(new Text(FastFormat.toZeroPaddedString(c, 6, 10, COL_PREFIX)), ET, Long.MAX_VALUE, v);
   }
-  
+
   static void runPerformanceTest(int numRows, int numCols, int numLookups, String mapType) {
-    
+
     SortedMap<Key,Value> tm = null;
     NativeMap nm = null;
-    
+
     if (mapType.equals("SKIP_LIST"))
       tm = new ConcurrentSkipListMap<Key,Value>();
     else if (mapType.equals("TREE_MAP"))
@@ -66,12 +66,12 @@ public class NativeMapPerformanceTest {
       nm = new NativeMap();
     else
       throw new IllegalArgumentException(" map type must be SKIP_LIST, TREE_MAP, or NATIVE_MAP");
-    
+
     Random rand = new Random(19);
-    
+
     // puts
     long tps = System.currentTimeMillis();
-    
+
     if (nm != null) {
       for (int i = 0; i < numRows; i++) {
         int row = rand.nextInt(1000000000);
@@ -94,9 +94,9 @@ public class NativeMapPerformanceTest {
         }
       }
     }
-    
+
     long tpe = System.currentTimeMillis();
-    
+
     // Iteration
     Iterator<Entry<Key,Value>> iter;
     if (nm != null) {
@@ -104,15 +104,15 @@ public class NativeMapPerformanceTest {
     } else {
       iter = tm.entrySet().iterator();
     }
-    
+
     long tis = System.currentTimeMillis();
-    
+
     while (iter.hasNext()) {
       iter.next();
     }
-    
+
     long tie = System.currentTimeMillis();
-    
+
     rand = new Random(19);
     int rowsToLookup[] = new int[numLookups];
     int colsToLookup[] = new int[numLookups];
@@ -122,13 +122,13 @@ public class NativeMapPerformanceTest {
       for (int j = 0; j < numCols; j++) {
         col = rand.nextInt(1000000);
       }
-      
+
       rowsToLookup[i] = row;
       colsToLookup[i] = col;
     }
-    
+
     // get
-    
+
     long tgs = System.currentTimeMillis();
     if (nm != null) {
       for (int i = 0; i < numLookups; i++) {
@@ -146,51 +146,51 @@ public class NativeMapPerformanceTest {
       }
     }
     long tge = System.currentTimeMillis();
-    
+
     long memUsed = 0;
     if (nm != null) {
       memUsed = nm.getMemoryUsed();
     }
-    
+
     int size = (nm == null ? tm.size() : nm.size());
-    
+
     // delete
     long tds = System.currentTimeMillis();
-    
+
     if (nm != null)
       nm.delete();
-    
+
     long tde = System.currentTimeMillis();
-    
+
     if (tm != null)
       tm.clear();
-    
+
     System.gc();
     System.gc();
     System.gc();
     System.gc();
-    
+
     UtilWaitThread.sleep(3000);
-    
+
     System.out.printf("mapType:%10s   put rate:%,6.2f  scan rate:%,6.2f  get rate:%,6.2f  delete time : %6.2f  mem : %,d%n", "" + mapType, (numRows * numCols)
         / ((tpe - tps) / 1000.0), (size) / ((tie - tis) / 1000.0), numLookups / ((tge - tgs) / 1000.0), (tde - tds) / 1000.0, memUsed);
-    
+
   }
-  
+
   public static void main(String[] args) {
-    
+
     if (args.length != 3) {
       throw new IllegalArgumentException("Usage : " + NativeMapPerformanceTest.class.getName() + " <map type> <rows> <columns>");
     }
-    
+
     String mapType = args[0];
     int rows = Integer.parseInt(args[1]);
     int cols = Integer.parseInt(args[2]);
-    
+
     runPerformanceTest(rows, cols, 10000, mapType);
     runPerformanceTest(rows, cols, 10000, mapType);
     runPerformanceTest(rows, cols, 10000, mapType);
-    
+
   }
-  
+
 }

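The timing scheme in runPerformanceTest is just wall-clock deltas divided into an operation count. A minimal sketch of that rate measurement, assuming a plain JDK ConcurrentSkipListMap in place of NativeMap (class name and sizes are illustrative, not from the test):

import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;

public class PutRateSketch {
  public static void main(String[] args) {
    Map<Integer,String> map = new ConcurrentSkipListMap<Integer,String>();
    int n = 1000000;
    long tps = System.currentTimeMillis();
    for (int i = 0; i < n; i++)
      map.put(i, "v" + i);
    long tpe = System.currentTimeMillis();
    // rate = operations / elapsed seconds, the same formula the test prints
    System.out.printf("put rate:%,6.2f puts/sec%n", n / ((tpe - tps) / 1000.0));
  }
}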
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/test/src/main/java/org/apache/accumulo/test/NativeMapStressTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/NativeMapStressTest.java b/test/src/main/java/org/apache/accumulo/test/NativeMapStressTest.java
index c8e821b..72831d8 100644
--- a/test/src/main/java/org/apache/accumulo/test/NativeMapStressTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/NativeMapStressTest.java
@@ -36,144 +36,144 @@ import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 
 public class NativeMapStressTest {
-  
+
   private static final Logger log = Logger.getLogger(NativeMapStressTest.class);
-  
+
   public static void main(String[] args) {
     testLotsOfMapDeletes(true);
     testLotsOfMapDeletes(false);
     testLotsOfOverwrites();
     testLotsOfGetsAndScans();
   }
-  
+
   private static void put(NativeMap nm, String row, String val, int mc) {
     Mutation m = new Mutation(new Text(row));
     m.put(new Text(), new Text(), Long.MAX_VALUE, new Value(val.getBytes(UTF_8)));
     nm.mutate(m, mc);
   }
-  
+
   private static void testLotsOfGetsAndScans() {
-    
+
     ArrayList<Thread> threads = new ArrayList<Thread>();
-    
+
     final int numThreads = 8;
     final int totalGets = 100000000;
     final int mapSizePerThread = (int) (4000000 / (double) numThreads);
     final int getsPerThread = (int) (totalGets / (double) numThreads);
-    
+
     for (int tCount = 0; tCount < numThreads; tCount++) {
       Runnable r = new Runnable() {
         @Override
         public void run() {
           NativeMap nm = new NativeMap();
-          
+
           Random r = new Random();
-          
+
           OpTimer opTimer = new OpTimer(log, Level.INFO);
-          
+
           opTimer.start("Creating map of size " + mapSizePerThread);
-          
+
           for (int i = 0; i < mapSizePerThread; i++) {
             String row = String.format("r%08d", i);
             String val = row + "v";
             put(nm, row, val, i);
           }
-          
+
           opTimer.stop("Created map of size " + nm.size() + " in %DURATION%");
-          
+
           opTimer.start("Doing " + getsPerThread + " gets()");
-          
+
           for (int i = 0; i < getsPerThread; i++) {
             String row = String.format("r%08d", r.nextInt(mapSizePerThread));
             String val = row + "v";
-            
+
             Value value = nm.get(new Key(new Text(row)));
             if (value == null || !value.toString().equals(val)) {
               log.error("nm.get(" + row + ") failed");
             }
           }
-          
+
           opTimer.stop("Finished " + getsPerThread + " gets in %DURATION%");
-          
+
           int scanned = 0;
-          
+
           opTimer.start("Doing " + getsPerThread + " random iterations");
-          
+
           for (int i = 0; i < getsPerThread; i++) {
             int startRow = r.nextInt(mapSizePerThread);
             String row = String.format("r%08d", startRow);
-            
+
             Iterator<Entry<Key,Value>> iter = nm.iterator(new Key(new Text(row)));
-            
+
             int count = 0;
-            
+
             while (iter.hasNext() && count < 10) {
               String row2 = String.format("r%08d", startRow + count);
               String val2 = row2 + "v";
-              
+
               Entry<Key,Value> entry = iter.next();
               if (!entry.getValue().toString().equals(val2) || !entry.getKey().equals(new Key(new Text(row2)))) {
                 log.error("nm.iter(" + row2 + ") failed row = " + row + " count = " + count + " row2 = " + row + " val2 = " + val2);
               }
-              
+
               count++;
             }
-            
+
             scanned += count;
           }
-          
+
           opTimer.stop("Finished " + getsPerThread + " random iterations (scanned = " + scanned + ") in %DURATION%");
-          
+
           nm.delete();
         }
       };
-      
+
       Thread t = new Thread(r);
       t.start();
-      
+
       threads.add(t);
     }
-    
+
     for (Thread thread : threads) {
       try {
         thread.join();
       } catch (InterruptedException e) {
-        log.error("Could not join thread '"+thread.getName()+"'.", e);
+        log.error("Could not join thread '" + thread.getName() + "'.", e);
         throw new RuntimeException(e);
       }
     }
   }
-  
+
   private static void testLotsOfMapDeletes(final boolean doRemoves) {
     final int numThreads = 8;
     final int rowRange = 10000;
     final int mapsPerThread = 50;
     final int totalInserts = 100000000;
     final int insertsPerMapPerThread = (int) (totalInserts / (double) numThreads / mapsPerThread);
-    
+
     System.out.println("insertsPerMapPerThread " + insertsPerMapPerThread);
-    
+
     ArrayList<Thread> threads = new ArrayList<Thread>();
-    
+
     for (int i = 0; i < numThreads; i++) {
       Runnable r = new Runnable() {
         @Override
         public void run() {
-          
+
           int inserts = 0;
           int removes = 0;
-          
+
           for (int i = 0; i < mapsPerThread; i++) {
-            
+
             NativeMap nm = new NativeMap();
-            
+
             for (int j = 0; j < insertsPerMapPerThread; j++) {
               String row = String.format("r%08d", j % rowRange);
               String val = row + "v";
               put(nm, row, val, j);
               inserts++;
             }
-            
+
             if (doRemoves) {
               Iterator<Entry<Key,Value>> iter = nm.iterator();
               while (iter.hasNext()) {
@@ -182,61 +182,61 @@ public class NativeMapStressTest {
                 removes++;
               }
             }
-            
+
             nm.delete();
           }
-          
+
           System.out.println("inserts " + inserts + " removes " + removes + " " + Thread.currentThread().getName());
         }
       };
-      
+
       Thread t = new Thread(r);
       t.start();
-      
+
       threads.add(t);
     }
-    
+
     for (Thread thread : threads) {
       try {
         thread.join();
       } catch (InterruptedException e) {
-        log.error("Could not join thread '"+thread.getName()+"'.", e);
+        log.error("Could not join thread '" + thread.getName() + "'.", e);
         throw new RuntimeException(e);
       }
     }
   }
-  
+
   private static void testLotsOfOverwrites() {
     final Map<Integer,NativeMap> nativeMaps = new HashMap<Integer,NativeMap>();
-    
+
     int numThreads = 8;
     final int insertsPerThread = (int) (100000000 / (double) numThreads);
     final int rowRange = 10000;
     final int numMaps = 50;
-    
+
     ArrayList<Thread> threads = new ArrayList<Thread>();
-    
+
     for (int i = 0; i < numThreads; i++) {
       Runnable r = new Runnable() {
         @Override
         public void run() {
           Random r = new Random();
           int inserts = 0;
-          
+
           for (int i = 0; i < insertsPerThread / 100.0; i++) {
             int map = r.nextInt(numMaps);
-            
+
             NativeMap nm;
-            
+
             synchronized (nativeMaps) {
               nm = nativeMaps.get(map);
               if (nm == null) {
                 nm = new NativeMap();
                 nativeMaps.put(map, nm);
-                
+
               }
             }
-            
+
             synchronized (nm) {
               for (int j = 0; j < 100; j++) {
                 String row = String.format("r%08d", r.nextInt(rowRange));
@@ -246,30 +246,30 @@ public class NativeMapStressTest {
               }
             }
           }
-          
+
           System.out.println("inserts " + inserts + " " + Thread.currentThread().getName());
         }
       };
-      
+
       Thread t = new Thread(r);
       t.start();
-      
+
       threads.add(t);
     }
-    
+
     for (Thread thread : threads) {
       try {
         thread.join();
       } catch (InterruptedException e) {
-        log.error("Could not join thread '"+thread.getName()+"'.", e);
+        log.error("Could not join thread '" + thread.getName() + "'.", e);
         throw new RuntimeException(e);
       }
     }
-    
+
     Set<Entry<Integer,NativeMap>> es = nativeMaps.entrySet();
     for (Entry<Integer,NativeMap> entry : es) {
       entry.getValue().delete();
     }
   }
-  
+
 }

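The lock-then-lock pattern in testLotsOfOverwrites (synchronize on the shared registry to get-or-create a map, then synchronize on the individual map to mutate it) is worth isolating. A sketch of the get-or-create half, with StringBuilder standing in for NativeMap; the class and method names are hypothetical:

import java.util.HashMap;
import java.util.Map;

public class GetOrCreateSketch {
  private static final Map<Integer,StringBuilder> maps = new HashMap<Integer,StringBuilder>();

  // same shape as the synchronized block in testLotsOfOverwrites
  static StringBuilder getOrCreate(int id) {
    synchronized (maps) {
      StringBuilder sb = maps.get(id);
      if (sb == null) {
        sb = new StringBuilder();
        maps.put(id, sb);
      }
      return sb;
    }
  }

  public static void main(String[] args) {
    StringBuilder a = getOrCreate(7);
    StringBuilder b = getOrCreate(7);
    System.out.println(a == b); // prints true: one shared instance per key
  }
}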
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/test/src/main/java/org/apache/accumulo/test/NullBatchWriter.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/NullBatchWriter.java b/test/src/main/java/org/apache/accumulo/test/NullBatchWriter.java
index d260c95..3bb2f7f 100644
--- a/test/src/main/java/org/apache/accumulo/test/NullBatchWriter.java
+++ b/test/src/main/java/org/apache/accumulo/test/NullBatchWriter.java
@@ -21,10 +21,10 @@ import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.data.Mutation;
 
 public class NullBatchWriter implements BatchWriter {
-  
+
   private int mutationsAdded;
   private long startTime;
-  
+
   @Override
   public void addMutation(Mutation m) throws MutationsRejectedException {
     if (mutationsAdded == 0) {
@@ -33,23 +33,23 @@ public class NullBatchWriter implements BatchWriter {
     mutationsAdded++;
     m.numBytes();
   }
-  
+
   @Override
   public void addMutations(Iterable<Mutation> iterable) throws MutationsRejectedException {
     for (Mutation mutation : iterable) {
       addMutation(mutation);
     }
   }
-  
+
   @Override
   public void close() throws MutationsRejectedException {
     flush();
   }
-  
+
   @Override
   public void flush() throws MutationsRejectedException {
     System.out.printf("Mutation add rate : %,6.2f mutations/sec%n", mutationsAdded / ((System.currentTimeMillis() - startTime) / 1000.0));
     mutationsAdded = 0;
   }
-  
+
 }

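NullBatchWriter's trick is to record the clock on the first add and report a rate on flush. The same idea detached from the BatchWriter interface, as a rough sketch (names are illustrative):

public class RateSinkSketch {
  private int added;
  private long startTime;

  void add() {
    if (added == 0)
      startTime = System.currentTimeMillis();
    added++;
  }

  void flush() {
    double secs = (System.currentTimeMillis() - startTime) / 1000.0;
    System.out.printf("add rate : %,6.2f items/sec%n", added / secs);
    added = 0;
  }

  public static void main(String[] args) throws InterruptedException {
    RateSinkSketch sink = new RateSinkSketch();
    for (int i = 0; i < 1000000; i++)
      sink.add();
    Thread.sleep(10); // ensure a nonzero elapsed interval before flushing
    sink.flush();
  }
}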
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java b/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
index b6a9bda..713f3ae 100644
--- a/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
+++ b/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
@@ -41,43 +41,43 @@ import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.hadoop.io.Text;
+import org.apache.log4j.Logger;
 
 import com.beust.jcommander.Parameter;
-import org.apache.log4j.Logger;
 
 public class QueryMetadataTable {
   private static final Logger log = Logger.getLogger(QueryMetadataTable.class);
-  
+
   private static String principal;
   private static AuthenticationToken token;
-  
+
   static String location;
-  
+
   static class MDTQuery implements Runnable {
     private Text row;
-    
+
     MDTQuery(Text row) {
       this.row = row;
     }
-    
+
     @Override
     public void run() {
       try {
         KeyExtent extent = new KeyExtent(row, (Text) null);
-        
+
         Connector connector = HdfsZooInstance.getInstance().getConnector(principal, token);
         Scanner mdScanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
         Text row = extent.getMetadataEntry();
-        
+
         mdScanner.setRange(new Range(row));
-        
+
         for (Entry<Key,Value> entry : mdScanner) {
           if (!entry.getKey().getRow().equals(row))
             break;
         }
-        
+
       } catch (TableNotFoundException e) {
-        log.error("Table '"+MetadataTable.NAME+"' not found.", e);
+        log.error("Table '" + MetadataTable.NAME + "' not found.", e);
         throw new RuntimeException(e);
       } catch (AccumuloException e) {
         log.error("AccumuloException encountered.", e);
@@ -88,28 +88,28 @@ public class QueryMetadataTable {
       }
     }
   }
-  
+
   static class Opts extends ClientOpts {
     @Parameter(names = "--numQueries", description = "number of queries to run")
     int numQueries = 1;
     @Parameter(names = "--numThreads", description = "number of threads used to run the queries")
     int numThreads = 1;
   }
-  
+
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     Opts opts = new Opts();
     ScannerOpts scanOpts = new ScannerOpts();
     opts.parseArgs(QueryMetadataTable.class.getName(), args, scanOpts);
-    
+
     Connector connector = opts.getConnector();
     Scanner scanner = connector.createScanner(MetadataTable.NAME, opts.auths);
     scanner.setBatchSize(scanOpts.scanBatchSize);
     Text mdrow = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), null));
-    
+
     HashSet<Text> rowSet = new HashSet<Text>();
-    
+
     int count = 0;
-    
+
     for (Entry<Key,Value> entry : scanner) {
       System.out.print(".");
       if (count % 72 == 0) {
@@ -119,37 +119,37 @@ public class QueryMetadataTable {
         System.out.println(entry.getKey() + " " + entry.getValue());
         location = entry.getValue().toString();
       }
-      
+
       if (!entry.getKey().getRow().toString().startsWith(MetadataTable.ID))
         rowSet.add(entry.getKey().getRow());
       count++;
     }
-    
+
     System.out.printf(" %,d%n", count);
-    
+
     ArrayList<Text> rows = new ArrayList<Text>(rowSet);
-    
+
     Random r = new Random();
-    
+
     ExecutorService tp = Executors.newFixedThreadPool(opts.numThreads);
-    
+
     long t1 = System.currentTimeMillis();
-    
+
     for (int i = 0; i < opts.numQueries; i++) {
       int index = r.nextInt(rows.size());
       MDTQuery mdtq = new MDTQuery(rows.get(index));
       tp.submit(mdtq);
     }
-    
+
     tp.shutdown();
-    
+
     try {
       tp.awaitTermination(1, TimeUnit.HOURS);
     } catch (InterruptedException e) {
       log.error("Failed while awaiting the ExcecutorService to terminate.", e);
       throw new RuntimeException(e);
     }
-    
+
     long t2 = System.currentTimeMillis();
     double delta = (t2 - t1) / 1000.0;
     System.out.println("time : " + delta + "  queries per sec : " + (opts.numQueries / delta));

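QueryMetadataTable's main loop fans queries out over a fixed-size pool and then drains it with shutdown/awaitTermination. A self-contained sketch of that submit-and-drain pattern, with a hypothetical Runnable in place of MDTQuery:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class QueryPoolSketch {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService tp = Executors.newFixedThreadPool(4);
    for (int i = 0; i < 16; i++) {
      final int q = i;
      tp.submit(new Runnable() {
        @Override
        public void run() {
          // placeholder for one metadata lookup
          System.out.println("query " + q);
        }
      });
    }
    tp.shutdown(); // stop accepting work; queued tasks still run
    tp.awaitTermination(1, TimeUnit.HOURS);
  }
}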
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java b/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java
index 7b373c4..2615ae2 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java
@@ -40,7 +40,7 @@ import com.beust.jcommander.Parameter;
 
 public class TestBinaryRows {
   private static final long byteOnes;
-  
+
   static {
     // safely build Byte.SIZE number of 1s as a long; not that I think Byte.SIZE will ever be anything but 8, but just for fun
     long b = 1;
@@ -48,51 +48,51 @@ public class TestBinaryRows {
       b |= (1L << i);
     byteOnes = b;
   }
-  
+
   static byte[] encodeLong(long l) {
     byte[] ba = new byte[Long.SIZE / Byte.SIZE];
-    
+
     // parse long into a sequence of bytes
     for (int i = 0; i < ba.length; ++i)
       ba[i] = (byte) (byteOnes & (l >>> (Byte.SIZE * (ba.length - i - 1))));
-    
+
     return ba;
   }
-  
+
   static long decodeLong(byte ba[]) {
     // validate byte array
     if (ba.length > Long.SIZE / Byte.SIZE)
       throw new IllegalArgumentException("Byte array of size " + ba.length + " is too big to hold a long");
-    
+
     // build the long from the bytes
     long l = 0;
     for (int i = 0; i < ba.length; ++i)
       l |= (byteOnes & ba[i]) << (Byte.SIZE * (ba.length - i - 1));
-    
+
     return l;
   }
-  
+
   public static class Opts extends ClientOnRequiredTable {
-    @Parameter(names="--mode", description="either 'ingest', 'delete', 'randomLookups', 'split', 'verify', 'verifyDeleted'", required=true)
+    @Parameter(names = "--mode", description = "either 'ingest', 'delete', 'randomLookups', 'split', 'verify', 'verifyDeleted'", required = true)
     public String mode;
-    @Parameter(names="--start", description="the lowest numbered row")
+    @Parameter(names = "--start", description = "the lowest numbered row")
     public long start = 0;
-    @Parameter(names="--count", description="number of rows to ingest", required=true)
+    @Parameter(names = "--count", description = "number of rows to ingest", required = true)
     public long num = 0;
   }
-  
+
   public static void runTest(Connector connector, Opts opts, BatchWriterOpts bwOpts, ScannerOpts scanOpts) throws Exception {
-    
+
     final Text CF = new Text("cf"), CQ = new Text("cq");
     final byte[] CF_BYTES = "cf".getBytes(UTF_8), CQ_BYTES = "cq".getBytes(UTF_8);
     if (opts.mode.equals("ingest") || opts.mode.equals("delete")) {
       BatchWriter bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
       boolean delete = opts.mode.equals("delete");
-      
+
       for (long i = 0; i < opts.num; i++) {
         byte[] row = encodeLong(i + opts.start);
         String value = "" + (i + opts.start);
-        
+
         Mutation m = new Mutation(new Text(row));
         if (delete) {
           m.putDelete(CF, CQ);
@@ -101,7 +101,7 @@ public class TestBinaryRows {
         }
         bw.addMutation(m);
       }
-      
+
       bw.close();
     } else if (opts.mode.equals("verifyDeleted")) {
       Scanner s = connector.createScanner(opts.getTableName(), opts.auths);
@@ -110,22 +110,22 @@ public class TestBinaryRows {
       Key stopKey = new Key(encodeLong(opts.start + opts.num - 1), CF_BYTES, CQ_BYTES, new byte[0], 0);
       s.setBatchSize(50000);
       s.setRange(new Range(startKey, stopKey));
-      
+
       for (Entry<Key,Value> entry : s) {
         throw new Exception("ERROR : saw entries in range that should be deleted ( first value : " + entry.getValue().toString() + ")");
       }
-      
+
     } else if (opts.mode.equals("verify")) {
       long t1 = System.currentTimeMillis();
-      
+
       Scanner s = connector.createScanner(opts.getTableName(), opts.auths);
       Key startKey = new Key(encodeLong(opts.start), CF_BYTES, CQ_BYTES, new byte[0], Long.MAX_VALUE);
       Key stopKey = new Key(encodeLong(opts.start + opts.num - 1), CF_BYTES, CQ_BYTES, new byte[0], 0);
       s.setBatchSize(scanOpts.scanBatchSize);
       s.setRange(new Range(startKey, stopKey));
-      
+
       long i = opts.start;
-      
+
       for (Entry<Key,Value> e : s) {
         Key k = e.getKey();
         Value v = e.getValue();
@@ -134,81 +134,81 @@ public class TestBinaryRows {
 
         i++;
       }
-      
+
       if (i != opts.start + opts.num) {
         throw new Exception("ERROR : did not see expected number of rows, saw " + (i - opts.start) + " expected " + opts.num);
       }
-      
+
       long t2 = System.currentTimeMillis();
-      
+
       System.out.printf("time : %9.2f secs%n", ((t2 - t1) / 1000.0));
       System.out.printf("rate : %9.2f entries/sec%n", opts.num / ((t2 - t1) / 1000.0));
-      
+
     } else if (opts.mode.equals("randomLookups")) {
       int numLookups = 1000;
-      
+
       Random r = new Random();
-      
+
       long t1 = System.currentTimeMillis();
-      
+
       for (int i = 0; i < numLookups; i++) {
         long row = ((r.nextLong() & 0x7fffffffffffffffl) % opts.num) + opts.start;
-        
+
         Scanner s = connector.createScanner(opts.getTableName(), opts.auths);
         s.setBatchSize(scanOpts.scanBatchSize);
         Key startKey = new Key(encodeLong(row), CF_BYTES, CQ_BYTES, new byte[0], Long.MAX_VALUE);
         Key stopKey = new Key(encodeLong(row), CF_BYTES, CQ_BYTES, new byte[0], 0);
         s.setRange(new Range(startKey, stopKey));
-        
+
         Iterator<Entry<Key,Value>> si = s.iterator();
-        
+
         if (si.hasNext()) {
           Entry<Key,Value> e = si.next();
           Key k = e.getKey();
           Value v = e.getValue();
-          
+
           checkKeyValue(row, k, v);
-          
+
           if (si.hasNext()) {
             throw new Exception("ERROR : lookup on " + row + " returned more than one result ");
           }
-          
+
         } else {
           throw new Exception("ERROR : lookup on " + row + " failed ");
         }
       }
-      
+
       long t2 = System.currentTimeMillis();
-      
+
       System.out.printf("time    : %9.2f secs%n", ((t2 - t1) / 1000.0));
       System.out.printf("lookups : %9d keys%n", numLookups);
       System.out.printf("rate    : %9.2f lookups/sec%n", numLookups / ((t2 - t1) / 1000.0));
-      
+
     } else if (opts.mode.equals("split")) {
       TreeSet<Text> splits = new TreeSet<Text>();
       int shift = (int) opts.start;
       int count = (int) opts.num;
-      
+
       for (long i = 0; i < count; i++) {
         long splitPoint = i << shift;
-        
+
         splits.add(new Text(encodeLong(splitPoint)));
         System.out.printf("added split point 0x%016x  %,12d%n", splitPoint, splitPoint);
       }
-      
+
       connector.tableOperations().create(opts.getTableName());
       connector.tableOperations().addSplits(opts.getTableName(), splits);
-      
+
     } else {
       throw new Exception("ERROR : " + opts.mode + " is not a valid operation.");
     }
   }
-  
+
   private static void checkKeyValue(long expected, Key k, Value v) throws Exception {
     if (expected != decodeLong(TextUtil.getBytes(k.getRow()))) {
       throw new Exception("ERROR : expected row " + expected + " saw " + decodeLong(TextUtil.getBytes(k.getRow())));
     }
-    
+
     if (!v.toString().equals("" + expected)) {
       throw new Exception("ERROR : expected value " + expected + " saw " + v.toString());
     }
@@ -219,7 +219,7 @@ public class TestBinaryRows {
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     ScannerOpts scanOpts = new ScannerOpts();
     opts.parseArgs(TestBinaryRows.class.getName(), args, scanOpts, bwOpts);
-    
+
     try {
       runTest(opts.getConnector(), opts, bwOpts, scanOpts);
     } catch (Exception e) {


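TestBinaryRows hinges on encodeLong/decodeLong: writing the most-significant byte first so that, for non-negative values, lexicographic byte order matches numeric order. A stripped-down roundtrip of the same encoding (the 0xff masking replaces the test's byteOnes constant; the class name is illustrative):

public class LongCodecSketch {
  static byte[] encodeLong(long l) {
    byte[] ba = new byte[8];
    // big-endian: highest byte of l goes into ba[0]
    for (int i = 0; i < ba.length; ++i)
      ba[i] = (byte) (l >>> (8 * (ba.length - i - 1)));
    return ba;
  }

  static long decodeLong(byte[] ba) {
    long l = 0;
    // mask with 0xffL to undo sign extension before shifting back into place
    for (int i = 0; i < ba.length; ++i)
      l |= (ba[i] & 0xffL) << (8 * (ba.length - i - 1));
    return l;
  }

  public static void main(String[] args) {
    long x = 1234567890123L;
    System.out.println(decodeLong(encodeLong(x)) == x); // prints true
  }
}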