accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From vi...@apache.org
Subject svn commit: r1433166 [9/20] - in /accumulo/branches/ACCUMULO-259: ./ assemble/ assemble/platform/ assemble/scripts/ assemble/scripts/init.d/ bin/ conf/examples/1GB/native-standalone/ conf/examples/1GB/standalone/ conf/examples/2GB/native-standalone/ co...
Date Mon, 14 Jan 2013 22:03:34 GMT
Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/admin/FindMaxTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/admin/FindMaxTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/admin/FindMaxTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/admin/FindMaxTest.java Mon Jan 14 22:03:24 2013
@@ -49,7 +49,7 @@ public class FindMaxTest extends TestCas
   public void test1() throws Exception {
     MockInstance mi = new MockInstance();
     
-    Connector conn = mi.getConnector("root", "foo");
+    Connector conn = mi.getConnector("root", "");
     conn.tableOperations().create("foo");
     
     BatchWriter bw = conn.createBatchWriter("foo", new BatchWriterConfig());

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/admin/TableOperationsHelperTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/admin/TableOperationsHelperTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/admin/TableOperationsHelperTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/admin/TableOperationsHelperTest.java Mon Jan 14 22:03:24 2013
@@ -112,6 +112,7 @@ public class TableOperationsHelperTest {
     public void rename(String oldTableName, String newTableName) throws AccumuloSecurityException, TableNotFoundException, AccumuloException,
         TableExistsException {}
     
+    @Deprecated
     @Override
     public void flush(String tableName) throws AccumuloException, AccumuloSecurityException {}
     

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormatTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormatTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormatTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormatTest.java Mon Jan 14 22:03:24 2013
@@ -27,9 +27,8 @@ import org.apache.accumulo.core.conf.Pro
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.ContextFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.junit.After;
@@ -37,13 +36,13 @@ import org.junit.Before;
 import org.junit.Test;
 
 public class AccumuloFileOutputFormatTest {
-  static JobContext job;
+  static Job job;
   static TaskAttemptContext tac;
   static Path f = null;
   
   @Before
-  public void setup() {
-    job = ContextFactory.createJobContext();
+  public void setup() throws IOException {
+    job = new Job();
     
     Path file = new Path("target/");
     f = new Path(file, "_temporary");
@@ -88,9 +87,24 @@ public class AccumuloFileOutputFormatTes
   }
   
   @Test
+  public void writeBadVisibility() throws IOException, InterruptedException {
+    AccumuloFileOutputFormat afof = new AccumuloFileOutputFormat();
+    RecordWriter<Key,Value> rw = afof.getRecordWriter(tac);
+    
+    Path file = afof.getDefaultWorkFile(tac, ".rf");
+
+    rw.write(new Key("r1", "cf1", "cq1", "A&B"), new Value("".getBytes()));
+    rw.write(new Key("r1", "cf1", "cq2", "A&B"), new Value("".getBytes()));
+    try {
+      rw.write(new Key("r1", "cf1", "cq2", "A&"), new Value("".getBytes()));
+      assertFalse(true);
+    } catch (Exception e) {}
+    
+    file.getFileSystem(tac.getConfiguration()).delete(file.getParent(), true);
+  }
+
+  @Test
   public void validateConfiguration() throws IOException, InterruptedException {
-    Configuration conf = job.getConfiguration();
-    AccumuloConfiguration acuconf = AccumuloConfiguration.getDefaultConfiguration();
     
     int a = 7;
     long b = 300l;
@@ -98,17 +112,18 @@ public class AccumuloFileOutputFormatTes
     long d = 10l;
     String e = "type";
     
-    AccumuloFileOutputFormat.setReplication(conf, a);
-    AccumuloFileOutputFormat.setDFSBlockSize(conf, b);
-    AccumuloFileOutputFormat.setCompressedBlockSize(conf, c);
-    AccumuloFileOutputFormat.setCompressedBlockSizeIndex(conf, d);
-    AccumuloFileOutputFormat.setCompressionType(conf, e);
-    
-    assertEquals(a, conf.getInt(Property.TABLE_FILE_REPLICATION.getKey(), acuconf.getCount(Property.TABLE_FILE_REPLICATION)));
-    assertEquals(b, conf.getLong(Property.TABLE_FILE_BLOCK_SIZE.getKey(), acuconf.getMemoryInBytes(Property.TABLE_FILE_BLOCK_SIZE)));
-    assertEquals(c, conf.getLong(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), acuconf.getMemoryInBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE)));
-    assertEquals(d,
-        conf.getLong(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey(), acuconf.getMemoryInBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX)));
-    assertEquals(e, conf.get(Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), acuconf.get(Property.TABLE_FILE_COMPRESSION_TYPE)));
+    AccumuloFileOutputFormat.setReplication(job, a);
+    AccumuloFileOutputFormat.setFileBlockSize(job, b);
+    AccumuloFileOutputFormat.setDataBlockSize(job, c);
+    AccumuloFileOutputFormat.setIndexBlockSize(job, d);
+    AccumuloFileOutputFormat.setCompressionType(job, e);
+    
+    AccumuloConfiguration acuconf = AccumuloFileOutputFormat.getAccumuloConfiguration(job);
+    
+    assertEquals(a, acuconf.getCount(Property.TABLE_FILE_REPLICATION));
+    assertEquals(b, acuconf.getMemoryInBytes(Property.TABLE_FILE_BLOCK_SIZE));
+    assertEquals(c, acuconf.getMemoryInBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE));
+    assertEquals(d, acuconf.getMemoryInBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX));
+    assertEquals(e, acuconf.get(Property.TABLE_FILE_COMPRESSION_TYPE));
   }
 }

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitionerTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitionerTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitionerTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitionerTest.java Mon Jan 14 22:03:24 2013
@@ -18,9 +18,10 @@ package org.apache.accumulo.core.client.
 
 import static org.junit.Assert.assertTrue;
 
-import org.apache.accumulo.core.util.ContextFactory;
+import java.io.IOException;
+
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.Job;
 import org.junit.Test;
 
 public class RangePartitionerTest {
@@ -28,7 +29,7 @@ public class RangePartitionerTest {
   private static Text[] cutArray = new Text[] {new Text("A"), new Text("B"), new Text("C")};
   
   @Test
-  public void testNoSubBins() {
+  public void testNoSubBins() throws IOException {
     for (int i = -2; i < 2; ++i) {
       checkExpectedBins(i, new String[] {"A", "B", "C"}, new int[] {0, 1, 2});
       checkExpectedBins(i, new String[] {"C", "A", "B"}, new int[] {2, 0, 1});
@@ -37,7 +38,7 @@ public class RangePartitionerTest {
   }
   
   @Test
-  public void testSubBins() {
+  public void testSubBins() throws IOException {
     checkExpectedRangeBins(2, new String[] {"A", "B", "C"}, new int[] {1, 3, 5});
     checkExpectedRangeBins(2, new String[] {"C", "A", "B"}, new int[] {5, 1, 3});
     checkExpectedRangeBins(2, new String[] {"", "AA", "BB", "CC"}, new int[] {1, 3, 5, 7});
@@ -51,15 +52,15 @@ public class RangePartitionerTest {
     checkExpectedRangeBins(10, new String[] {"", "AA", "BB", "CC"}, new int[] {9, 19, 29, 39});
   }
   
-  private RangePartitioner prepPartitioner(int numSubBins) {
-    JobContext job = ContextFactory.createJobContext();
+  private RangePartitioner prepPartitioner(int numSubBins) throws IOException {
+    Job job = new Job();
     RangePartitioner.setNumSubBins(job, numSubBins);
     RangePartitioner rp = new RangePartitioner();
     rp.setConf(job.getConfiguration());
     return rp;
   }
   
-  private void checkExpectedRangeBins(int numSubBins, String[] strings, int[] rangeEnds) {
+  private void checkExpectedRangeBins(int numSubBins, String[] strings, int[] rangeEnds) throws IOException {
     assertTrue(strings.length == rangeEnds.length);
     for (int i = 0; i < strings.length; ++i) {
       int endRange = rangeEnds[i];
@@ -70,7 +71,7 @@ public class RangePartitionerTest {
     }
   }
   
-  private void checkExpectedBins(int numSubBins, String[] strings, int[] bins) {
+  private void checkExpectedBins(int numSubBins, String[] strings, int[] bins) throws IOException {
     assertTrue(strings.length == bins.length);
     for (int i = 0; i < strings.length; ++i) {
       int bin = bins[i], part = prepPartitioner(numSubBins).findPartition(new Text(strings[i]), cutArray, numSubBins);

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mock/MockConnectorTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mock/MockConnectorTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mock/MockConnectorTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mock/MockConnectorTest.java Mon Jan 14 22:03:24 2013
@@ -16,17 +16,15 @@
  */
 package org.apache.accumulo.core.client.mock;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertFalse;
-import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.Map.Entry;
 import java.util.Random;
 
-import junit.framework.Assert;
-
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -85,7 +83,9 @@ public class MockConnectorTest {
   @Test
   public void testChangeAuths() throws Exception {
     Connector c = new MockConnector("root", new MockInstance());
-    c.securityOperations().createUser("greg", new byte[] {}, new Authorizations("A", "B", "C"));
+    c.securityOperations().createUser("greg", new byte[] {});
+    assertTrue(c.securityOperations().getUserAuthorizations("greg").isEmpty());
+    c.securityOperations().changeUserAuthorizations("greg", new Authorizations("A".getBytes()));
     assertTrue(c.securityOperations().getUserAuthorizations("greg").contains("A".getBytes()));
     c.securityOperations().changeUserAuthorizations("greg", new Authorizations("X", "Y", "Z"));
     assertTrue(c.securityOperations().getUserAuthorizations("greg").contains("X".getBytes()));
@@ -343,8 +343,8 @@ public class MockConnectorTest {
       AccumuloSecurityException{
     String name = "an-interesting-instance-name";
     Instance mockInstance = new MockInstance(name);
-    Assert.assertEquals(mockInstance, mockInstance.getConnector("foo", "bar").getInstance());
-    Assert.assertEquals(name, mockInstance.getConnector("foo","bar").getInstance().getInstanceName());
+    assertEquals(mockInstance, mockInstance.getConnector("foo", "bar").getInstance());
+    assertEquals(name, mockInstance.getConnector("foo","bar").getInstance().getInstanceName());
   }
 
 }

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mock/MockTableOperationsTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mock/MockTableOperationsTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mock/MockTableOperationsTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/client/mock/MockTableOperationsTest.java Mon Jan 14 22:03:24 2013
@@ -58,6 +58,7 @@ import org.apache.accumulo.core.util.Pai
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -249,7 +250,7 @@ public class MockTableOperationsTest {
         testFiles.importPath.toString(), testFiles.failurePath.toString(),
         false);
   }
-
+  
   @Test(expected = IOException.class)
   public void testFailsWithNonEmptyFailureDirectory() throws Throwable {
     Instance instance = new MockInstance("foo");
@@ -264,4 +265,26 @@ public class MockTableOperationsTest {
         false);
   }
   
+  @Test
+  public void testDeleteRows() throws Exception {
+    Instance instance = new MockInstance("rows");
+    Connector connector = instance.getConnector("user", "foo");
+    TableOperations to = connector.tableOperations();
+    to.create("test");
+    BatchWriter bw = connector.createBatchWriter("test", new BatchWriterConfig());
+    for (int r = 0; r < 20; r++) {
+      Mutation m = new Mutation("" + r);
+      for (int c = 0; c < 5; c++) {
+        m.put(new Text("cf"), new Text("" + c), new Value(("" + c).getBytes()));
+      }
+      bw.addMutation(m);
+    }
+    bw.flush();
+    to.deleteRows("test", new Text("1"), new Text("2"));
+    Scanner s = connector.createScanner("test", Constants.NO_AUTHS);
+    for (Entry<Key, Value> entry : s) {
+      Assert.assertTrue(entry.getKey().getRow().toString().charAt(0) != '1');
+    }
+  }
+  
 }

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/conf/PropertyTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/conf/PropertyTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/conf/PropertyTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/conf/PropertyTest.java Mon Jan 14 22:03:24 2013
@@ -92,5 +92,11 @@ public class PropertyTest {
     typeCheckValidFormat(PropertyType.HOSTLIST, "localhost", "server1,server2,server3", "server1:1111,server2:3333", "localhost:1111", "server2:1111",
         "www.server", "www.server:1111", "www.server.com", "www.server.com:111");
     typeCheckInvalidFormat(PropertyType.HOSTLIST, ":111", "local host");
+    
+    typeCheckValidFormat(PropertyType.ABSOLUTEPATH, "/foo", "/foo/c", "/");
+    // in hadoop 2.0 Path only normalizes Windows paths properly when run on a Windows system
+    // this makes the following checks fail
+    // typeCheckValidFormat(PropertyType.ABSOLUTEPATH, "d:\\foo12", "c:\\foo\\g", "c:\\foo\\c", "c:\\");
+    typeCheckInvalidFormat(PropertyType.ABSOLUTEPATH, "foo12", "foo/g", "foo\\c");
   }
 }

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/data/MutationTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/data/MutationTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/data/MutationTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/data/MutationTest.java Mon Jan 14 22:03:24 2013
@@ -30,6 +30,29 @@ import org.apache.accumulo.core.security
 import org.apache.hadoop.io.Text;
 
 public class MutationTest extends TestCase {
+  
+  private static String toHexString(byte[] ba) {
+    StringBuilder str = new StringBuilder();
+    for (int i = 0; i < ba.length; i++) {
+      str.append(String.format("%x", ba[i]));
+    }
+    return str.toString();
+  }
+
+  /* Test constructing a Mutation using a byte buffer. The byte array
+   * returned as the row is converted to a hexadecimal string for easy
+   * comparison.
+   */
+  public void testByteConstructor() {
+    Mutation m = new Mutation("0123456789".getBytes());
+    assertEquals("30313233343536373839", toHexString(m.getRow()));
+  }
+  
+  public void testLimitedByteConstructor() {
+    Mutation m = new Mutation("0123456789".getBytes(), 2, 5);
+    assertEquals("3233343536", toHexString(m.getRow()));
+  }
+  
   public void test1() {
     Mutation m = new Mutation(new Text("r1"));
     m.put(new Text("cf1"), new Text("cq1"), new Value("v1".getBytes()));
@@ -185,7 +208,7 @@ public class MutationTest extends TestCa
   }
   
   public void testPutsString() {
-    Mutation m = new Mutation(new Text("r1"));
+    Mutation m = new Mutation("r1");
     
     m.put("cf1", "cq1", nv("v1"));
     m.put("cf2", "cq2", new ColumnVisibility("cv2"), nv("v2"));
@@ -216,7 +239,7 @@ public class MutationTest extends TestCa
   }
   
   public void testPutsStringString() {
-    Mutation m = new Mutation(new Text("r1"));
+    Mutation m = new Mutation("r1");
     
     m.put("cf1", "cq1", "v1");
     m.put("cf2", "cq2", new ColumnVisibility("cv2"), "v2");
@@ -229,6 +252,7 @@ public class MutationTest extends TestCa
     m.putDelete("cf8", "cq8", new ColumnVisibility("cv8"), 8l);
     
     assertEquals(8, m.size());
+    assertEquals("r1", new String(m.getRow()));
     
     List<ColumnUpdate> updates = m.getUpdates();
     
@@ -246,6 +270,37 @@ public class MutationTest extends TestCa
     assertEquals(updates.get(7), "cf8", "cq8", "cv8", 8l, true, true, "");
   }
   
+  public void testByteArrays() {
+    Mutation m = new Mutation("r1".getBytes());
+    
+    m.put("cf1".getBytes(), "cq1".getBytes(), "v1".getBytes());
+    m.put("cf2".getBytes(), "cq2".getBytes(), new ColumnVisibility("cv2"), "v2".getBytes());
+    m.put("cf3".getBytes(), "cq3".getBytes(), 3l, "v3".getBytes());
+    m.put("cf4".getBytes(), "cq4".getBytes(), new ColumnVisibility("cv4"), 4l, "v4".getBytes());
+    
+    m.putDelete("cf5".getBytes(), "cq5".getBytes());
+    m.putDelete("cf6".getBytes(), "cq6".getBytes(), new ColumnVisibility("cv6"));
+    m.putDelete("cf7".getBytes(), "cq7".getBytes(), 7l);
+    m.putDelete("cf8".getBytes(), "cq8".getBytes(), new ColumnVisibility("cv8"), 8l);
+    
+    assertEquals(8, m.size());
+    
+    List<ColumnUpdate> updates = m.getUpdates();
+    
+    assertEquals(8, m.size());
+    assertEquals(8, updates.size());
+    
+    assertEquals(updates.get(0), "cf1", "cq1", "", 0l, false, false, "v1");
+    assertEquals(updates.get(1), "cf2", "cq2", "cv2", 0l, false, false, "v2");
+    assertEquals(updates.get(2), "cf3", "cq3", "", 3l, true, false, "v3");
+    assertEquals(updates.get(3), "cf4", "cq4", "cv4", 4l, true, false, "v4");
+    
+    assertEquals(updates.get(4), "cf5", "cq5", "", 0l, false, true, "");
+    assertEquals(updates.get(5), "cf6", "cq6", "cv6", 0l, false, true, "");
+    assertEquals(updates.get(6), "cf7", "cq7", "", 7l, true, true, "");
+    assertEquals(updates.get(7), "cf8", "cq8", "cv8", 8l, true, true, "");
+  }
+
   /**
    * Test for regression on bug 3422. If a {@link Mutation} object is reused for multiple calls to readFields, the mutation would previously be "locked in" to
    * the first set of column updates (and value lengths). Hadoop input formats reuse objects when reading, so if Mutations are used with an input format (or as
@@ -402,15 +457,19 @@ public class MutationTest extends TestCa
     dos.close();
     long newSize = dos.size();
     assertTrue(newSize < oldSize);
-    System.out.println(String.format("%d %d %.2f%%", newSize - exampleLen, oldSize - exampleLen, (newSize-exampleLen) * 100. / (oldSize - exampleLen)));
+    assertEquals(10, newSize - exampleLen);
+    assertEquals(68, oldSize - exampleLen);
+    // I am converting to integer to avoid comparing floats which are inaccurate
+    assertEquals(14705, (int)(((newSize-exampleLen) * 100. / (oldSize - exampleLen)) * 1000));
+    StringBuilder sb = new StringBuilder();
     byte[] ba = bos.toByteArray();
     for (int i = 0; i < bos.size(); i += 4) {
       for (int j = i; j < bos.size() && j < i + 4; j++) {
-        System.out.append(String.format("%02x", ba[j]));
+        sb.append(String.format("%02x", ba[j]));
       }
-      System.out.append(" ");
+      sb.append(" ");
     }
-    System.out.println();
+    assertEquals("80322031 32333435 36373839 20313233 34353637 38392031 32333435 36373839 20313233 34353637 38392031 32333435 36373839 06000000 00000001 ", sb.toString());
     
   }
   

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java Mon Jan 14 22:03:24 2013
@@ -75,7 +75,7 @@ public class MultiLevelIndexTest extends
     FSDataInputStream in = new FSDataInputStream(bais);
     CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length, CachedConfiguration.getInstance());
     
-    Reader reader = new Reader(_cbr, RFile.RINDEX_VER_6);
+    Reader reader = new Reader(_cbr, RFile.RINDEX_VER_7);
     BlockRead rootIn = _cbr.getMetaBlock("root");
     reader.readFields(rootIn);
     rootIn.close();

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java Mon Jan 14 22:03:24 2013
@@ -16,6 +16,10 @@
  */
 package org.apache.accumulo.core.file.rfile;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
@@ -29,8 +33,6 @@ import java.util.Iterator;
 import java.util.Random;
 import java.util.Set;
 
-import junit.framework.TestCase;
-
 import org.apache.accumulo.core.data.ArrayByteSequence;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
@@ -53,8 +55,9 @@ import org.apache.hadoop.fs.Seekable;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.junit.Test;
 
-public class RFileTest extends TestCase {
+public class RFileTest {
   
   private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<ByteSequence>();
   
@@ -200,18 +203,19 @@ public class RFileTest extends TestCase 
     }
   }
   
-  private Key nk(String row, String cf, String cq, String cv, long ts) {
+  static Key nk(String row, String cf, String cq, String cv, long ts) {
     return new Key(row.getBytes(), cf.getBytes(), cq.getBytes(), cv.getBytes(), ts);
   }
   
-  private Value nv(String val) {
+  static Value nv(String val) {
     return new Value(val.getBytes());
   }
   
-  private String nf(String prefix, int i) {
+  static String nf(String prefix, int i) {
     return String.format(prefix + "%06d", i);
   }
   
+  @Test
   public void test1() throws IOException {
     
     // test an empty file
@@ -230,6 +234,7 @@ public class RFileTest extends TestCase 
     trf.closeReader();
   }
   
+  @Test
   public void test2() throws IOException {
     
     // test an rfile with one entry
@@ -266,6 +271,7 @@ public class RFileTest extends TestCase 
     trf.closeReader();
   }
   
+  @Test
   public void test3() throws IOException {
     
     // test an rfile with multiple rows having multiple columns
@@ -423,6 +429,7 @@ public class RFileTest extends TestCase 
     assertFalse(evi.hasNext());
   }
   
+  @Test
   public void test4() throws IOException {
     TestRFile trf = new TestRFile();
     
@@ -465,6 +472,7 @@ public class RFileTest extends TestCase 
     }
   }
   
+  @Test
   public void test5() throws IOException {
     
     TestRFile trf = new TestRFile();
@@ -493,6 +501,7 @@ public class RFileTest extends TestCase 
     trf.closeReader();
   }
   
+  @Test
   public void test6() throws IOException {
     
     TestRFile trf = new TestRFile();
@@ -525,6 +534,7 @@ public class RFileTest extends TestCase 
     trf.closeReader();
   }
   
+  @Test
   public void test7() throws IOException {
     // these tests exercise setting the end key of a range
     
@@ -576,6 +586,7 @@ public class RFileTest extends TestCase 
     trf.reader.close();
   }
   
+  @Test
   public void test8() throws IOException {
     TestRFile trf = new TestRFile();
     
@@ -692,6 +703,7 @@ public class RFileTest extends TestCase 
     return cfs;
   }
   
+  @Test
   public void test9() throws IOException {
     TestRFile trf = new TestRFile();
     
@@ -833,6 +845,7 @@ public class RFileTest extends TestCase 
     
   }
   
+  @Test
   public void test10() throws IOException {
     
     // test empty locality groups
@@ -961,6 +974,7 @@ public class RFileTest extends TestCase 
     trf.closeReader();
   }
   
+  @Test
   public void test11() throws IOException {
     // test locality groups with more than two entries
     
@@ -1065,6 +1079,7 @@ public class RFileTest extends TestCase 
     trf.closeReader();
   }
   
+  @Test
   public void test12() throws IOException {
     // test inserting column fams not in locality groups
     
@@ -1096,6 +1111,7 @@ public class RFileTest extends TestCase 
     
   }
   
+  @Test
   public void test13() throws IOException {
     // test inserting column fam in default loc group that was in
     // previous locality group
@@ -1137,6 +1153,7 @@ public class RFileTest extends TestCase 
     
   }
   
+  @Test
   public void test14() throws IOException {
     // test starting locality group after default locality group was started
     
@@ -1162,6 +1179,7 @@ public class RFileTest extends TestCase 
     trf.writer.close();
   }
   
+  @Test
   public void test16() throws IOException {
     TestRFile trf = new TestRFile();
     
@@ -1180,6 +1198,7 @@ public class RFileTest extends TestCase 
     trf.closeWriter();
   }
   
+  @Test
   public void test17() throws IOException {
     // add a lot of the same keys to rfile that cover multiple blocks...
     // this should cause the keys in the index to be exactly the same...
@@ -1318,6 +1337,7 @@ public class RFileTest extends TestCase 
     assertEquals(nonExcluded, colFamsSeen);
   }
   
+  @Test
   public void test18() throws IOException {
     // test writing more column families to default LG than it will track
     
@@ -1369,6 +1389,7 @@ public class RFileTest extends TestCase 
     trf.closeReader();
   }
   
+  @Test
   public void test19() throws IOException {
     // test RFile metastore
     TestRFile trf = new TestRFile();
@@ -1421,9 +1442,16 @@ public class RFileTest extends TestCase 
     trf.closeReader();
   }
   
+  @Test(expected = NullPointerException.class)
+  public void testMissingUnreleasedVersions() throws Exception {
+    runVersionTest(5);
+  }
+  
+  @Test
   public void testOldVersions() throws Exception {
     runVersionTest(3);
     runVersionTest(4);
+    runVersionTest(6);
   }
   
   private void runVersionTest(int version) throws IOException {

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/file/rfile/RelativeKeyTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/file/rfile/RelativeKeyTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/file/rfile/RelativeKeyTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/file/rfile/RelativeKeyTest.java Mon Jan 14 22:03:24 2013
@@ -16,13 +16,29 @@
  */
 package org.apache.accumulo.core.file.rfile;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.assertEquals;
 
-/**
- * 
- */
-public class RelativeKeyTest extends TestCase {
-  public void test1() {
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.EOFException;
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.accumulo.core.data.ArrayByteSequence;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.rfile.RelativeKey.MByteSequence;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class RelativeKeyTest {
+  
+  @Test
+  public void testBasicRelativeKey() {
     assertEquals(1, RelativeKey.nextArraySize(0));
     assertEquals(1, RelativeKey.nextArraySize(1));
     assertEquals(2, RelativeKey.nextArraySize(2));
@@ -44,4 +60,203 @@ public class RelativeKeyTest extends Tes
     assertEquals(Integer.MAX_VALUE, RelativeKey.nextArraySize(Integer.MAX_VALUE));
   }
   
+  @Test
+  public void testCommonPrefix() {
+    // exact matches
+    ArrayByteSequence exact = new ArrayByteSequence("abc");
+    assertEquals(-1, RelativeKey.getCommonPrefix(exact, exact));
+    assertEquals(-1, commonPrefixHelper("", ""));
+    assertEquals(-1, commonPrefixHelper("a", "a"));
+    assertEquals(-1, commonPrefixHelper("aa", "aa"));
+    assertEquals(-1, commonPrefixHelper("aaa", "aaa"));
+    assertEquals(-1, commonPrefixHelper("abab", "abab"));
+    assertEquals(-1, commonPrefixHelper(new String("aaa"), new ArrayByteSequence("aaa").toString()));
+    assertEquals(-1, commonPrefixHelper("abababababab".substring(3, 6), "ccababababcc".substring(3, 6)));
+    
+    // no common prefix
+    assertEquals(0, commonPrefixHelper("", "a"));
+    assertEquals(0, commonPrefixHelper("a", ""));
+    assertEquals(0, commonPrefixHelper("a", "b"));
+    assertEquals(0, commonPrefixHelper("aaaa", "bbbb"));
+    
+    // some common prefix
+    assertEquals(1, commonPrefixHelper("a", "ab"));
+    assertEquals(1, commonPrefixHelper("ab", "ac"));
+    assertEquals(1, commonPrefixHelper("ab", "ac"));
+    assertEquals(2, commonPrefixHelper("aa", "aaaa"));
+    assertEquals(4, commonPrefixHelper("aaaaa", "aaaab"));
+  }
+  
+  private int commonPrefixHelper(String a, String b) {
+    return RelativeKey.getCommonPrefix(new ArrayByteSequence(a), new ArrayByteSequence(b));
+  }
+  
+  @Test
+  public void testReadWritePrefix() throws IOException {
+    Key prevKey = new Key("row1", "columnfamily1", "columnqualifier1", "columnvisibility1", 1000);
+    Key newKey = new Key("row2", "columnfamily2", "columnqualifier2", "columnvisibility2", 3000);
+    RelativeKey expected = new RelativeKey(prevKey, newKey);
+    
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    DataOutputStream out = new DataOutputStream(baos);
+    expected.write(out);
+    
+    RelativeKey actual = new RelativeKey();
+    actual.setPrevKey(prevKey);
+    actual.readFields(new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));
+    
+    assertEquals(expected.getKey(), actual.getKey());
+  }
+  
+  private static ArrayList<Key> expectedKeys;
+  private static ArrayList<Value> expectedValues;
+  private static ArrayList<Integer> expectedPositions;
+  private static ByteArrayOutputStream baos;
+  
+  @BeforeClass
+  public static void initSource() throws IOException {
+    int initialListSize = 10000;
+    
+    baos = new ByteArrayOutputStream();
+    DataOutputStream out = new DataOutputStream(baos);
+    
+    expectedKeys = new ArrayList<Key>(initialListSize);
+    expectedValues = new ArrayList<Value>(initialListSize);
+    expectedPositions = new ArrayList<Integer>(initialListSize);
+    
+    Key prev = null;
+    int val = 0;
+    for (int row = 0; row < 4; row++) {
+      String rowS = RFileTest.nf("r_", row);
+      for (int cf = 0; cf < 4; cf++) {
+        String cfS = RFileTest.nf("cf_", cf);
+        for (int cq = 0; cq < 4; cq++) {
+          String cqS = RFileTest.nf("cq_", cq);
+          for (int cv = 'A'; cv < 'A' + 4; cv++) {
+            String cvS = "" + (char) cv;
+            for (int ts = 4; ts > 0; ts--) {
+              Key k = RFileTest.nk(rowS, cfS, cqS, cvS, ts);
+              k.setDeleted(true);
+              Value v = RFileTest.nv("" + val);
+              expectedPositions.add(out.size());
+              new RelativeKey(prev, k).write(out);
+              prev = k;
+              v.write(out);
+              expectedKeys.add(k);
+              expectedValues.add(v);
+              
+              k = RFileTest.nk(rowS, cfS, cqS, cvS, ts);
+              v = RFileTest.nv("" + val);
+              expectedPositions.add(out.size());
+              new RelativeKey(prev, k).write(out);
+              prev = k;
+              v.write(out);
+              expectedKeys.add(k);
+              expectedValues.add(v);
+              
+              val++;
+            }
+          }
+        }
+      }
+    }
+  }
+  
+  private DataInputStream in;
+  
+  @Before
+  public void setupDataInputStream() {
+    in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
+    in.mark(0);
+  }
+  
+  @Test
+  public void testSeekBeforeEverything() throws IOException {
+    Key seekKey = new Key();
+    Key prevKey = new Key();
+    Key currKey = null;
+    MByteSequence value = new MByteSequence(new byte[64], 0, 0);
+    
+    RelativeKey.SkippR skippr = RelativeKey.fastSkip(in, seekKey, value, prevKey, currKey);
+    assertEquals(1, skippr.skipped);
+    assertEquals(new Key(), skippr.prevKey);
+    assertEquals(expectedKeys.get(0), skippr.rk.getKey());
+    assertEquals(expectedValues.get(0).toString(), value.toString());
+    
+    // ensure we can advance after fastskip
+    skippr.rk.readFields(in);
+    assertEquals(expectedKeys.get(1), skippr.rk.getKey());
+    
+    in.reset();
+    
+    seekKey = new Key("a", "b", "c", "d", 1);
+    seekKey.setDeleted(true);
+    skippr = RelativeKey.fastSkip(in, seekKey, value, prevKey, currKey);
+    assertEquals(1, skippr.skipped);
+    assertEquals(new Key(), skippr.prevKey);
+    assertEquals(expectedKeys.get(0), skippr.rk.getKey());
+    assertEquals(expectedValues.get(0).toString(), value.toString());
+    
+    skippr.rk.readFields(in);
+    assertEquals(expectedKeys.get(1), skippr.rk.getKey());
+  }
+  
+  @Test(expected = EOFException.class)
+  public void testSeekAfterEverything() throws IOException {
+    Key seekKey = new Key("s", "t", "u", "v", 1);
+    Key prevKey = new Key();
+    Key currKey = null;
+    MByteSequence value = new MByteSequence(new byte[64], 0, 0);
+    
+    RelativeKey.fastSkip(in, seekKey, value, prevKey, currKey);
+  }
+  
+  @Test
+  public void testSeekMiddle() throws IOException {
+    int seekIndex = expectedKeys.size() / 2;
+    Key seekKey = expectedKeys.get(seekIndex);
+    Key prevKey = new Key();
+    Key currKey = null;
+    MByteSequence value = new MByteSequence(new byte[64], 0, 0);
+    
+    RelativeKey.SkippR skippr = RelativeKey.fastSkip(in, seekKey, value, prevKey, currKey);
+    
+    assertEquals(seekIndex + 1, skippr.skipped);
+    assertEquals(expectedKeys.get(seekIndex - 1), skippr.prevKey);
+    assertEquals(expectedKeys.get(seekIndex), skippr.rk.getKey());
+    assertEquals(expectedValues.get(seekIndex).toString(), value.toString());
+    
+    skippr.rk.readFields(in);
+    assertEquals(expectedValues.get(seekIndex + 1).toString(), value.toString());
+    
+    // try fast skipping to a key that does not exist
+    in.reset();
+    Key fKey = expectedKeys.get(seekIndex).followingKey(PartialKey.ROW_COLFAM_COLQUAL);
+    int i;
+    for (i = seekIndex; expectedKeys.get(i).compareTo(fKey) < 0; i++) {}
+    
+    skippr = RelativeKey.fastSkip(in, expectedKeys.get(i), value, prevKey, currKey);
+    assertEquals(i + 1, skippr.skipped);
+    assertEquals(expectedKeys.get(i - 1), skippr.prevKey);
+    assertEquals(expectedKeys.get(i), skippr.rk.getKey());
+    assertEquals(expectedValues.get(i).toString(), value.toString());
+    
+    // try fast skipping to our current location
+    skippr = RelativeKey.fastSkip(in, expectedKeys.get(i), value, expectedKeys.get(i - 1), expectedKeys.get(i));
+    assertEquals(0, skippr.skipped);
+    assertEquals(expectedKeys.get(i - 1), skippr.prevKey);
+    assertEquals(expectedKeys.get(i), skippr.rk.getKey());
+    assertEquals(expectedValues.get(i).toString(), value.toString());
+    
+    // try fast skipping 1 column family ahead from our current location, testing fastskip from the middle of a block as opposed to starting at the beginning of a block
+    fKey = expectedKeys.get(i).followingKey(PartialKey.ROW_COLFAM);
+    int j;
+    for (j = i; expectedKeys.get(j).compareTo(fKey) < 0; j++) {}
+    skippr = RelativeKey.fastSkip(in, fKey, value, expectedKeys.get(i - 1), expectedKeys.get(i));
+    assertEquals(j - i, skippr.skipped);
+    assertEquals(expectedKeys.get(j - 1), skippr.prevKey);
+    assertEquals(expectedKeys.get(j), skippr.rk.getKey());
+    assertEquals(expectedValues.get(j).toString(), value.toString());
+    
+  }
 }

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/CombinerTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/CombinerTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/CombinerTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/CombinerTest.java Mon Jan 14 22:03:24 2013
@@ -569,6 +569,42 @@ public class CombinerTest {
   }
   
   @Test
+  public void sumAllColumns() throws IOException {
+    TreeMap<Key,Value> tm = new TreeMap<Key,Value>();
+    tm.put(new Key("r", "count", "a", 1), new Value("1".getBytes()));
+    tm.put(new Key("r", "count", "a", 2), new Value("1".getBytes()));
+    tm.put(new Key("r", "count", "b", 3), new Value("1".getBytes()));
+    tm.put(new Key("r", "count", "b", 4), new Value("1".getBytes()));
+    tm.put(new Key("r", "count", "b", 5), new Value("1".getBytes()));
+    tm.put(new Key("r", "count", "c", 6), new Value("1".getBytes()));
+    SortedMapIterator smi = new SortedMapIterator(tm);
+    Combiner iter = new SummingCombiner();
+    IteratorSetting s = new IteratorSetting(10, "s", SummingCombiner.class);
+    SummingCombiner.setColumns(s, Collections.singletonList(new IteratorSetting.Column("count")));
+    SummingCombiner.setEncodingType(s, LongCombiner.StringEncoder.class);
+    iter.init(smi, s.getOptions(), new DefaultIteratorEnvironment());
+    Combiner iter2 = new SummingCombiner();
+    IteratorSetting s2 = new IteratorSetting(10, "s2", SummingCombiner.class);
+    SummingCombiner.setColumns(s2, Collections.singletonList(new IteratorSetting.Column("count","a")));
+    SummingCombiner.setEncodingType(s2, LongCombiner.StringEncoder.class);
+    iter2.init(iter, s2.getOptions(), new DefaultIteratorEnvironment());
+    iter2.seek(new Range(), EMPTY_COL_FAMS, false);
+    
+    assertTrue(iter2.hasTop());
+    assertEquals("2", iter2.getTopValue().toString());
+    iter2.next();
+    assertTrue(iter2.hasTop());
+    assertEquals("3", iter2.getTopValue().toString());
+    iter2.next();
+    assertTrue(iter2.hasTop());
+    assertEquals("1", iter2.getTopValue().toString());
+    iter2.next();
+    assertFalse(iter2.hasTop());
+  }
+  
+
+  
+  @Test
   public void maxMinTest() throws IOException {
     Encoder<Long> encoder = LongCombiner.VAR_LEN_ENCODER;
     

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/FilterTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/FilterTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/FilterTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/FilterTest.java Mon Jan 14 22:03:24 2013
@@ -17,6 +17,7 @@
 package org.apache.accumulo.core.iterators.user;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -197,6 +198,8 @@ public class FilterTest {
     AgeOffFilter.setTTL(is, 101l);
     AgeOffFilter.setCurrentTime(is, 1001l);
     AgeOffFilter.setNegate(is, true);
+    assertTrue(((AgeOffFilter) a).validateOptions(is.getOptions()));
+    assertFalse(((AgeOffFilter) a).validateOptions(EMPTY_OPTS));
     a.init(new SortedMapIterator(tm), is.getOptions(), null);
     a = a.deepCopy(null);
     SortedKeyValueIterator<Key,Value> copy = a.deepCopy(null);
@@ -223,6 +226,7 @@ public class FilterTest {
     assertTrue(tm.size() == 1000);
     
     ColumnAgeOffFilter a = new ColumnAgeOffFilter();
+    assertTrue(a.validateOptions(is.getOptions()));
     a.init(new SortedMapIterator(tm), is.getOptions(), new DefaultIteratorEnvironment());
     a.overrideCurrentTime(ts);
     a.seek(new Range(), EMPTY_COL_FAMS, false);
@@ -429,6 +433,7 @@ public class FilterTest {
     assertEquals(size(a), 89);
     
     TimestampFilter.setStart(is, "19990101000011GMT", false);
+    assertTrue(a.validateOptions(is.getOptions()));
     a.init(new SortedMapIterator(tm), is.getOptions(), null);
     a.seek(new Range(), EMPTY_COL_FAMS, false);
     assertEquals(size(a), 88);
@@ -440,9 +445,29 @@ public class FilterTest {
     assertEquals(size(a), 32);
     
     TimestampFilter.setEnd(is, "19990101000031GMT", false);
+    assertTrue(a.validateOptions(is.getOptions()));
     a.init(new SortedMapIterator(tm), is.getOptions(), null);
     a.seek(new Range(), EMPTY_COL_FAMS, false);
     assertEquals(size(a), 31);
+    
+    TimestampFilter.setEnd(is, 253402300800001l, true);
+    a.init(new SortedMapIterator(tm), is.getOptions(), null);
+    
+    is.clearOptions();
+    is.addOption(TimestampFilter.START, "19990101000011GMT");
+    assertTrue(a.validateOptions(is.getOptions()));
+    a.init(new SortedMapIterator(tm), is.getOptions(), null);
+    a.seek(new Range(), EMPTY_COL_FAMS, false);
+    assertEquals(size(a), 89);
+    
+    is.clearOptions();
+    is.addOption(TimestampFilter.END, "19990101000031GMT");
+    assertTrue(a.validateOptions(is.getOptions()));
+    a.init(new SortedMapIterator(tm), is.getOptions(), null);
+    a.seek(new Range(), EMPTY_COL_FAMS, false);
+    assertEquals(size(a), 32);
+    
+    assertFalse(a.validateOptions(EMPTY_OPTS));
   }
   
   @Test

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/IntersectingIteratorTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/IntersectingIteratorTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/IntersectingIteratorTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/IntersectingIteratorTest.java Mon Jan 14 22:03:24 2013
@@ -26,7 +26,6 @@ import java.util.Map.Entry;
 import java.util.Random;
 import java.util.TreeMap;
 
-import junit.framework.Assert;
 import junit.framework.TestCase;
 
 import org.apache.accumulo.core.Constants;
@@ -288,10 +287,10 @@ public class IntersectingIteratorTest ex
     bs.addScanIterator(ii);
     bs.setRanges(Collections.singleton(new Range()));
     Iterator<Entry<Key,Value>> iterator = bs.iterator();
-    Assert.assertTrue(iterator.hasNext());
+    assertTrue(iterator.hasNext());
     Entry<Key,Value> next = iterator.next();
     Key key = next.getKey();
-    Assert.assertEquals(key.getColumnQualifier(), new Text("5000000000000000"));
-    Assert.assertFalse(iterator.hasNext());
+    assertEquals(key.getColumnQualifier(), new Text("5000000000000000"));
+    assertFalse(iterator.hasNext());
   }
 }

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/RegExFilterTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/RegExFilterTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/RegExFilterTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/iterators/user/RegExFilterTest.java Mon Jan 14 22:03:24 2013
@@ -59,7 +59,7 @@ public class RegExFilterTest extends Tes
     IteratorSetting is = new IteratorSetting(1, RegExFilter.class);
     RegExFilter.setRegexs(is, ".*2", null, null, null, false);
     
-    rei.validateOptions(is.getOptions());
+    assertTrue(rei.validateOptions(is.getOptions()));
     rei.init(new SortedMapIterator(tm), is.getOptions(), new DefaultIteratorEnvironment());
     rei.seek(new Range(), EMPTY_COL_FAMS, false);
     
@@ -87,7 +87,7 @@ public class RegExFilterTest extends Tes
     is.clearOptions();
     
     RegExFilter.setRegexs(is, null, "ya.*", null, null, false);
-    rei.validateOptions(is.getOptions());
+    assertTrue(rei.validateOptions(is.getOptions()));
     rei.init(new SortedMapIterator(tm), is.getOptions(), new DefaultIteratorEnvironment());
     rei.seek(new Range(), EMPTY_COL_FAMS, false);
     
@@ -100,7 +100,7 @@ public class RegExFilterTest extends Tes
     is.clearOptions();
     
     RegExFilter.setRegexs(is, null, null, ".*01", null, false);
-    rei.validateOptions(is.getOptions());
+    assertTrue(rei.validateOptions(is.getOptions()));
     rei.init(new SortedMapIterator(tm), is.getOptions(), new DefaultIteratorEnvironment());
     rei.seek(new Range(), EMPTY_COL_FAMS, false);
     
@@ -113,7 +113,7 @@ public class RegExFilterTest extends Tes
     is.clearOptions();
     
     RegExFilter.setRegexs(is, null, null, null, ".*at", false);
-    rei.validateOptions(is.getOptions());
+    assertTrue(rei.validateOptions(is.getOptions()));
     rei.init(new SortedMapIterator(tm), is.getOptions(), new DefaultIteratorEnvironment());
     rei.seek(new Range(), EMPTY_COL_FAMS, false);
     

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java Mon Jan 14 22:03:24 2013
@@ -82,6 +82,11 @@ public class ColumnVisibilityTest {
   @Test
   public void testNormalization() {
     normalized("a", "a", "(a)", "a", "b|a", "a|b", "(b)|a", "a|b", "(b|(a|c))&x", "x&(a|b|c)", "(((a)))", "a");
+    final String normForm = "a&b&c";
+    normalized("b&c&a", normForm, "c&b&a", normForm, "a&(b&c)", normForm, "(a&c)&b", normForm);
+
+    // this is an expression that's basically `expr | expr`
+    normalized("(d&c&b&a)|(b&c&a&d)", "a&b&c&d");
   }
   
   @Test

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/util/LocalityGroupUtilTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/util/LocalityGroupUtilTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/util/LocalityGroupUtilTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/util/LocalityGroupUtilTest.java Mon Jan 14 22:03:24 2013
@@ -20,7 +20,7 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
-import junit.framework.Assert;
+import static org.junit.Assert.*;
 
 import org.apache.accumulo.core.conf.ConfigurationCopy;
 import org.apache.accumulo.core.data.ArrayByteSequence;
@@ -39,18 +39,18 @@ public class LocalityGroupUtilTest {
     conf.set("table.groups.enabled", "lg1");
     try {
       Map<String,Set<ByteSequence>> groups = LocalityGroupUtil.getLocalityGroups(conf);
-      Assert.assertEquals(1, groups.size());
-      Assert.assertNotNull(groups.get("lg1"));
-      Assert.assertEquals(2, groups.get("lg1").size());
-      Assert.assertTrue(groups.get("lg1").contains(new ArrayByteSequence("cf1")));
+      assertEquals(1, groups.size());
+      assertNotNull(groups.get("lg1"));
+      assertEquals(2, groups.get("lg1").size());
+      assertTrue(groups.get("lg1").contains(new ArrayByteSequence("cf1")));
     } catch (LocalityGroupConfigurationError err) {
-      Assert.fail();
+      fail();
     }
     try {
       conf.set("table.group.lg2", "cf1");
       conf.set("table.groups.enabled", "lg1,lg2");
       LocalityGroupUtil.getLocalityGroups(conf);
-      Assert.fail();
+      fail();
     } catch (LocalityGroupConfigurationError err) {}
   }
   
@@ -71,8 +71,8 @@ public class LocalityGroupUtilTest {
     
     ByteSequence bs2 = LocalityGroupUtil.decodeColumnFamily(ecf);
     
-    Assert.assertEquals(bs1, bs2);
-    Assert.assertEquals(ecf, LocalityGroupUtil.encodeColumnFamily(bs2));
+    assertEquals(bs1, bs2);
+    assertEquals(ecf, LocalityGroupUtil.encodeColumnFamily(bs2));
     
     // test encoding multiple column fams containing binary data
     HashSet<Text> in = new HashSet<Text>();
@@ -83,7 +83,7 @@ public class LocalityGroupUtilTest {
     in2.add(new ArrayByteSequence(test2));
     Set<ByteSequence> out = LocalityGroupUtil.decodeColumnFamilies(LocalityGroupUtil.encodeColumnFamilies(in));
     
-    Assert.assertEquals(in2, out);
+    assertEquals(in2, out);
   }
   
 }

Modified: accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/util/shell/command/FormatterCommandTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/util/shell/command/FormatterCommandTest.java?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/util/shell/command/FormatterCommandTest.java (original)
+++ accumulo/branches/ACCUMULO-259/core/src/test/java/org/apache/accumulo/core/util/shell/command/FormatterCommandTest.java Mon Jan 14 22:03:24 2013
@@ -23,7 +23,7 @@ import java.io.Writer;
 import java.util.Iterator;
 import java.util.Map.Entry;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -50,7 +50,7 @@ public class FormatterCommandTest {
     // Keep the Shell AUDIT log off the test output
     Logger.getLogger(Shell.class).setLevel(Level.WARN);
     
-    final String[] args = new String[] {"-fake", "-u", "root", "-p", "passwd"};
+    final String[] args = new String[] {"--fake", "-u", "root", "-p", ""};
    
     final String[] commands = createCommands();
     

Modified: accumulo/branches/ACCUMULO-259/docs/examples/README.batch
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/docs/examples/README.batch?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/docs/examples/README.batch (original)
+++ accumulo/branches/ACCUMULO-259/docs/examples/README.batch Mon Jan 14 22:03:24 2013
@@ -40,8 +40,8 @@ Before you run this, you must ensure tha
 You must also create the table, batchtest1, ahead of time. (In the shell, use "createtable batchtest1")
 
     $ ./bin/accumulo shell -u username -e "createtable batchtest1"
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.SequentialBatchWriter instance zookeepers username password batchtest1 0 10000 50 20000000 500 20 exampleVis
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner instance zookeepers username password batchtest1 100 0 10000 50 20 exampleVis
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.SequentialBatchWriter -i instance -z zookeepers -u username -p password -t batchtest1 --start 0 --num 10000 --size 50 --batchMemory 20M --batchLatency 500 --batchThreads 20 --vis exampleVis
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner -i instance -z zookeepers -u username -p password -t batchtest1 --num 100 --min 0 --max 10000 --size 50 --scanThreads 20 --vis exampleVis
     07 11:33:11,103 [client.CountingVerifyingReceiver] INFO : Generating 100 random queries...
     07 11:33:11,112 [client.CountingVerifyingReceiver] INFO : finished
     07 11:33:11,260 [client.CountingVerifyingReceiver] INFO : 694.44 lookups/sec   0.14 secs

Modified: accumulo/branches/ACCUMULO-259/docs/examples/README.bloom
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/docs/examples/README.bloom?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/docs/examples/README.bloom (original)
+++ accumulo/branches/ACCUMULO-259/docs/examples/README.bloom Mon Jan 14 22:03:24 2013
@@ -39,7 +39,7 @@ Below 1 million random values are insert
 generated rows range between 0 and 1 billion.  The random number generator is
 initialized with the seed 7.
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter -s 7 instance zookeepers username password bloom_test 1000000 0 1000000000 50 2000000 60000 3 exampleVis
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --vis exampleVis
 
 Below the table is flushed:
 
@@ -50,7 +50,7 @@ After the flush completes, 500 random qu
 same seed is used to generate the queries, therefore everything is found in the
 table.
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner -s 7 instance zookeepers username password bloom_test 500 0 1000000000 50 20 exampleVis
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --vis exampleVis
     Generating 500 random queries...finished
     96.19 lookups/sec   5.20 secs
     num results : 500
@@ -62,7 +62,7 @@ Below another 500 queries are performed,
 in nothing being found.  In this case the lookups are much faster because of
 the bloom filters.
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner -s 8 instance zookeepers username password bloom_test 500 0 1000000000 50 20 exampleVis
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 8 -i instance -z zookeepers -u username -p password -t bloom_test --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --auths exampleVis
     Generating 500 random queries...finished
     2212.39 lookups/sec   0.23 secs
     num results : 0
@@ -113,11 +113,12 @@ The commands for creating the first tabl
     username@instance bloom_test1> config -t bloom_test1 -s table.compaction.major.ratio=7
     username@instance bloom_test1> exit
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter -s 7 instance zookeepers username password bloom_test1 1000000 0 1000000000 50 2000000 60000 3 exampleVis
+    $ ARGS="-i instance -z zookeepers -u username -p password -t bloom_test1 --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --auths exampleVis"
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 $ARGS
     $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter -s 8 instance zookeepers username password bloom_test1 1000000 0 1000000000 50 2000000 60000 3 exampleVis
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 8 $ARGS
     $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter -s 9 instance zookeepers username password bloom_test1 1000000 0 1000000000 50 2000000 60000 3 exampleVis
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 9 $ARGS
     $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
 
 The commands for creating the second table with bloom filers are below.
@@ -136,18 +137,19 @@ The commands for creating the second tab
     username@instance bloom_test2> config -t bloom_test2 -s table.bloom.enabled=true
     username@instance bloom_test2> exit
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter -s 7 instance zookeepers username password bloom_test2 1000000 0 1000000000 50 2000000 60000 3 exampleVis
+    $ ARGS="-i instance -z zookeepers -u username -p password -t bloom_test2 --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --auths exampleVis"
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 $ARGS
     $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter -s 8 instance zookeepers username password bloom_test2 1000000 0 1000000000 50 2000000 60000 3 exampleVis
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 8 $ARGS
     $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter -s 9 instance zookeepers username password bloom_test2 1000000 0 1000000000 50 2000000 60000 3 exampleVis
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 9 $ARGS
     $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
 
 Below 500 lookups are done against the table without bloom filters using random
 number generator seed 7.  Even though only one map file will likely contain
 entries for this seed, all map files will be interrogated.
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner -s 7 instance zookeepers username password bloom_test1 500 0 1000000000 50 20 exampleVis
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test1 --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --auths exampleVis
     Generating 500 random queries...finished
     35.09 lookups/sec  14.25 secs
     num results : 500
@@ -159,7 +161,7 @@ Below the same lookups are done against 
 lookups were 2.86 times faster because only one map file was used, even though three
 map files existed.
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner -s 7 instance zookeepers username password bloom_test2 500 0 1000000000 50 20 exampleVis
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test2 --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --auths exampleVis
     Generating 500 random queries...finished
     99.03 lookups/sec   5.05 secs
     num results : 500

Modified: accumulo/branches/ACCUMULO-259/docs/examples/README.bulkIngest
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/docs/examples/README.bulkIngest?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/docs/examples/README.bulkIngest (original)
+++ accumulo/branches/ACCUMULO-259/docs/examples/README.bulkIngest Mon Jan 14 22:03:24 2013
@@ -25,10 +25,10 @@ accumulo.  Then we verify the 1000 rows 
 first two arguments to all of the commands except for GenerateTestData are the
 accumulo instance name, and a comma-separated list of zookeepers.
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.mapreduce.bulk.SetupTable instance zookeepers username password test_bulk row_00000333 row_00000666
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.mapreduce.bulk.GenerateTestData 0 1000 bulk/test_1.txt
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.mapreduce.bulk.SetupTable -i instance -z zookeepers -u username -p password -t test_bulk row_00000333 row_00000666
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.mapreduce.bulk.GenerateTestData --start-row 0 --count 1000 --output bulk/test_1.txt
     
-    $ ./bin/tool.sh lib/examples-simple-*[^c].jar org.apache.accumulo.examples.simple.mapreduce.bulk.BulkIngestExample instance zookeepers username password test_bulk bulk tmp/bulkWork
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.mapreduce.bulk.VerifyIngest instance zookeepers username password test_bulk 0 1000
+    $ ./bin/tool.sh lib/examples-simple-*[^cs].jar org.apache.accumulo.examples.simple.mapreduce.bulk.BulkIngestExample -i instance -z zookeepers -u username -p password -t test_bulk bulk tmp/bulkWork
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.mapreduce.bulk.VerifyIngest -i instance -z zookeepers -u username -p password -t test_bulk --start-row 0 --count 1000
 
 For a high level discussion of bulk ingest, see the docs dir.

Modified: accumulo/branches/ACCUMULO-259/docs/examples/README.dirlist
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/docs/examples/README.dirlist?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/docs/examples/README.dirlist (original)
+++ accumulo/branches/ACCUMULO-259/docs/examples/README.dirlist Mon Jan 14 22:03:24 2013
@@ -31,7 +31,7 @@ This example shows how to use Accumulo t
  
 To begin, ingest some data with Ingest.java.
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.Ingest instance zookeepers username password dirTable indexTable dataTable exampleVis 100000 /local/username/workspace
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.Ingest -i instance -z zookeepers -u username -p password --vis exampleVis --chunkSize 100000 /local/username/workspace
 
 This may take some time if there are large files in the /local/username/workspace directory.  If you use 0 instead of 100000 on the command line, the ingest will run much faster, but it will not put any file data into Accumulo (the dataTable will be empty).
 Note that running this example will create tables dirTable, indexTable, and dataTable in Accumulo that you should delete when you have completed the example.
@@ -43,20 +43,20 @@ To browse the data ingested, use Viewer.
 
 then run the Viewer:
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.Viewer instance zookeepers username password dirTable dataTable exampleVis /local/username/workspace
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.Viewer -i instance -z zookeepers -u username -p password -t dirTable --dataTable dataTable --auths exampleVis --path /local/username/workspace
 
 To list the contents of specific directories, use QueryUtil.java.
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil instance zookeepers username password dirTable exampleVis /local/username
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil instance zookeepers username password dirTable exampleVis /local/username/workspace
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t dirTable --auths exampleVis --path /local/username
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t dirTable --auths exampleVis --path /local/username/workspace
 
 To perform searches on file or directory names, also use QueryUtil.java.  Search terms must contain no more than one wild card and cannot contain "/".
 *Note* these queries run on the _indexTable_ table instead of the dirTable table.
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil instance zookeepers username password indexTable exampleVis filename -search
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil instance zookeepers username password indexTable exampleVis 'filename*' -search
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil instance zookeepers username password indexTable exampleVis '*jar' -search
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil instance zookeepers username password indexTable exampleVis filename*jar -search
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path filename --search
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path 'filename*' --search
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path '*jar' --search
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path 'filename*jar' --search
 
 To count the number of direct children (directories and files) and descendants (children and children's descendants, directories and files), run the FileCount over the dirTable table.
 The results are written back to the same table.  FileCount reads from and writes to Accumulo.  This requires scan authorizations for the read and a visibility for the data written.

Modified: accumulo/branches/ACCUMULO-259/docs/examples/README.filedata
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/docs/examples/README.filedata?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/docs/examples/README.filedata (original)
+++ accumulo/branches/ACCUMULO-259/docs/examples/README.filedata Mon Jan 14 22:03:24 2013
@@ -32,7 +32,7 @@ This example is coupled with the dirlist
 
 If you haven't already run the README.dirlist example, ingest a file with FileDataIngest.
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.filedata.FileDataIngest instance zookeepers username password dataTable exampleVis 1000 $ACCUMULO_HOME/README
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.filedata.FileDataIngest -i instance -z zookeepers -u username -p password -t dataTable --auths exampleVis --chunk 1000 $ACCUMULO_HOME/README
 
 Open the accumulo shell and look at the data.  The row is the MD5 hash of the file, which you can verify by running a command such as 'md5sum' on the file.
 
@@ -40,7 +40,7 @@ Open the accumulo shell and look at the 
 
 Run the CharacterHistogram MapReduce to add some information about the file.
 
-    $ bin/tool.sh lib/examples-simple*[^c].jar org.apache.accumulo.examples.simple.filedata.CharacterHistogram instance zookeepers username password dataTable exampleVis exampleVis
+    $ bin/tool.sh lib/examples-simple*[^cs].jar org.apache.accumulo.examples.simple.filedata.CharacterHistogram -i instance -z zookeepers -u username -p password -t dataTable --auths exampleVis --vis exampleVis
 
 Scan again to see the histogram stored in the 'info' column family.
 

Modified: accumulo/branches/ACCUMULO-259/docs/examples/README.helloworld
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/docs/examples/README.helloworld?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/docs/examples/README.helloworld (original)
+++ accumulo/branches/ACCUMULO-259/docs/examples/README.helloworld Mon Jan 14 22:03:24 2013
@@ -32,11 +32,11 @@ Create a table called 'hellotable':
 
 Launch a Java program that inserts data with a BatchWriter:
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.helloworld.InsertWithBatchWriter instance zookeepers username password hellotable 
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.helloworld.InsertWithBatchWriter -i instance -z zookeepers -u username -p password -t hellotable 
 
 Alternatively, the same data can be inserted using MapReduce writers:
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.helloworld.InsertWithOutputFormat instance zookeepers username password hellotable 
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.helloworld.InsertWithOutputFormat -i instance -z zookeepers -u username -p password -t hellotable 
 
 On the accumulo status page at the URL below (where 'master' is replaced with the name or IP of your accumulo master), you should see 50K entries
 	
@@ -49,4 +49,4 @@ To view the entries, use the shell to sc
 
 You can also use a Java class to scan the table:
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.helloworld.ReadData instance zookeepers username password hellotable row_0 row_1001
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.helloworld.ReadData -i instance -z zookeepers -u username -p password -t hellotable --startKey row_0 --endKey row_1001

Modified: accumulo/branches/ACCUMULO-259/docs/examples/README.isolation
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/docs/examples/README.isolation?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/docs/examples/README.isolation (original)
+++ accumulo/branches/ACCUMULO-259/docs/examples/README.isolation Mon Jan 14 22:03:24 2013
@@ -31,7 +31,7 @@ reading the row at the same time a mutat
 Below, Interference Test is run without isolation enabled for 5000 iterations
 and it reports problems.
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.isolation.InterferenceTest instance zookeepers username password isotest 5000 false
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.isolation.InterferenceTest -i instance -z zookeepers -u username -p password -t isotest --iterations 5000
     ERROR Columns in row 053 had multiple values [53, 4553]
     ERROR Columns in row 061 had multiple values [561, 61]
     ERROR Columns in row 070 had multiple values [570, 1070]
@@ -44,7 +44,7 @@ and it reports problems.
 Below, Interference Test is run with isolation enabled for 5000 iterations and
 it reports no problems.
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.isolation.InterferenceTest instance zookeepers username password isotest 5000 true
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.isolation.InterferenceTest -i instance -z zookeepers -u username -p password -t isotest --iterations 5000 --isolated
     finished
 
 

Modified: accumulo/branches/ACCUMULO-259/docs/examples/README.mapred
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/docs/examples/README.mapred?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/docs/examples/README.mapred (original)
+++ accumulo/branches/ACCUMULO-259/docs/examples/README.mapred Mon Jan 14 22:03:24 2013
@@ -50,7 +50,7 @@ for the column family count.
 
 After creating the table, run the word count map reduce job.
 
-    $ bin/tool.sh lib/examples-simple*[^c].jar org.apache.accumulo.examples.simple.mapreduce.WordCount instance zookeepers /user/username/wc wordCount -u username -p password
+    $ bin/tool.sh lib/examples-simple*[^cs].jar org.apache.accumulo.examples.simple.mapreduce.WordCount -i instance -z zookeepers --input /user/username/wc -t wordCount -u username -p password
     
     11/02/07 18:20:11 INFO input.FileInputFormat: Total input paths to process : 1
     11/02/07 18:20:12 INFO mapred.JobClient: Running job: job_201102071740_0003

Modified: accumulo/branches/ACCUMULO-259/docs/examples/README.maxmutation
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/docs/examples/README.maxmutation?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/docs/examples/README.maxmutation (original)
+++ accumulo/branches/ACCUMULO-259/docs/examples/README.maxmutation Mon Jan 14 22:03:24 2013
@@ -19,9 +19,9 @@ Notice:    Licensed to the Apache Softwa
 This is an example of how to limit the size of mutations that will be accepted into
 a table.  Under the default configuration, accumulo does not provide a limitation
 on the size of mutations that can be ingested.  Poorly behaved writers might
-inadvertently create mutations so large, that they cause the tablet servers
-or logger processes to run out of memory.  A simple contraint can be added to
-a table to reject very large mutations.
+inadvertently create mutations so large, that they cause the tablet servers to 
+run out of memory.  A simple constraint can be added to a table to reject very 
+large mutations.
 
     $ ./bin/accumulo shell -u username -p password
     
@@ -42,6 +42,6 @@ Now the table will reject any mutation t
 working memory of the tablet server.  The following command attempts to ingest 
 a single row with 10000 columns, which exceeds the memory limit:
 
-    $ ./bin/accumulo org.apache.accumulo.server.test.TestIngest 1 0 10000
+    $ ./bin/accumulo org.apache.accumulo.server.test.TestIngest -i instance -z zookeepers -u username -p password --rows 1 --cols 10000 
 ERROR : Constraint violates : ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.MaxMutationSize, violationCode:0, violationDescription:mutation exceeded maximum size of 188160, numberOfViolatingMutations:1)
 

Modified: accumulo/branches/ACCUMULO-259/docs/examples/README.shard
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/docs/examples/README.shard?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/docs/examples/README.shard (original)
+++ accumulo/branches/ACCUMULO-259/docs/examples/README.shard Mon Jan 14 22:03:24 2013
@@ -32,12 +32,12 @@ To run these example programs, create tw
 After creating the tables, index some files.  The following command indexes all of the java files in the Accumulo source code.
 
     $ cd /local/username/workspace/accumulo/
-    $ find src -name "*.java" | xargs ./bin/accumulo org.apache.accumulo.examples.simple.shard.Index instance zookeepers shard username password 30
+    $ find core/src server/src -name "*.java" | xargs ./bin/accumulo org.apache.accumulo.examples.simple.shard.Index -i instance -z zookeepers -t shard -u username -p password --partitions 30
 
 The following command queries the index to find all files containing 'foo' and 'bar'.
 
     $ cd $ACCUMULO_HOME
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query instance zookeepers shard username password foo bar
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query -i instance -z zookeepers -t shard -u username -p password foo bar
     /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/security/ColumnVisibilityTest.java
     /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/client/mock/MockConnectorTest.java
     /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/security/VisibilityEvaluatorTest.java
@@ -52,11 +52,12 @@ The following command queries the index 
 
 In order to run ContinuousQuery, we need to run Reverse.java to populate doc2term.
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Reverse instance zookeepers shard doc2term username password
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Reverse -i instance -z zookeepers --shardTable shard --doc2Term doc2term -u username -p password
 
-Below ContinuousQuery is run using 5 terms.  So it selects 5 random terms from each document, then it continually randomly selects one set of 5 terms and queries.  It prints the number of matching documents and the time in seconds.
+Below ContinuousQuery is run using 5 terms.  So it selects 5 random terms from each document, then it continually 
+randomly selects one set of 5 terms and queries.  It prints the number of matching documents and the time in seconds.
 
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.ContinuousQuery instance zookeepers shard doc2term username password 5
+    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.ContinuousQuery -i instance -z zookeepers --shardTable shard --doc2Term doc2term -u username -p password --terms 5
     [public, core, class, binarycomparable, b] 2  0.081
     [wordtodelete, unindexdocument, doctablename, putdelete, insert] 1  0.041
     [import, columnvisibilityinterpreterfactory, illegalstateexception, cv, columnvisibility] 1  0.049

Modified: accumulo/branches/ACCUMULO-259/docs/examples/README.visibility
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/docs/examples/README.visibility?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/docs/examples/README.visibility (original)
+++ accumulo/branches/ACCUMULO-259/docs/examples/README.visibility Mon Jan 14 22:03:24 2013
@@ -128,4 +128,4 @@ It can be set with the following constra
     row f3:q3 [(apple&carrot)|broccoli|spinach]    v3
     row f4:q4 [spinach|broccoli]    v4
     username@instance vistest> 
-    
\ No newline at end of file
+    

Propchange: accumulo/branches/ACCUMULO-259/examples/
------------------------------------------------------------------------------
--- svn:ignore (original)
+++ svn:ignore Mon Jan 14 22:03:24 2013
@@ -3,3 +3,4 @@ target
 .classpath
 .project
 lib
+accumulo-examples.iml

Propchange: accumulo/branches/ACCUMULO-259/examples/instamo/
------------------------------------------------------------------------------
--- svn:ignore (added)
+++ svn:ignore Mon Jan 14 22:03:24 2013
@@ -0,0 +1,5 @@
+target
+.settings
+.classpath
+.project
+instamo.iml

Copied: accumulo/branches/ACCUMULO-259/examples/instamo/src/main/java/org/apache/accumulo/instamo/MapReduceExample.java (from r1432174, accumulo/trunk/examples/instamo/src/main/java/org/apache/accumulo/instamo/MapReduceExample.java)
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/instamo/src/main/java/org/apache/accumulo/instamo/MapReduceExample.java?p2=accumulo/branches/ACCUMULO-259/examples/instamo/src/main/java/org/apache/accumulo/instamo/MapReduceExample.java&p1=accumulo/trunk/examples/instamo/src/main/java/org/apache/accumulo/instamo/MapReduceExample.java&r1=1432174&r2=1433166&rev=1433166&view=diff
==============================================================================
--- accumulo/trunk/examples/instamo/src/main/java/org/apache/accumulo/instamo/MapReduceExample.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/instamo/src/main/java/org/apache/accumulo/instamo/MapReduceExample.java Mon Jan 14 22:03:24 2013
@@ -17,11 +17,11 @@
 package org.apache.accumulo.instamo;
 
 import java.io.File;
-import java.util.HashMap;
 import java.util.UUID;
 
 import org.apache.accumulo.server.test.continuous.ContinuousIngest;
 import org.apache.accumulo.server.test.continuous.ContinuousVerify;
+import org.apache.accumulo.test.MacConfig;
 import org.apache.accumulo.test.MiniAccumuloCluster;
 import org.apache.commons.io.FileUtils;
 
@@ -52,7 +52,7 @@ public class MapReduceExample {
     File tmpDir = new File(FileUtils.getTempDirectory(), "macc-" + UUID.randomUUID().toString());
     
     try {
-      MiniAccumuloCluster la = new MiniAccumuloCluster(tmpDir, "pass1234", new HashMap<String,String>());
+      MiniAccumuloCluster la = new MiniAccumuloCluster(new MacConfig(tmpDir, "pass1234"));
       la.start();
       
       System.out.println("\n   ---- Running Mapred Against Accumulo\n");

Copied: accumulo/branches/ACCUMULO-259/examples/instamo/src/test/java/org/apache/accumulo/instamo/ExampleAccumuloUnitTest.java (from r1432174, accumulo/trunk/examples/instamo/src/test/java/org/apache/accumulo/instamo/ExampleAccumuloUnitTest.java)
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/instamo/src/test/java/org/apache/accumulo/instamo/ExampleAccumuloUnitTest.java?p2=accumulo/branches/ACCUMULO-259/examples/instamo/src/test/java/org/apache/accumulo/instamo/ExampleAccumuloUnitTest.java&p1=accumulo/trunk/examples/instamo/src/test/java/org/apache/accumulo/instamo/ExampleAccumuloUnitTest.java&r1=1432174&r2=1433166&rev=1433166&view=diff
==============================================================================
--- accumulo/trunk/examples/instamo/src/test/java/org/apache/accumulo/instamo/ExampleAccumuloUnitTest.java (original)
+++ accumulo/branches/ACCUMULO-259/examples/instamo/src/test/java/org/apache/accumulo/instamo/ExampleAccumuloUnitTest.java Mon Jan 14 22:03:24 2013
@@ -17,9 +17,7 @@ package org.apache.accumulo.instamo;
  */
 
 
-import java.util.HashMap;
-
-import org.apache.accumulo.instamo.AccumuloApp;
+import org.apache.accumulo.test.MacConfig;
 import org.apache.accumulo.test.MiniAccumuloCluster;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -41,7 +39,7 @@ public class ExampleAccumuloUnitTest {
 
     folder.create();
     
-    accumulo = new MiniAccumuloCluster(folder.getRoot(), "superSecret", new HashMap<String,String>());
+    accumulo = new MiniAccumuloCluster(new MacConfig(folder.getRoot(), "superSecret"));
     
     accumulo.start();
     

Modified: accumulo/branches/ACCUMULO-259/examples/pom.xml
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/pom.xml?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/pom.xml (original)
+++ accumulo/branches/ACCUMULO-259/examples/pom.xml Mon Jan 14 22:03:24 2013
@@ -28,6 +28,7 @@
 
   <modules>
     <module>simple</module>
+    <module>instamo</module>
   </modules>
 
   <repositories>

Propchange: accumulo/branches/ACCUMULO-259/examples/simple/
------------------------------------------------------------------------------
--- svn:ignore (original)
+++ svn:ignore Mon Jan 14 22:03:24 2013
@@ -1,2 +1,3 @@
 .*
 target
+examples-simple.iml

Modified: accumulo/branches/ACCUMULO-259/examples/simple/pom.xml
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-259/examples/simple/pom.xml?rev=1433166&r1=1433165&r2=1433166&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-259/examples/simple/pom.xml (original)
+++ accumulo/branches/ACCUMULO-259/examples/simple/pom.xml Mon Jan 14 22:03:24 2013
@@ -26,17 +26,49 @@
   <modelVersion>4.0.0</modelVersion>
   <artifactId>examples-simple</artifactId>
   <name>examples-simple</name>
-
+  
+  <profiles>
+    <!-- profile for building against Hadoop 1.0.x
+    Activate by not specifying hadoop.profile -->
+    <profile>
+      <id>hadoop-1.0</id>
+      <activation>
+        <property>
+          <name>!hadoop.profile</name>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+    <!-- profile for building against Hadoop 2.0.x
+    Activate using: mvn -Dhadoop.profile=2.0 -->
+    <profile>
+      <id>hadoop-2.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>2.0</value>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-client</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+  </profiles>
+  
   <dependencies>
     <dependency>
       <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-core</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-core</artifactId>
-    </dependency>
-    <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
     </dependency>



Mime
View raw message