accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ctubb...@apache.org
Subject [03/50] [abbrv] ACCUMULO-1537 converted many more functional tests to integration tests
Date Wed, 17 Jul 2013 02:33:05 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/ServerSideErrorTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ServerSideErrorTest.java b/test/src/main/java/org/apache/accumulo/test/functional/ServerSideErrorTest.java
deleted file mode 100644
index a972b65..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/ServerSideErrorTest.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.Combiner;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.hadoop.io.Text;
-
-public class ServerSideErrorTest extends FunctionalTest {
-  
-  @Override
-  public void cleanup() throws Exception {}
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return Collections.emptyMap();
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    return Collections.emptyList();
-  }
-  
-  @Override
-  public void run() throws Exception {
-    
-    // Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
-    // logger.setLevel(Level.TRACE);
-    
-    getConnector().tableOperations().create("tt");
-    IteratorSetting is = new IteratorSetting(5, "Bad Aggregator", BadCombiner.class);
-    Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("acf")));
-    getConnector().tableOperations().attachIterator("tt", is);
-    
-    BatchWriter bw = getConnector().createBatchWriter("tt", new BatchWriterConfig());
-    
-    Mutation m = new Mutation(new Text("r1"));
-    m.put(new Text("acf"), new Text("foo"), new Value("1".getBytes()));
-    
-    bw.addMutation(m);
-    
-    bw.close();
-    
-    // try to scan table
-    Scanner scanner = getConnector().createScanner("tt", Authorizations.EMPTY);
-    
-    boolean caught = false;
-    try {
-      for (Entry<Key,Value> entry : scanner) {
-        entry.getKey();
-      }
-    } catch (Exception e) {
-      caught = true;
-    }
-    
-    if (!caught)
-      throw new Exception("Scan did not fail");
-    
-    // try to batch scan the table
-    BatchScanner bs = getConnector().createBatchScanner("tt", Authorizations.EMPTY, 2);
-    bs.setRanges(Collections.singleton(new Range()));
-    
-    caught = false;
-    try {
-      for (Entry<Key,Value> entry : bs) {
-        entry.getKey();
-      }
-      bs.close();
-    } catch (Exception e) {
-      caught = true;
-    }
-    if (!caught)
-      throw new Exception("batch scan did not fail");
-    
-    // remove the bad agg so accumulo can shutdown
-    TableOperations to = getConnector().tableOperations();
-    for (Entry<String,String> e : to.getProperties("tt")) {
-      to.removeProperty("tt", e.getKey());
-    }
-    
-    UtilWaitThread.sleep(500);
-    
-    // should be able to scan now
-    scanner = getConnector().createScanner("tt", Authorizations.EMPTY);
-    for (Entry<Key,Value> entry : scanner) {
-      entry.getKey();
-    }
-    
-    // set a nonexistent iterator, should cause scan to fail on server side
-    scanner.addScanIterator(new IteratorSetting(100, "bogus", "com.bogus.iterator"));
-    
-    caught = false;
-    try {
-      for (Entry<Key,Value> entry : scanner) {
-        // should error
-        entry.getKey();
-      }
-    } catch (Exception e) {
-      caught = true;
-    }
-    
-    if (!caught)
-      throw new Exception("Scan did not fail");
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/SparseColumnFamilyTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SparseColumnFamilyTest.java b/test/src/main/java/org/apache/accumulo/test/functional/SparseColumnFamilyTest.java
deleted file mode 100644
index 1ab77ba..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/SparseColumnFamilyTest.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.io.Text;
-
-/**
- * This test recreates issue ACCUMULO-516. Until that issue is fixed this test should time out.
- */
-public class SparseColumnFamilyTest extends FunctionalTest {
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return Collections.emptyMap();
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    return Collections.emptyList();
-  }
-  
-  @Override
-  public void run() throws Exception {
-    getConnector().tableOperations().create("scftt");
-    
-    BatchWriter bw = getConnector().createBatchWriter("scftt", new BatchWriterConfig());
-    
-    // create file in the tablet that has mostly column family 0, with a few entries for column family 1
-    
-    bw.addMutation(nm(0, 1, 0));
-    for (int i = 1; i < 99999; i++) {
-      bw.addMutation(nm(i * 2, 0, i));
-    }
-    bw.addMutation(nm(99999 * 2, 1, 99999));
-    bw.flush();
-    
-    getConnector().tableOperations().flush("scftt", null, null, true);
-    
-    // create a file that has column family 1 and 0 interleaved
-    for (int i = 0; i < 100000; i++) {
-      bw.addMutation(nm(i * 2 + 1, i % 2 == 0 ? 0 : 1, i));
-    }
-    bw.close();
-    
-    getConnector().tableOperations().flush("scftt", null, null, true);
-    
-    Scanner scanner = getConnector().createScanner("scftt", Authorizations.EMPTY);
-    
-    for (int i = 0; i < 200; i++) {
-      
-      // every time we search for column family 1, it will scan the entire file
-      // that has mostly column family 0 until the bug is fixed
-      scanner.setRange(new Range(String.format("%06d", i), null));
-      scanner.clearColumns();
-      scanner.setBatchSize(3);
-      scanner.fetchColumnFamily(new Text(String.format("%03d", 1)));
-      
-      long t1 = System.currentTimeMillis();
-      Iterator<Entry<Key,Value>> iter = scanner.iterator();
-      if (iter.hasNext()) {
-        Entry<Key,Value> entry = iter.next();
-        if (!"001".equals(entry.getKey().getColumnFamilyData().toString())) {
-          throw new Exception();
-        }
-      }
-      long t2 = System.currentTimeMillis();
-      
-      System.out.println("time " + (t2 - t1));
-      
-    }
-  }
-  
-  /**
-   * @param i
-   * @param j
-   * @param k
-   * @return
-   */
-  private Mutation nm(int row, int cf, int val) {
-    Mutation m = new Mutation(String.format("%06d", row));
-    m.put(String.format("%03d", cf), "", "" + val);
-    return m;
-  }
-  
-  @Override
-  public void cleanup() throws Exception {}
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/TimeoutTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/TimeoutTest.java b/test/src/main/java/org/apache/accumulo/test/functional/TimeoutTest.java
deleted file mode 100644
index e7e045d..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/TimeoutTest.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TimedOutException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-
-/**
- * 
- */
-public class TimeoutTest extends FunctionalTest {
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return Collections.emptyMap();
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    return Collections.emptyList();
-  }
-  
-  @Override
-  public void run() throws Exception {
-    testBatchScannerTimeout();
-    testBatchWriterTimeout();
-  }
-  
-  public void testBatchWriterTimeout() throws Exception {
-    Connector conn = getConnector();
-    
-    conn.tableOperations().create("foo1");
-    
-    conn.tableOperations().addConstraint("foo1", SlowConstraint.class.getName());
-    
-    // give constraint time to propagate through zookeeper
-    UtilWaitThread.sleep(250);
-    
-    BatchWriter bw = conn.createBatchWriter("foo1", new BatchWriterConfig().setTimeout(3, TimeUnit.SECONDS));
-    
-    Mutation mut = new Mutation("r1");
-    mut.put("cf1", "cq1", "v1");
-    
-    bw.addMutation(mut);
-    try {
-      bw.close();
-      throw new Exception("batch writer did not timeout");
-    } catch (MutationsRejectedException mre) {
-      if (!(mre.getCause() instanceof TimedOutException)) {
-        throw mre;
-      }
-    }
-  }
-  
-  public void testBatchScannerTimeout() throws Exception {
-    getConnector().tableOperations().create("timeout");
-    
-    BatchWriter bw = getConnector().createBatchWriter("timeout", new BatchWriterConfig());
-    
-    Mutation m = new Mutation("r1");
-    m.put("cf1", "cq1", "v1");
-    m.put("cf1", "cq2", "v2");
-    m.put("cf1", "cq3", "v3");
-    m.put("cf1", "cq4", "v4");
-    
-    bw.addMutation(m);
-    
-    bw.close();
-    
-    BatchScanner bs = getConnector().createBatchScanner("timeout", Authorizations.EMPTY, 2);
-    bs.setTimeout(1, TimeUnit.SECONDS);
-    bs.setRanges(Collections.singletonList(new Range()));
-    
-    // should not timeout
-    for (Entry<Key,Value> entry : bs) {
-      entry.getKey();
-    }
-    
-    IteratorSetting iterSetting = new IteratorSetting(100, SlowIterator.class);
-    iterSetting.addOption("sleepTime", 2000 + "");
-    getConnector().tableOperations().attachIterator("timeout", iterSetting);
-    UtilWaitThread.sleep(250);
-    
-    try {
-      for (Entry<Key,Value> entry : bs) {
-        entry.getKey();
-      }
-      throw new Exception("batch scanner did not time out");
-    } catch (TimedOutException toe) {
-      // toe.printStackTrace();
-    }
-    
-    bs.close();
-  }
-  
-  @Override
-  public void cleanup() throws Exception {
-    
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
index b8e9b18..ad9e3fa 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
@@ -32,17 +32,22 @@ public class DeleteIT extends MacTest {
   @Test(timeout=60*1000)
   public void test() throws Exception {
     Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    deleteTest(c);
+    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+  }
+
+  public static void deleteTest(Connector c) throws Exception {
     VerifyIngest.Opts vopts = new VerifyIngest.Opts();
     TestIngest.Opts opts = new TestIngest.Opts();
-    vopts.rows = opts.rows = 10000;
+    vopts.rows = opts.rows = 1000;
     vopts.cols = opts.cols = 1;
     vopts.random = opts.random = 56;
-    opts.createTable = true;
     TestIngest.ingest(c, opts, new BatchWriterOpts());
     assertEquals(0, cluster.exec(TestRandomDeletes.class, "-p", MacTest.PASSWORD, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers()).waitFor());
     TestIngest.ingest(c, opts, new BatchWriterOpts());
     VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
-    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
   }
   
+  
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
index 56636ed..96e425c 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
@@ -16,8 +16,7 @@
  */
 package org.apache.accumulo.test.functional;
 
-import static org.junit.Assert.*;
-import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.util.Collections;
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java b/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
new file mode 100644
index 0000000..1698193
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+/**
+ * See ACCUMULO-779
+ */
+public class FateStarvationIT extends MacTest {
+  
+  @Test(timeout=60*1000)
+  public void run() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    
+    c.tableOperations().addSplits("test_ingest", TestIngest.getSplitPoints(0, 100000, 50));
+    
+    TestIngest.Opts opts = new TestIngest.Opts();
+    opts.random = 89;
+    opts.timestamp = 7;
+    opts.dataSize = 50;
+    opts.rows = 100000;
+    opts.cols = 1;
+    TestIngest.ingest(c, opts, new BatchWriterOpts());
+    
+    c.tableOperations().flush("test_ingest", null, null, true);
+    
+    List<Text> splits = new ArrayList<Text>(TestIngest.getSplitPoints(0, 100000, 67));
+    Random rand = new Random();
+    
+    for (int i = 0; i < 100; i++) {
+      int idx1 = rand.nextInt(splits.size() - 1);
+      int idx2 = rand.nextInt(splits.size() - (idx1 + 1)) + idx1 + 1;
+      
+      c.tableOperations().compact("test_ingest", splits.get(idx1), splits.get(idx2), false, false);
+    }
+    
+    c.tableOperations().offline("test_ingest");
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java b/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
index 8b8dc61..1466153 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
@@ -34,6 +34,7 @@ import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
@@ -149,6 +150,15 @@ public class FunctionalTestUtils {
     return readAll(new FileInputStream(c.getConfig().getLogDir() + "/" + klass.getSimpleName() + "_" + p.hashCode() + ".out"));
   }
   
+  static Mutation nm(String row, String cf, String cq, Value value) {
+    Mutation m = new Mutation(new Text(row));
+    m.put(new Text(cf), new Text(cq), value);
+    return m;
+  }
+  
+  static Mutation nm(String row, String cf, String cq, String value) {
+    return nm(row, cf, cq, new Value(value.getBytes()));
+  }
 
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
new file mode 100644
index 0000000..eb5c0fa
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MemoryUnit;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.server.gc.SimpleGarbageCollector;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class GarbageCollectorIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    Map<String, String> settings = new HashMap<String, String>();
+    settings.put(Property.GC_CYCLE_START.getKey(), "1");
+    settings.put(Property.GC_CYCLE_DELAY.getKey(), "1");
+    settings.put(Property.TSERV_MAXMEM.getKey(), "5K");
+    settings.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
+    cfg.setSiteConfig(settings);
+  }
+
+  @Test(timeout=60*1000)
+  public void gcTest() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K");
+    TestIngest.Opts opts = new TestIngest.Opts();
+    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+    vopts.rows = opts.rows = 10000;
+    vopts.cols = opts.cols = 1;
+    TestIngest.ingest(c, opts, new BatchWriterOpts());
+    c.tableOperations().compact("test_ingest", null, null, true, true);
+    int before = countFiles();
+    while (true) {
+      UtilWaitThread.sleep(1000);
+      int more = countFiles();
+      if (more <= before)
+        break;
+      before = more;
+    }
+    Process gc = cluster.exec(SimpleGarbageCollector.class);
+    UtilWaitThread.sleep(5*1000);
+    int after = countFiles();
+    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+    assertTrue(after < before);
+    gc.destroy();
+  }
+  
+  @Test(timeout=60*1000)
+  public void gcLotsOfCandidatesIT() throws Exception {
+    log.info("Filling !METADATA table with bogus delete flags");
+    Connector c = getConnector();
+    addEntries(c, new BatchWriterOpts());
+    cluster.getConfig().setDefaultMemory(10, MemoryUnit.MEGABYTE);
+    Process gc = cluster.exec(SimpleGarbageCollector.class);
+    UtilWaitThread.sleep(10*1000);
+    String output = FunctionalTestUtils.readAll(cluster, SimpleGarbageCollector.class, gc);
+    gc.destroy();
+    assertTrue(output.contains("delete candidates has exceeded"));
+  }
+
+  private int countFiles() throws Exception {
+    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
+    int result = 0;
+    Path path = new Path(cluster.getConfig().getDir()+"/accumulo/tables/1/*/*.rf");
+    for (@SuppressWarnings("unused") FileStatus entry : fs.globStatus(path)) {
+      result++;
+    }
+    return result;
+  }
+  
+  public static void addEntries(Connector conn, BatchWriterOpts bwOpts) throws Exception {
+    conn.securityOperations().grantTablePermission(conn.whoami(), MetadataTable.NAME, TablePermission.WRITE);
+    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, bwOpts.getBatchWriterConfig());
+    
+    for (int i = 0; i < 100000; ++i) {
+      final Text emptyText = new Text("");
+      Text row = new Text(String.format("%s%s%020d%s", MetadataTable.DELETED_RANGE.getStartKey().getRow().toString(), "/", i,
+          "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj"));
+      Mutation delFlag = new Mutation(row);
+      delFlag.put(emptyText, emptyText, new Value(new byte[] {}));
+      bw.addMutation(delFlag);
+    }
+    bw.close();
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java b/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
new file mode 100644
index 0000000..6e06934
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Collections;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.hadoop.io.Text;
+import org.apache.log4j.Logger;
+import org.junit.Test;
+
+public class LargeRowIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "10ms"));
+  }
+
+  private static final int SEED = 42;
+  private static final String REG_TABLE_NAME = "lr";
+  private static final String PRE_SPLIT_TABLE_NAME = "lrps";
+  private static final int NUM_ROWS = 100;
+  private static final int ROW_SIZE = 1 << 17;
+  private static final int NUM_PRE_SPLITS = 9;
+  private static final int SPLIT_THRESH = ROW_SIZE * NUM_ROWS / NUM_PRE_SPLITS;
+  
+  @Test(timeout=60*1000)
+  public void run() throws Exception {
+    Random r = new Random();
+    byte rowData[] = new byte[ROW_SIZE];
+    r.setSeed(SEED + 1);
+    TreeSet<Text> splitPoints = new TreeSet<Text>();
+    for (int i = 0; i < NUM_PRE_SPLITS; i++) {
+      r.nextBytes(rowData);
+      TestIngest.toPrintableChars(rowData);
+      splitPoints.add(new Text(rowData));
+    }
+    Connector c = getConnector();
+    c.tableOperations().create(REG_TABLE_NAME);
+    c.tableOperations().create(PRE_SPLIT_TABLE_NAME);
+    c.tableOperations().addSplits(PRE_SPLIT_TABLE_NAME, splitPoints);
+    test1(c);
+    test2(c);
+  }
+  
+  private void test1(Connector c) throws Exception {
+    
+    basicTest(c, REG_TABLE_NAME, 0);
+    
+    c.tableOperations().setProperty(REG_TABLE_NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "" + SPLIT_THRESH);
+    
+    UtilWaitThread.sleep(12000);
+    Logger.getLogger(LargeRowIT.class).warn("checking splits");
+    FunctionalTestUtils.checkSplits(c, REG_TABLE_NAME, NUM_PRE_SPLITS / 2, NUM_PRE_SPLITS * 4);
+    
+    verify(c, REG_TABLE_NAME);
+  }
+  
+  private void test2(Connector c) throws Exception {
+    basicTest(c, PRE_SPLIT_TABLE_NAME, NUM_PRE_SPLITS);
+  }
+  
+  private void basicTest(Connector c, String table, int expectedSplits) throws Exception {
+    BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
+    
+    Random r = new Random();
+    byte rowData[] = new byte[ROW_SIZE];
+    
+    r.setSeed(SEED);
+    
+    for (int i = 0; i < NUM_ROWS; i++) {
+      
+      r.nextBytes(rowData);
+      TestIngest.toPrintableChars(rowData);
+      
+      Mutation mut = new Mutation(new Text(rowData));
+      mut.put(new Text(""), new Text(""), new Value(("" + i).getBytes()));
+      bw.addMutation(mut);
+    }
+    
+    bw.close();
+    
+    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+    
+    verify(c, table);
+    
+    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+    
+    c.tableOperations().flush(table, null, null, false);
+    
+    // verify while table flush is running
+    verify(c, table);
+    
+    // give split time to complete
+    c.tableOperations().flush(table, null, null, true);
+    
+    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+    
+    verify(c, table);
+    
+    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+  }
+  
+  private void verify(Connector c, String table) throws Exception {
+    Random r = new Random();
+    byte rowData[] = new byte[ROW_SIZE];
+    
+    r.setSeed(SEED);
+    
+    Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
+    
+    for (int i = 0; i < NUM_ROWS; i++) {
+      
+      r.nextBytes(rowData);
+      TestIngest.toPrintableChars(rowData);
+      
+      scanner.setRange(new Range(new Text(rowData)));
+      
+      int count = 0;
+      
+      for (Entry<Key,Value> entry : scanner) {
+        if (!entry.getKey().getRow().equals(new Text(rowData))) {
+          throw new Exception("verification failed, unexpected row i =" + i);
+        }
+        if (!entry.getValue().equals(Integer.toString(i).getBytes())) {
+          throw new Exception("verification failed, unexpected value i =" + i + " value = " + entry.getValue());
+        }
+        count++;
+      }
+      
+      if (count != 1) {
+        throw new Exception("verification failed, unexpected count i =" + i + " count=" + count);
+      }
+      
+    }
+    
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
new file mode 100644
index 0000000..4ffef57
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+/**
+ * Checks that tablet merges on a table created with {@link TimeType#LOGICAL}
+ * leave the logical clock in a consistent state: after merging, the next
+ * mutation written must receive the expected logical timestamp.  Each case in
+ * {@link #run()} builds a fresh table with a given set of splits, inserts some
+ * rows, merges a range, writes one more mutation, and asserts its timestamp.
+ */
+public class LogicalTimeIT extends MacTest {
+
+  
+  
+  @Test(timeout=120*1000)
+  public void run() throws Exception {
+    int tc = 0;
+    Connector c = getConnector();
+    // Single split at "m": whole-table merges with inserts on various sides.
+    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"a"}, null, null, "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"z"}, null, null, "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"a", "z"}, null, null, "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"a", "c", "z"}, null, null, "b", 3l);
+    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"a", "y", "z"}, null, null, "b", 3l);
+    
+    // Two splits ("g", "r"): whole-table merges across three tablets.
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, null, "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, null, "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, null, "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, null, "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, null, "b", 3l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, null, "b", 3l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, null, "b", 3l);
+    
+    // Two splits, but only a partial merge ending at "h": tablets past the
+    // merge range must not influence the new logical time.
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, "h", "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, "h", "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, "h", "b", 1l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, "h", "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, "h", "b", 3l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, "h", "b", 3l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, "h", "b", 2l);
+    
+  }
+  
+  /**
+   * Creates a LOGICAL-time table with the given splits, writes the given rows,
+   * merges the range bounded by start/end (null means the table edge), then
+   * writes one more mutation to row {@code last} and checks that it received
+   * logical timestamp {@code expected}.
+   *
+   * @param table name of the table to create for this case
+   * @param splits initial split points
+   * @param inserts rows written before the merge
+   * @param start merge range start, or null for the beginning of the table
+   * @param end merge range end, or null for the end of the table
+   * @param last row written after the merge whose timestamp is checked
+   * @param expected logical timestamp the post-merge mutation must carry
+   */
+  private void runMergeTest(Connector conn, String table, String[] splits, String[] inserts, String start, String end, String last, long expected) throws Exception {
+    log.info("table " + table);
+    conn.tableOperations().create(table, true, TimeType.LOGICAL);
+    TreeSet<Text> splitSet = new TreeSet<Text>();
+    for (String split : splits) {
+      splitSet.add(new Text(split));
+    }
+    conn.tableOperations().addSplits(table, splitSet);
+    
+    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    for (String row : inserts) {
+      Mutation m = new Mutation(row);
+      m.put("cf", "cq", "v");
+      bw.addMutation(m);
+    }
+    
+    // Flush (not close): the writer is reused for the post-merge mutation.
+    bw.flush();
+    
+    conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
+    
+    // This is the mutation whose assigned logical timestamp is under test.
+    Mutation m = new Mutation(last);
+    m.put("cf", "cq", "v");
+    bw.addMutation(m);
+    bw.flush();
+    
+    Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
+    scanner.setRange(new Range(last));
+    
+    bw.close();
+    
+    long time = scanner.iterator().next().getKey().getTimestamp();
+    if (time != expected)
+      throw new RuntimeException("unexpected time " + time + " " + expected);
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
new file mode 100644
index 0000000..2b84d49
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.security.MessageDigest;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.examples.simple.mapreduce.RowHash;
+import org.apache.hadoop.io.Text;
+import org.codehaus.plexus.util.Base64;
+import org.junit.Test;
+
+/**
+ * Runs the RowHash MapReduce example against the mini cluster: ingests ten
+ * rows, launches RowHash over the input column, then scans the output column
+ * and checks each value is the Base64-encoded MD5 digest of the input value.
+ */
+public class MapReduceIT extends MacTest {
+  
+  static final String tablename = "mapredf";
+  static final String input_cf = "cf-HASHTYPE";
+  static final String input_cq = "cq-NOTHASHED";
+  static final String input_cfcq = input_cf + ":" + input_cq;
+  // NOTE(review): the name says MD4 but the digest computed below is MD5 —
+  // presumably a historical misnomer; confirm before renaming anything.
+  static final String output_cq = "cq-MD4BASE64";
+  static final String output_cfcq =  input_cf + ":" + output_cq;
+  
+  @Test
+  public void test() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create(tablename);
+    BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
+    // Rows "0".."9" — single digits, so lexicographic scan order below
+    // matches numeric order.
+    for (int i = 0; i < 10; i++) {
+      Mutation m = new Mutation("" + i);
+      m.put(input_cf, input_cq, "row" + i);
+      bw.addMutation(m);
+    }
+    bw.close();
+    
+    // Run the RowHash example as a separate process inside the mini cluster.
+    Process hash = cluster.exec(RowHash.class, 
+        "-i", cluster.getInstanceName(),
+        "-z", cluster.getZooKeepers(),
+        "-u", "root",
+        "-p", MacTest.PASSWORD,
+        "-t", tablename,
+        "--column", input_cfcq);
+    assertEquals(0, hash.waitFor());
+    
+    // Verify the hash that RowHash wrote for each row.
+    Scanner s = c.createScanner(tablename, Authorizations.EMPTY);
+    s.fetchColumn(new Text(input_cf), new Text(output_cq));
+    int i = 0;
+    for (Entry<Key,Value> entry : s) {
+      MessageDigest md = MessageDigest.getInstance("MD5");
+      byte[] check = Base64.encodeBase64(md.digest(("row" + i).getBytes()));
+      // NOTE(review): assertEquals arguments appear swapped — JUnit expects
+      // (expected, actual), and the locally computed digest is the expected
+      // value here.  Behavior is unaffected; only failure messages would be.
+      assertEquals(entry.getValue().toString(), new String(check));
+      i++;
+    }
+    
+  }
+
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
new file mode 100644
index 0000000..2934fd2
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.junit.Test;
+
+/**
+ * A functional test that exercises hitting the max open file limit on a tablet server. This test assumes there are one or two tablet servers.
+ */
+
+public class MaxOpenIT extends MacTest {
+  
+  /**
+   * Shrinks the tablet server's file-open limits so that a batch scan over
+   * many tablets is forced to exceed them, which is the behavior under test.
+   */
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    Map<String, String> conf = new HashMap<String, String>();
+    conf.put(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), "4");
+    conf.put(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "1");
+    conf.put(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), "2");
+    cfg.setSiteConfig(conf);
+  }
+
+  private static final int NUM_TABLETS = 16;
+  private static final int NUM_TO_INGEST = 10000;
+  
+  @Test
+  public void run() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    // High compaction ratio keeps the three flushed files from being merged
+    // by a major compaction, so each tablet keeps multiple files open.
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_MAJC_RATIO.getKey(), "10");
+    c.tableOperations().addSplits("test_ingest", TestIngest.getSplitPoints(0, NUM_TO_INGEST, NUM_TABLETS));
+    
+    // the following loop should create three tablets in each map file
+    for (int i = 0; i < 3; i++) {
+      TestIngest.Opts opts = new TestIngest.Opts();
+      opts.timestamp = i;
+      opts.dataSize = 50;
+      opts.rows = NUM_TO_INGEST;
+      opts.cols = 1;
+      opts.random = i;
+      TestIngest.ingest(c, opts, new BatchWriterOpts());
+      
+      // Flush and confirm each tablet now has exactly i+1 files.
+      c.tableOperations().flush("test_ingest", null, null, true);
+      FunctionalTestUtils.checkRFiles(c, "test_ingest", NUM_TABLETS, NUM_TABLETS, i + 1, i + 1);
+    }
+    
+    // One single-row range per ingested row.
+    List<Range> ranges = new ArrayList<Range>(NUM_TO_INGEST);
+    
+    for (int i = 0; i < NUM_TO_INGEST; i++) {
+      ranges.add(new Range(TestIngest.generateRow(i, 0)));
+    }
+    
+    long time1 = batchScan(c, ranges, 1);
+    // run it again, now that stuff is cached on the client and sever
+    time1 = batchScan(c, ranges, 1);
+    long time2 = batchScan(c, ranges, NUM_TABLETS);
+    
+    System.out.printf("Single thread scan time   %6.2f %n", time1 / 1000.0);
+    System.out.printf("Multiple thread scan time %6.2f %n", time2 / 1000.0);
+    
+  }
+  
+  /**
+   * Batch-scans all of the given ranges with the requested number of threads,
+   * verifying every returned key/value against TestIngest's generator, and
+   * returns the elapsed wall-clock time in milliseconds.
+   *
+   * NOTE(review): bs.close() is skipped if an exception is thrown inside the
+   * loop, leaking the scanner's threads — consider try/finally.  Also
+   * "unexcepted" in the messages looks like a typo for "unexpected".
+   */
+  private long batchScan(Connector c, List<Range> ranges, int threads) throws Exception {
+    BatchScanner bs = c.createBatchScanner("test_ingest", TestIngest.AUTHS, threads);
+    
+    bs.setRanges(ranges);
+    
+    int count = 0;
+    
+    long t1 = System.currentTimeMillis();
+    
+    byte rval[] = new byte[50];
+    Random random = new Random();
+    
+    for (Entry<Key,Value> entry : bs) {
+      count++;
+      int row = VerifyIngest.getRow(entry.getKey());
+      int col = VerifyIngest.getCol(entry.getKey());
+      
+      if (row < 0 || row >= NUM_TO_INGEST) {
+        throw new Exception("unexcepted row " + row);
+      }
+      
+      // Regenerate the expected value for this cell (random=2 matches the
+      // last ingest pass, opts.random = i with i = 2).
+      rval = TestIngest.genRandomValue(random, rval, 2, row, col);
+      
+      if (entry.getValue().compareTo(rval) != 0) {
+        throw new Exception("unexcepted value row=" + row + " col=" + col);
+      }
+    }
+    
+    long t2 = System.currentTimeMillis();
+    
+    bs.close();
+    
+    // Every single-row range must have produced exactly one entry.
+    if (count != NUM_TO_INGEST) {
+      throw new Exception("Batch Scan did not return expected number of values " + count);
+    }
+    
+    return t2 - t1;
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
new file mode 100644
index 0000000..4428277
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.*;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Merge;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+/**
+ * Exercises tablet merging: a plain range merge, the size-based "mergomatic"
+ * utility, and a matrix of merge cases that checks both data integrity and
+ * the surviving split points after each merge.
+ */
+public class MergeIT extends MacTest {
+  
+  // Convenience: build a sorted split set from row strings.
+  SortedSet<Text> splits(String [] points) {
+    SortedSet<Text> result = new TreeSet<Text>();
+    for (String point : points)
+      result.add(new Text(point));
+    return result;
+  }
+  
+  /**
+   * Merges the range ("c1", "f1") out of an 11-split table and checks that
+   * the three split points falling inside that range (d, e, f) are gone.
+   */
+  @Test(timeout=30*1000)
+  public void merge() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test");
+    c.tableOperations().addSplits("test", splits("a b c d e f g h i j k".split(" ")));
+    BatchWriter bw = c.createBatchWriter("test", new BatchWriterConfig());
+    for (String row : "a b c d e f g h i j k".split(" ")) {
+      Mutation m = new Mutation(row);
+      m.put("cf", "cq", "value");
+      bw.addMutation(m);
+    }
+    bw.close();
+    c.tableOperations().flush("test", null, null, true);
+    c.tableOperations().merge("test", new Text("c1"), new Text("f1"));
+    assertEquals(8, c.tableOperations().listSplits("test").size());
+  }
+  
+  /**
+   * Checks the size-driven Merge utility: with a 100-byte goal, tablets are
+   * merged until each holds enough data; the force pass merges further.
+   */
+  @Test(timeout=30*1000)
+  public void mergeSize() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("merge");
+    c.tableOperations().addSplits("merge", splits("a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")));
+    BatchWriter bw = c.createBatchWriter("merge", new BatchWriterConfig());
+    // Only four rows carry data, so most tablets are empty and mergeable.
+    for (String row : "c e f y".split(" ")) {
+      Mutation m = new Mutation(row);
+      m.put("cf", "cq", "mersydotesanddozeydotesanlittolamsiedives");
+      bw.addMutation(m);
+    }
+    bw.close();
+    c.tableOperations().flush("merge", null, null, true);
+    Merge merge = new Merge();
+    merge.mergomatic(c, "merge", null, null, 100, false);
+    assertArrayEquals("b c d e f x y".split(" "), toStrings(c.tableOperations().listSplits("merge")));
+    merge.mergomatic(c, "merge", null, null, 100, true);
+    assertArrayEquals("c e f y".split(" "), toStrings(c.tableOperations().listSplits("merge")));
+  }
+
+  // Flatten the split points into a String[] for assertArrayEquals.
+  private String[] toStrings(Collection<Text> listSplits) {
+    String[] result = new String[listSplits.size()];
+    int i = 0;
+    for (Text t : listSplits) {
+      result[i++] = t.toString();
+    }
+    return result;
+  }
+  
+  // Varargs shorthand: keeps the test matrix below readable.
+  private String[] ns(String... strings) {
+    return strings;
+  }
+  
+  /**
+   * Runs a matrix of merge cases.  Per case: initial splits, splits expected
+   * to survive, rows to insert, and the candidate merge start/end rows (each
+   * start is crossed with each end by the overload below).
+   */
+  @Test(timeout=120*1000)
+  public void mergeTest() throws Exception {
+    int tc = 0;
+    Connector c = getConnector();
+    runMergeTest(c, "foo" + tc++, ns(), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
+    
+    runMergeTest(c, "foo" + tc++, ns("m"), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
+    runMergeTest(c, "foo" + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns("m", "n"), ns(null, "z"));
+    runMergeTest(c, "foo" + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns(null, "b"), ns("l", "m"));
+    
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns(), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns(null, "s"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("c", "m"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("n", "r"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns(null, "s"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns(null, "s"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("q", "r"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("aa", "b"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("r", "s"), ns(null, "z"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("l", "m"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns("q", "r"));
+    
+  }
+  
+  /**
+   * Cross product driver: runs one merge case for every (start, end) pair,
+   * giving each combination its own uniquely-named table.
+   */
+  private void runMergeTest(Connector c, String table, String[] splits, String[] expectedSplits, String[] inserts, String[] start, String[] end) throws Exception {
+    int count = 0;
+    
+    for (String s : start) {
+      for (String e : end) {
+        runMergeTest(c, table + "_" + count++, splits, expectedSplits, inserts, s, e);
+      }
+    }
+  }
+  
+  /**
+   * Creates a table with the given splits, inserts the given rows, merges the
+   * range bounded by start/end (null means the table edge), then checks that
+   * (a) every inserted row is still readable exactly once and (b) the
+   * remaining split points are exactly {@code expectedSplits}.
+   */
+  private void runMergeTest(Connector conn, String table, String[] splits, String[] expectedSplits, String[] inserts, String start, String end) throws Exception {
+    System.out.println("Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);
+    
+    conn.tableOperations().create(table, true, TimeType.LOGICAL);
+    TreeSet<Text> splitSet = new TreeSet<Text>();
+    for (String split : splits) {
+      splitSet.add(new Text(split));
+    }
+    conn.tableOperations().addSplits(table, splitSet);
+    
+    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    HashSet<String> expected = new HashSet<String>();
+    for (String row : inserts) {
+      Mutation m = new Mutation(row);
+      m.put("cf", "cq", row);
+      bw.addMutation(m);
+      expected.add(row);
+    }
+    
+    bw.close();
+    
+    conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
+    
+    Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
+    
+    // A merge must never duplicate data.
+    HashSet<String> observed = new HashSet<String>();
+    for (Entry<Key,Value> entry : scanner) {
+      String row = entry.getKey().getRowData().toString();
+      if (!observed.add(row)) {
+        throw new Exception("Saw data twice " + table + " " + row);
+      }
+    }
+    
+    // ... and must never lose or invent data.
+    if (!observed.equals(expected)) {
+      throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
+    }
+    
+    // Finally, the surviving split points must match exactly.
+    HashSet<Text> currentSplits = new HashSet<Text>(conn.tableOperations().listSplits(table));
+    HashSet<Text> ess = new HashSet<Text>();
+    for (String es : expectedSplits) {
+      ess.add(new Text(es));
+    }
+    
+    if (!currentSplits.equals(ess)) {
+      throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess);
+    }
+
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
new file mode 100644
index 0000000..bbfdcbe
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.*;
+
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.RootTable;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+/**
+ * Merges away all of the metadata table's splits and verifies that (a) file
+ * delete markers show up under the root table's DELETED_RANGE and (b) the
+ * metadata table ends up with zero splits.
+ */
+public class MergeMetaIT extends MacTest {
+  
+  @Test(timeout=30*1000)
+  public void mergeMeta() throws Exception {
+    Connector c = getConnector();
+    // Split the metadata table five ways so the merge has work to do.
+    SortedSet<Text> splits = new TreeSet<Text>();
+    for (String id : "1 2 3 4 5".split(" ")) {
+      splits.add(new Text(id));
+    }
+    c.tableOperations().addSplits(MetadataTable.NAME, splits);
+    // Creating tables populates metadata tablets with real entries.
+    for (String tableName : "a1 a2 a3 a4 a5".split(" ")) {
+      c.tableOperations().create(tableName);
+    }
+    c.tableOperations().merge(MetadataTable.NAME, null, null);
+    // Fixed sleep to give the merge's file deletes time to be recorded.
+    // NOTE(review): a time-based wait can be flaky on slow machines.
+    UtilWaitThread.sleep(2*1000);
+    // Count delete entries recorded in the root table for the merged range.
+    Scanner s = c.createScanner(RootTable.NAME, Authorizations.EMPTY);
+    s.setRange(MetadataTable.DELETED_RANGE);
+    int count = 0;
+    for (@SuppressWarnings("unused") Entry<Key,Value> e : s) {
+      count++;
+    }
+    assertTrue(count > 0);
+    assertEquals(0, c.tableOperations().listSplits(MetadataTable.NAME).size());
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
new file mode 100644
index 0000000..678eb8d
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
@@ -0,0 +1,475 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.security.SecurityErrorCode;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.SystemPermission;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+/**
+ * Integration test that exercises every {@link SystemPermission} and {@link TablePermission}:
+ * for each permission it verifies the guarded operation is denied BEFORE the permission is
+ * granted and succeeds AFTER, then that revoking removes it again.
+ */
+public class PermissionsIT extends MacTest {
+  private static final String TEST_USER = "test_user";
+  private static final PasswordToken TEST_PASS = new PasswordToken("test_password");
+  
+  /** Walks every system permission through the deny/grant/allow/revoke cycle. */
+  @Test(timeout=60*1000)
+  public void systemPermissionsTest() throws Exception {
+    // verify that the test is being run by root
+    Connector c = getConnector();
+    verifyHasOnlyTheseSystemPermissions(c, c.whoami(), SystemPermission.values());
+    
+    // create the test user
+    c.securityOperations().createLocalUser(TEST_USER, TEST_PASS);
+    Connector test_user_conn = c.getInstance().getConnector(TEST_USER, TEST_PASS);
+    verifyHasNoSystemPermissions(c, TEST_USER, SystemPermission.values());
+    
+    // test each permission
+    for (SystemPermission perm : SystemPermission.values()) {
+      log.debug("Verifying the " + perm + " permission");
+      
+      // verify GRANT can't be granted (it may only be held by root)
+      if (perm.equals(SystemPermission.GRANT)) {
+        try {
+          c.securityOperations().grantSystemPermission(TEST_USER, perm);
+        } catch (AccumuloSecurityException e) {
+          verifyHasNoSystemPermissions(c, TEST_USER, perm);
+          continue;
+        }
+        throw new IllegalStateException("Should NOT be able to grant GRANT");
+      }
+      
+      // test permission before and after granting it
+      testMissingSystemPermission(c, test_user_conn, perm);
+      c.securityOperations().grantSystemPermission(TEST_USER, perm);
+      verifyHasOnlyTheseSystemPermissions(c, TEST_USER, perm);
+      testGrantedSystemPermission(c, test_user_conn, perm);
+      c.securityOperations().revokeSystemPermission(TEST_USER, perm);
+      verifyHasNoSystemPermissions(c, TEST_USER, perm);
+    }
+  }
+  
+  /** Materializes a properties iterable into a Map for easy lookup/comparison. */
+  static Map<String, String> map(Iterable<Entry<String,String>> i) {
+    Map<String, String> result = new HashMap<String, String>();
+    for (Entry<String, String> e : i) {
+      result.put(e.getKey(), e.getValue());
+    }
+    return result;
+  }
+  
+  /**
+   * Attempts the operation guarded by {@code perm} as the unprivileged user and verifies it is
+   * rejected with PERMISSION_DENIED and had no effect.
+   */
+  private static void testMissingSystemPermission(Connector root_conn, Connector test_user_conn, SystemPermission perm) throws AccumuloException,
+  TableExistsException, AccumuloSecurityException, TableNotFoundException {
+    String tableName, user, password = "password";
+    log.debug("Confirming that the lack of the " + perm + " permission properly restricts the user");
+    
+    // test permission prior to granting it
+    switch (perm) {
+      case CREATE_TABLE:
+        tableName = "__CREATE_TABLE_WITHOUT_PERM_TEST__";
+        try {
+          test_user_conn.tableOperations().create(tableName);
+          throw new IllegalStateException("Should NOT be able to create a table");
+        } catch (AccumuloSecurityException e) {
+          // rethrow if denied for the wrong reason, or if the table was created anyway
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || root_conn.tableOperations().list().contains(tableName))
+            throw e;
+        }
+        break;
+      case DROP_TABLE:
+        tableName = "__DROP_TABLE_WITHOUT_PERM_TEST__";
+        root_conn.tableOperations().create(tableName);
+        try {
+          test_user_conn.tableOperations().delete(tableName);
+          throw new IllegalStateException("Should NOT be able to delete a table");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.tableOperations().list().contains(tableName))
+            throw e;
+        }
+        break;
+      case ALTER_TABLE:
+        tableName = "__ALTER_TABLE_WITHOUT_PERM_TEST__";
+        root_conn.tableOperations().create(tableName);
+        // setting, removing a property, and renaming all require ALTER_TABLE
+        try {
+          test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
+          throw new IllegalStateException("Should NOT be able to set a table property");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
+              || map(root_conn.tableOperations().getProperties(tableName)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+            throw e;
+        }
+        root_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
+        try {
+          test_user_conn.tableOperations().removeProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey());
+          throw new IllegalStateException("Should NOT be able to remove a table property");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
+              || !map(root_conn.tableOperations().getProperties(tableName)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+            throw e;
+        }
+        String table2 = tableName + "2";
+        try {
+          test_user_conn.tableOperations().rename(tableName, table2);
+          throw new IllegalStateException("Should NOT be able to rename a table");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.tableOperations().list().contains(tableName)
+              || root_conn.tableOperations().list().contains(table2))
+            throw e;
+        }
+        break;
+      case CREATE_USER:
+        user = "__CREATE_USER_WITHOUT_PERM_TEST__";
+        try {
+          test_user_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
+          throw new IllegalStateException("Should NOT be able to create a user");
+        } catch (AccumuloSecurityException e) {
+          // the user must not exist afterwards (authenticate would succeed if it did)
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
+              || root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
+            throw e;
+        }
+        break;
+      case DROP_USER:
+        user = "__DROP_USER_WITHOUT_PERM_TEST__";
+        root_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
+        try {
+          test_user_conn.securityOperations().dropLocalUser(user);
+          throw new IllegalStateException("Should NOT be able to delete a user");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
+              || !root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
+            throw e;
+        }
+        break;
+      case ALTER_USER:
+        user = "__ALTER_USER_WITHOUT_PERM_TEST__";
+        root_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
+        try {
+          test_user_conn.securityOperations().changeUserAuthorizations(user, new Authorizations("A", "B"));
+          throw new IllegalStateException("Should NOT be able to alter a user");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.securityOperations().getUserAuthorizations(user).isEmpty())
+            throw e;
+        }
+        break;
+      case SYSTEM:
+        // test for system permission would go here
+        break;
+      default:
+        throw new IllegalArgumentException("Unrecognized System Permission: " + perm);
+    }
+  }
+  
+  /**
+   * Performs the operation guarded by {@code perm} as the now-privileged user and verifies it
+   * succeeds and had the intended effect.
+   */
+  private static void testGrantedSystemPermission(Connector root_conn, Connector test_user_conn, SystemPermission perm) throws AccumuloException,
+  AccumuloSecurityException, TableNotFoundException, TableExistsException {
+    String tableName, user, password = "password";
+    log.debug("Confirming that the presence of the " + perm + " permission properly permits the user");
+    
+    // test permission after granting it
+    switch (perm) {
+      case CREATE_TABLE:
+        tableName = "__CREATE_TABLE_WITH_PERM_TEST__";
+        test_user_conn.tableOperations().create(tableName);
+        if (!root_conn.tableOperations().list().contains(tableName))
+          throw new IllegalStateException("Should be able to create a table");
+        break;
+      case DROP_TABLE:
+        tableName = "__DROP_TABLE_WITH_PERM_TEST__";
+        root_conn.tableOperations().create(tableName);
+        test_user_conn.tableOperations().delete(tableName);
+        if (root_conn.tableOperations().list().contains(tableName))
+          throw new IllegalStateException("Should be able to delete a table");
+        break;
+      case ALTER_TABLE:
+        tableName = "__ALTER_TABLE_WITH_PERM_TEST__";
+        String table2 = tableName + "2";
+        root_conn.tableOperations().create(tableName);
+        test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
+        Map<String,String> properties = map(root_conn.tableOperations().getProperties(tableName));
+        if (!properties.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+          throw new IllegalStateException("Should be able to set a table property");
+        test_user_conn.tableOperations().removeProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey());
+        properties = map(root_conn.tableOperations().getProperties(tableName));
+        if (properties.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+          throw new IllegalStateException("Should be able to remove a table property");
+        test_user_conn.tableOperations().rename(tableName, table2);
+        if (root_conn.tableOperations().list().contains(tableName) || !root_conn.tableOperations().list().contains(table2))
+          throw new IllegalStateException("Should be able to rename a table");
+        break;
+      case CREATE_USER:
+        user = "__CREATE_USER_WITH_PERM_TEST__";
+        test_user_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
+        if (!root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
+          throw new IllegalStateException("Should be able to create a user");
+        break;
+      case DROP_USER:
+        user = "__DROP_USER_WITH_PERM_TEST__";
+        root_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
+        test_user_conn.securityOperations().dropLocalUser(user);
+        if (root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
+          throw new IllegalStateException("Should be able to delete a user");
+        break;
+      case ALTER_USER:
+        user = "__ALTER_USER_WITH_PERM_TEST__";
+        root_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
+        test_user_conn.securityOperations().changeUserAuthorizations(user, new Authorizations("A", "B"));
+        if (root_conn.securityOperations().getUserAuthorizations(user).isEmpty())
+          throw new IllegalStateException("Should be able to alter a user");
+        break;
+      case SYSTEM:
+        // test for system permission would go here
+        break;
+      default:
+        throw new IllegalArgumentException("Unrecognized System Permission: " + perm);
+    }
+  }
+  
+  /** Asserts the user holds exactly the given system permissions and no others. */
+  private static void verifyHasOnlyTheseSystemPermissions(Connector root_conn, String user, SystemPermission... perms) throws AccumuloException,
+  AccumuloSecurityException {
+    List<SystemPermission> permList = Arrays.asList(perms);
+    for (SystemPermission p : SystemPermission.values()) {
+      if (permList.contains(p)) {
+        // should have these
+        if (!root_conn.securityOperations().hasSystemPermission(user, p))
+          throw new IllegalStateException(user + " SHOULD have system permission " + p);
+      } else {
+        // should not have these
+        if (root_conn.securityOperations().hasSystemPermission(user, p))
+          throw new IllegalStateException(user + " SHOULD NOT have system permission " + p);
+      }
+    }
+  }
+  
+  /** Asserts the user holds none of the given system permissions. */
+  private static void verifyHasNoSystemPermissions(Connector root_conn, String user, SystemPermission... perms) throws AccumuloException,
+  AccumuloSecurityException {
+    for (SystemPermission p : perms)
+      if (root_conn.securityOperations().hasSystemPermission(user, p))
+        throw new IllegalStateException(user + " SHOULD NOT have system permission " + p);
+  }
+  
+  private static final String TEST_TABLE = "__TABLE_PERMISSION_TEST__";
+  
+  /** Walks every table permission through the deny/grant/allow/revoke cycle on TEST_TABLE. */
+  @Test
+  public void tablePermissionTest() throws Exception {
+    // create the test user
+    Connector c = getConnector();
+    c.securityOperations().createLocalUser(TEST_USER, TEST_PASS);
+    Connector test_user_conn = c.getInstance().getConnector(TEST_USER, TEST_PASS);
+    
+    // check for read-only access to metadata table
+    verifyHasOnlyTheseTablePermissions(c, c.whoami(), MetadataTable.NAME, TablePermission.READ,
+        TablePermission.ALTER_TABLE);
+    verifyHasOnlyTheseTablePermissions(c, TEST_USER, MetadataTable.NAME, TablePermission.READ);
+    
+    // test each permission
+    for (TablePermission perm : TablePermission.values()) {
+      log.debug("Verifying the " + perm + " permission");
+      
+      // test permission before and after granting it
+      createTestTable(c);
+      testMissingTablePermission(c, test_user_conn, perm);
+      c.securityOperations().grantTablePermission(TEST_USER, TEST_TABLE, perm);
+      verifyHasOnlyTheseTablePermissions(c, TEST_USER, TEST_TABLE, perm);
+      testGrantedTablePermission(c, test_user_conn, perm);
+      
+      // recreate the table (a granted DROP_TABLE test deletes it) before revoking
+      createTestTable(c);
+      c.securityOperations().revokeTablePermission(TEST_USER, TEST_TABLE, perm);
+      verifyHasNoTablePermissions(c, TEST_USER, TEST_TABLE, perm);
+    }
+  }
+  
+  /** (Re)creates TEST_TABLE with one seed row if it does not exist, and checks baseline perms. */
+  private void createTestTable(Connector c) throws Exception,
+  MutationsRejectedException {
+    if (!c.tableOperations().exists(TEST_TABLE)) {
+      // create the test table
+      c.tableOperations().create(TEST_TABLE);
+      // put in some initial data
+      BatchWriter writer = c.createBatchWriter(TEST_TABLE, new BatchWriterConfig());
+      Mutation m = new Mutation(new Text("row"));
+      m.put(new Text("cf"), new Text("cq"), new Value("val".getBytes()));
+      writer.addMutation(m);
+      writer.close();
+      
+      // verify proper permissions for creator and test user
+      verifyHasOnlyTheseTablePermissions(c, c.whoami(), TEST_TABLE, TablePermission.values());
+      verifyHasNoTablePermissions(c, TEST_USER, TEST_TABLE, TablePermission.values());
+      
+    }
+  }
+  
+  /**
+   * Attempts the table operation guarded by {@code perm} as the unprivileged user and verifies
+   * it is rejected with PERMISSION_DENIED.
+   */
+  private static void testMissingTablePermission(Connector root_conn, Connector test_user_conn, TablePermission perm) throws Exception {
+    Scanner scanner;
+    BatchWriter writer;
+    Mutation m;
+    log.debug("Confirming that the lack of the " + perm + " permission properly restricts the user");
+    
+    // test permission prior to granting it
+    switch (perm) {
+      case READ:
+        try {
+          // scanning is lazy: the security error surfaces as a RuntimeException during iteration
+          scanner = test_user_conn.createScanner(TEST_TABLE, Authorizations.EMPTY);
+          int i = 0;
+          for (Entry<Key,Value> entry : scanner)
+            i += 1 + entry.getKey().getRowData().length();
+          if (i != 0)
+            throw new IllegalStateException("Should NOT be able to read from the table");
+        } catch (RuntimeException e) {
+          // NOTE(review): assumes the cause is always an AccumuloSecurityException — an
+          // unrelated RuntimeException here would produce a ClassCastException/NPE instead
+          AccumuloSecurityException se = (AccumuloSecurityException) e.getCause();
+          if (se.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+            throw se;
+        }
+        break;
+      case WRITE:
+        try {
+          writer = test_user_conn.createBatchWriter(TEST_TABLE, new BatchWriterConfig());
+          m = new Mutation(new Text("row"));
+          m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
+          writer.addMutation(m);
+          try {
+            // the rejection is reported when the writer flushes on close
+            writer.close();
+          } catch (MutationsRejectedException e1) {
+            if (e1.getAuthorizationFailuresMap().size() > 0)
+              throw new AccumuloSecurityException(test_user_conn.whoami(), org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode.PERMISSION_DENIED,
+                  e1);
+          }
+          throw new IllegalStateException("Should NOT be able to write to a table");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+            throw e;
+        }
+        break;
+      case BULK_IMPORT:
+        // test for bulk import permission would go here
+        break;
+      case ALTER_TABLE:
+        Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
+        groups.put("tgroup", new HashSet<Text>(Arrays.asList(new Text("t1"), new Text("t2"))));
+        try {
+          test_user_conn.tableOperations().setLocalityGroups(TEST_TABLE, groups);
+          throw new IllegalStateException("User should not be able to set locality groups");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+            throw e;
+        }
+        break;
+      case DROP_TABLE:
+        try {
+          test_user_conn.tableOperations().delete(TEST_TABLE);
+          throw new IllegalStateException("User should not be able delete the table");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+            throw e;
+        }
+        break;
+      case GRANT:
+        try {
+          test_user_conn.securityOperations().grantTablePermission("root", TEST_TABLE, TablePermission.GRANT);
+          throw new IllegalStateException("User should not be able grant permissions");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+            throw e;
+        }
+        break;
+      default:
+        throw new IllegalArgumentException("Unrecognized table Permission: " + perm);
+    }
+  }
+  
+  /**
+   * Performs the table operation guarded by {@code perm} as the now-privileged user; any
+   * security exception propagates and fails the test.
+   */
+  private static void testGrantedTablePermission(Connector root_conn, Connector test_user_conn, TablePermission perm) throws AccumuloException,
+  TableExistsException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
+    Scanner scanner;
+    BatchWriter writer;
+    Mutation m;
+    log.debug("Confirming that the presence of the " + perm + " permission properly permits the user");
+    
+    // test permission after granting it
+    switch (perm) {
+      case READ:
+        scanner = test_user_conn.createScanner(TEST_TABLE, Authorizations.EMPTY);
+        Iterator<Entry<Key,Value>> iter = scanner.iterator();
+        while (iter.hasNext())
+          iter.next();
+        break;
+      case WRITE:
+        writer = test_user_conn.createBatchWriter(TEST_TABLE, new BatchWriterConfig());
+        m = new Mutation(new Text("row"));
+        m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
+        writer.addMutation(m);
+        writer.close();
+        break;
+      case BULK_IMPORT:
+        // test for bulk import permission would go here
+        break;
+      case ALTER_TABLE:
+        Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
+        groups.put("tgroup", new HashSet<Text>(Arrays.asList(new Text("t1"), new Text("t2"))));
+        // BUG FIX: the locality-group map was previously built but never applied, so this
+        // case exercised nothing; actually perform the ALTER_TABLE operation.
+        test_user_conn.tableOperations().setLocalityGroups(TEST_TABLE, groups);
+        break;
+      case DROP_TABLE:
+        test_user_conn.tableOperations().delete(TEST_TABLE);
+        break;
+      case GRANT:
+        test_user_conn.securityOperations().grantTablePermission("root", TEST_TABLE, TablePermission.GRANT);
+        break;
+      default:
+        throw new IllegalArgumentException("Unrecognized table Permission: " + perm);
+    }
+  }
+  
+  /** Asserts the user holds exactly the given table permissions on {@code table} and no others. */
+  private static void verifyHasOnlyTheseTablePermissions(Connector root_conn, String user, String table, TablePermission... perms) throws AccumuloException,
+  AccumuloSecurityException {
+    List<TablePermission> permList = Arrays.asList(perms);
+    for (TablePermission p : TablePermission.values()) {
+      if (permList.contains(p)) {
+        // should have these
+        if (!root_conn.securityOperations().hasTablePermission(user, table, p))
+          throw new IllegalStateException(user + " SHOULD have table permission " + p + " for table " + table);
+      } else {
+        // should not have these
+        if (root_conn.securityOperations().hasTablePermission(user, table, p))
+          throw new IllegalStateException(user + " SHOULD NOT have table permission " + p + " for table " + table);
+      }
+    }
+  }
+  
+  /** Asserts the user holds none of the given table permissions on {@code table}. */
+  private static void verifyHasNoTablePermissions(Connector root_conn, String user, String table, TablePermission... perms) throws AccumuloException,
+  AccumuloSecurityException {
+    for (TablePermission p : perms)
+      if (root_conn.securityOperations().hasTablePermission(user, table, p))
+        throw new IllegalStateException(user + " SHOULD NOT have table permission " + p + " for table " + table);
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
index a6c7802..c4c5980 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
@@ -56,7 +56,7 @@ import org.junit.Test;
 
 public class ReadWriteIT extends MacTest {
   
-  static final int ROWS = 20000;
+  static final int ROWS = 200000;
   static final int COLS = 1;
   static final String COLF = "colf";
   
@@ -81,11 +81,11 @@ public class ReadWriteIT extends MacTest {
     monitor.destroy();
   }
   
-  public void ingest(Connector connector, int rows, int cols, int width, int offset) throws Exception {
+  public static void ingest(Connector connector, int rows, int cols, int width, int offset) throws Exception {
     ingest(connector, rows, cols, width, offset, COLF);
   }
   
-  public void ingest(Connector connector, int rows, int cols, int width, int offset, String colf) throws Exception {
+  public static void ingest(Connector connector, int rows, int cols, int width, int offset, String colf) throws Exception {
     TestIngest.Opts opts = new TestIngest.Opts();
     opts.rows = rows;
     opts.cols = cols;
@@ -96,10 +96,10 @@ public class ReadWriteIT extends MacTest {
     TestIngest.ingest(connector, opts, new BatchWriterOpts());
   }
   
-  private void verify(Connector connector, int rows, int cols, int width, int offset) throws Exception {
+  private static void verify(Connector connector, int rows, int cols, int width, int offset) throws Exception {
     verify(connector, rows, cols, width, offset, COLF);
   }
-  private void verify(Connector connector, int rows, int cols, int width, int offset, String colf) throws Exception {
+  private static void verify(Connector connector, int rows, int cols, int width, int offset, String colf) throws Exception {
     ScannerOpts scannerOpts = new ScannerOpts();
     VerifyIngest.Opts opts = new VerifyIngest.Opts();
     opts.rows = rows;
@@ -135,6 +135,10 @@ public class ReadWriteIT extends MacTest {
   public void interleaved() throws Exception {
     // read and write concurrently
     final Connector connector = getConnector();
+    interleaveTest(connector);
+  }
+ 
+  static void interleaveTest(final Connector connector) throws Exception {
     final AtomicBoolean fail = new AtomicBoolean(false);
     final int CHUNKSIZE = ROWS / 10;
     ingest(connector, CHUNKSIZE, 1, 50, 0);


Mime
View raw message