accumulo-commits mailing list archives

From e..@apache.org
Subject [12/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar, stop building test jar
Date Thu, 04 Jun 2015 18:52:53 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
deleted file mode 100644
index 9d0ce86..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
+++ /dev/null
@@ -1,660 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static com.google.common.base.Charsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
-import org.apache.accumulo.cluster.standalone.StandaloneClusterControl;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.user.AgeOffFilter;
-import org.apache.accumulo.core.iterators.user.SummingCombiner;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.examples.simple.client.Flush;
-import org.apache.accumulo.examples.simple.client.RandomBatchScanner;
-import org.apache.accumulo.examples.simple.client.RandomBatchWriter;
-import org.apache.accumulo.examples.simple.client.ReadWriteExample;
-import org.apache.accumulo.examples.simple.client.RowOperations;
-import org.apache.accumulo.examples.simple.client.SequentialBatchWriter;
-import org.apache.accumulo.examples.simple.client.TraceDumpExample;
-import org.apache.accumulo.examples.simple.client.TracingExample;
-import org.apache.accumulo.examples.simple.combiner.StatsCombiner;
-import org.apache.accumulo.examples.simple.constraints.MaxMutationSize;
-import org.apache.accumulo.examples.simple.dirlist.Ingest;
-import org.apache.accumulo.examples.simple.dirlist.QueryUtil;
-import org.apache.accumulo.examples.simple.helloworld.InsertWithBatchWriter;
-import org.apache.accumulo.examples.simple.helloworld.ReadData;
-import org.apache.accumulo.examples.simple.isolation.InterferenceTest;
-import org.apache.accumulo.examples.simple.mapreduce.RegexExample;
-import org.apache.accumulo.examples.simple.mapreduce.RowHash;
-import org.apache.accumulo.examples.simple.mapreduce.TableToFile;
-import org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest;
-import org.apache.accumulo.examples.simple.mapreduce.WordCount;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.BulkIngestExample;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.GenerateTestData;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.SetupTable;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.VerifyIngest;
-import org.apache.accumulo.examples.simple.shard.ContinuousQuery;
-import org.apache.accumulo.examples.simple.shard.Index;
-import org.apache.accumulo.examples.simple.shard.Query;
-import org.apache.accumulo.examples.simple.shard.Reverse;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl.LogWriter;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.start.Main;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.tracer.TraceServer;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.util.Tool;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-public class ExamplesIT extends AccumuloClusterHarness {
-  private static final Logger log = LoggerFactory.getLogger(ExamplesIT.class);
-  private static final BatchWriterOpts bwOpts = new BatchWriterOpts();
-  private static final BatchWriterConfig bwc = new BatchWriterConfig();
-  private static final String visibility = "A|B";
-  private static final String auths = "A,B";
-
-  Connector c;
-  String instance;
-  String keepers;
-  String user;
-  String passwd;
-  String keytab;
-  BatchWriter bw;
-  IteratorSetting is;
-  String dir;
-  FileSystem fs;
-  Authorizations origAuths;
-  boolean saslEnabled;
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopConf) {
-    // 128MB * 3
-    cfg.setDefaultMemory(cfg.getDefaultMemory() * 3, MemoryUnit.BYTE);
-  }
-
-  @Before
-  public void getClusterInfo() throws Exception {
-    c = getConnector();
-    user = getAdminPrincipal();
-    AuthenticationToken token = getAdminToken();
-    if (token instanceof KerberosToken) {
-      keytab = getAdminUser().getKeytab().getAbsolutePath();
-      saslEnabled = true;
-    } else if (token instanceof PasswordToken) {
-      passwd = new String(((PasswordToken) getAdminToken()).getPassword(), UTF_8);
-      saslEnabled = false;
-    } else {
-      Assert.fail("Unknown token type: " + token);
-    }
-    fs = getCluster().getFileSystem();
-    instance = c.getInstance().getInstanceName();
-    keepers = c.getInstance().getZooKeepers();
-    dir = new Path(cluster.getTemporaryPath(), getClass().getName()).toString();
-
-    origAuths = c.securityOperations().getUserAuthorizations(user);
-    c.securityOperations().changeUserAuthorizations(user, new Authorizations(auths.split(",")));
-  }
-
-  @After
-  public void resetAuths() throws Exception {
-    if (null != origAuths) {
-      getConnector().securityOperations().changeUserAuthorizations(getAdminPrincipal(), origAuths);
-    }
-  }
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 6 * 60;
-  }
-
-  @Test
-  public void testTrace() throws Exception {
-    Process trace = null;
-    if (ClusterType.MINI == getClusterType()) {
-      MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
-      trace = impl.exec(TraceServer.class);
-      while (!c.tableOperations().exists("trace"))
-        UtilWaitThread.sleep(500);
-    }
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-C", "-D", "-c"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-C", "-D", "-c"};
-    }
-    Entry<Integer,String> pair = cluster.getClusterControl().execWithStdout(TracingExample.class, args);
-    Assert.assertEquals("Expected return code of zero. STDOUT=" + pair.getValue(), 0, pair.getKey().intValue());
-    String result = pair.getValue();
-    Pattern pattern = Pattern.compile("TraceID: ([0-9a-f]+)");
-    Matcher matcher = pattern.matcher(result);
-    int count = 0;
-    while (matcher.find()) {
-      if (saslEnabled) {
-        args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--traceid", matcher.group(1)};
-      } else {
-        args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--traceid", matcher.group(1)};
-      }
-      pair = cluster.getClusterControl().execWithStdout(TraceDumpExample.class, args);
-      count++;
-    }
-    assertTrue(count > 0);
-    assertTrue("Output did not contain myApp@myHost", pair.getValue().contains("myApp@myHost"));
-    if (ClusterType.MINI == getClusterType() && null != trace) {
-      trace.destroy();
-    }
-  }
-
-  @Test
-  public void testClasspath() throws Exception {
-    Entry<Integer,String> entry = getCluster().getClusterControl().execWithStdout(Main.class, new String[] {"classpath"});
-    assertEquals(0, entry.getKey().intValue());
-    String result = entry.getValue();
-    int level1 = result.indexOf("Level 1");
-    int level2 = result.indexOf("Level 2");
-    int level3 = result.indexOf("Level 3");
-    int level4 = result.indexOf("Level 4");
-    assertTrue("Level 1 classloader not present.", level1 >= 0);
-    assertTrue("Level 2 classloader not present.", level2 > 0);
-    assertTrue("Level 3 classloader not present.", level3 > 0);
-    assertTrue("Level 4 classloader not present.", level4 > 0);
-    assertTrue(level1 < level2);
-    assertTrue(level2 < level3);
-    assertTrue(level3 < level4);
-  }
-
-  @Test
-  public void testDirList() throws Exception {
-    String[] names = getUniqueNames(3);
-    String dirTable = names[0], indexTable = names[1], dataTable = names[2];
-    String[] args;
-    String dirListDirectory;
-    switch (getClusterType()) {
-      case MINI:
-        dirListDirectory = ((MiniAccumuloClusterImpl) getCluster()).getConfig().getDir().getAbsolutePath();
-        break;
-      case STANDALONE:
-        dirListDirectory = ((StandaloneAccumuloCluster) getCluster()).getAccumuloHome();
-        break;
-      default:
-        throw new RuntimeException("Unknown cluster type");
-    }
-    // Index a directory listing on /tmp. If this is running against a standalone cluster, we can't guarantee Accumulo source will be there.
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable",
-          dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), dirListDirectory};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable",
-          dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), dirListDirectory};
-    }
-    Entry<Integer,String> entry = getClusterControl().execWithStdout(Ingest.class, args);
-    assertEquals("Got non-zero return code. Stdout=" + entry.getValue(), 0, entry.getKey().intValue());
-
-    String expectedFile;
-    switch (getClusterType()) {
-      case MINI:
-        // Should be present in a minicluster dir
-        expectedFile = "accumulo-site.xml";
-        break;
-      case STANDALONE:
-        // Should be in place on standalone installs (not having to follow symlinks)
-        expectedFile = "LICENSE";
-        break;
-      default:
-        throw new RuntimeException("Unknown cluster type");
-    }
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "--keytab", keytab, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path",
-          expectedFile};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-p", passwd, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path", expectedFile};
-    }
-    entry = getClusterControl().execWithStdout(QueryUtil.class, args);
-    if (ClusterType.MINI == getClusterType()) {
-      MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
-      for (LogWriter writer : impl.getLogWriters()) {
-        writer.flush();
-      }
-    }
-
-    log.info("result " + entry.getValue());
-    assertEquals(0, entry.getKey().intValue());
-    assertTrue(entry.getValue().contains(expectedFile));
-  }
-
-  @Test
-  public void testAgeoffFilter() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    is = new IteratorSetting(10, AgeOffFilter.class);
-    AgeOffFilter.setTTL(is, 1000L);
-    c.tableOperations().attachIterator(tableName, is);
-    UtilWaitThread.sleep(500); // let zookeeper updates propagate.
-    bw = c.createBatchWriter(tableName, bwc);
-    Mutation m = new Mutation("foo");
-    m.put("a", "b", "c");
-    bw.addMutation(m);
-    bw.close();
-    UtilWaitThread.sleep(1000);
-    assertEquals(0, Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator()));
-  }
-
-  @Test
-  public void testStatsCombiner() throws Exception {
-    String table = getUniqueNames(1)[0];
-    c.tableOperations().create(table);
-    is = new IteratorSetting(10, StatsCombiner.class);
-    StatsCombiner.setCombineAllColumns(is, true);
-
-    c.tableOperations().attachIterator(table, is);
-    bw = c.createBatchWriter(table, bwc);
-    // Write two mutations otherwise the NativeMap would dedupe them into a single update
-    Mutation m = new Mutation("foo");
-    m.put("a", "b", "1");
-    bw.addMutation(m);
-    m = new Mutation("foo");
-    m.put("a", "b", "3");
-    bw.addMutation(m);
-    bw.flush();
-
-    Iterator<Entry<Key,Value>> iter = c.createScanner(table, Authorizations.EMPTY).iterator();
-    assertTrue("Iterator had no results", iter.hasNext());
-    Entry<Key,Value> e = iter.next();
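-    // StatsCombiner stores min,max,sum,count per column; 1 and 3 combine to min=1, max=3, sum=4, count=2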
-    assertEquals("Results ", "1,3,4,2", e.getValue().toString());
-    assertFalse("Iterator had additional results", iter.hasNext());
-
-    m = new Mutation("foo");
-    m.put("a", "b", "0,20,20,2");
-    bw.addMutation(m);
-    bw.close();
-
-    iter = c.createScanner(table, Authorizations.EMPTY).iterator();
-    assertTrue("Iterator had no results", iter.hasNext());
-    e = iter.next();
-    assertEquals("Results ", "0,20,24,4", e.getValue().toString());
-    assertFalse("Iterator had additional results", iter.hasNext());
-  }
-
-  @Test
-  public void testBloomFilters() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "100000", "--min", "0", "--max",
-          "1000000000", "--size", "50", "--batchMemory", "2M", "--batchLatency", "60s", "--batchThreads", "3", "-t", tableName};
-    } else {
-      args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "100000", "--min", "0", "--max", "1000000000",
-          "--size", "50", "--batchMemory", "2M", "--batchLatency", "60s", "--batchThreads", "3", "-t", tableName};
-    }
-    goodExec(RandomBatchWriter.class, args);
-    c.tableOperations().flush(tableName, null, null, true);
-    long diff = 0, diff2 = 0;
-    // try the speed test a couple times in case the system is loaded with other tests
-    for (int i = 0; i < 2; i++) {
-      long now = System.currentTimeMillis();
-      if (saslEnabled) {
-        args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "10000", "--min", "0", "--max",
-            "1000000000", "--size", "50", "--scanThreads", "4", "-t", tableName};
-      } else {
-        args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "10000", "--min", "0", "--max", "1000000000",
-            "--size", "50", "--scanThreads", "4", "-t", tableName};
-      }
-      goodExec(RandomBatchScanner.class, args);
-      diff = System.currentTimeMillis() - now;
-      now = System.currentTimeMillis();
-      if (saslEnabled) {
-        args = new String[] {"--seed", "8", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "10000", "--min", "0", "--max",
-            "1000000000", "--size", "50", "--scanThreads", "4", "-t", tableName};
-      } else {
-        args = new String[] {"--seed", "8", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "10000", "--min", "0", "--max", "1000000000",
-            "--size", "50", "--scanThreads", "4", "-t", tableName};
-      }
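-      // This scan uses seed 8 against data written with seed 7, so none of the requested
-      // rows exist; RandomBatchScanner exits non-zero, and the bloom filter should make
-      // these negative lookups faster than the seed-7 scan timed above.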
-      int retCode = getClusterControl().exec(RandomBatchScanner.class, args);
-      assertEquals(1, retCode);
-      diff2 = System.currentTimeMillis() - now;
-      if (diff2 < diff)
-        break;
-    }
-    assertTrue(diff2 < diff);
-  }
-
-  @Test
-  public void testShardedIndex() throws Exception {
-    String[] names = getUniqueNames(3);
-    final String shard = names[0], index = names[1];
-    c.tableOperations().create(shard);
-    c.tableOperations().create(index);
-    bw = c.createBatchWriter(shard, bwc);
-    Index.index(30, new File(System.getProperty("user.dir") + "/src"), "\\W+", bw);
-    bw.close();
-    BatchScanner bs = c.createBatchScanner(shard, Authorizations.EMPTY, 4);
-    List<String> found = Query.query(bs, Arrays.asList("foo", "bar"));
-    bs.close();
-    // should find ourselves
-    boolean thisFile = false;
-    for (String file : found) {
-      if (file.endsWith("/ExamplesIT.java"))
-        thisFile = true;
-    }
-    assertTrue(thisFile);
-
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "--keytab", keytab};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", getAdminPrincipal(), "-p", passwd};
-    }
-    // create a reverse index
-    goodExec(Reverse.class, args);
-
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "--keytab", keytab, "--terms", "5",
-          "--count", "1000"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "-p", passwd, "--terms", "5", "--count",
-          "1000"};
-    }
-    // run some queries
-    goodExec(ContinuousQuery.class, args);
-  }
-
-  @Test
-  public void testMaxMutationConstraint() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    c.tableOperations().addConstraint(tableName, MaxMutationSize.class.getName());
-    TestIngest.Opts opts = new TestIngest.Opts();
-    opts.rows = 1;
-    opts.cols = 1000;
-    opts.setTableName(tableName);
-    if (saslEnabled) {
-      opts.updateKerberosCredentials(cluster.getClientConfig());
-    } else {
-      opts.setPrincipal(getAdminPrincipal());
-    }
-    try {
-      TestIngest.ingest(c, opts, bwOpts);
-    } catch (MutationsRejectedException ex) {
-      assertEquals(1, ex.getConstraintViolationSummaries().size());
-    }
-  }
-
-  @Test
-  public void testBulkIngest() throws Exception {
-    // TODO Figure out a way to run M/R with Kerberos
-    Assume.assumeTrue(getAdminToken() instanceof PasswordToken);
-    String tableName = getUniqueNames(1)[0];
-    FileSystem fs = getFileSystem();
-    Path p = new Path(dir, "tmp");
-    if (fs.exists(p)) {
-      fs.delete(p, true);
-    }
-    goodExec(GenerateTestData.class, "--start-row", "0", "--count", "10000", "--output", dir + "/tmp/input/data");
-
-    List<String> commonArgs = new ArrayList<>(Arrays.asList(new String[] {"-i", instance, "-z", keepers, "-u", user, "--table", tableName}));
-    if (saslEnabled) {
-      commonArgs.add("--keytab");
-      commonArgs.add(keytab);
-    } else {
-      commonArgs.add("-p");
-      commonArgs.add(passwd);
-    }
-
-    List<String> args = new ArrayList<>(commonArgs);
-    goodExec(SetupTable.class, args.toArray(new String[0]));
-
-    args = new ArrayList<>(commonArgs);
-    args.addAll(Arrays.asList(new String[] {"--inputDir", dir + "/tmp/input", "--workDir", dir + "/tmp"}));
-    goodExec(BulkIngestExample.class, args.toArray(new String[0]));
-
-    args = new ArrayList<>(commonArgs);
-    args.addAll(Arrays.asList(new String[] {"--start-row", "0", "--count", "10000"}));
-    goodExec(VerifyIngest.class, args.toArray(new String[0]));
-  }
-
-  @Test
-  public void testTeraSortAndRead() throws Exception {
-    // TODO Figure out a way to run M/R with Kerberos
-    Assume.assumeTrue(getAdminToken() instanceof PasswordToken);
-    String tableName = getUniqueNames(1)[0];
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-i", instance, "-z", keepers,
-          "-u", user, "--keytab", keytab, "--splits", "4"};
-    } else {
-      args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-i", instance, "-z", keepers,
-          "-u", user, "-p", passwd, "--splits", "4"};
-    }
-    goodExec(TeraSortIngest.class, args);
-    Path output = new Path(dir, "tmp/nines");
-    if (fs.exists(output)) {
-      fs.delete(output, true);
-    }
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--rowRegex", ".*999.*", "--output",
-          output.toString()};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--rowRegex", ".*999.*", "--output", output.toString()};
-    }
-    goodExec(RegexExample.class, args);
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--column", "c:"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--column", "c:"};
-    }
-    goodExec(RowHash.class, args);
-    output = new Path(dir, "tmp/tableFile");
-    if (fs.exists(output)) {
-      fs.delete(output, true);
-    }
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--output", output.toString()};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--output", output.toString()};
-    }
-    goodExec(TableToFile.class, args);
-  }
-
-  @Test
-  public void testWordCount() throws Exception {
-    // TODO Figure out a way to run M/R with Kerberos
-    Assume.assumeTrue(getAdminToken() instanceof PasswordToken);
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    is = new IteratorSetting(10, SummingCombiner.class);
-    SummingCombiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column(new Text("count"))));
-    SummingCombiner.setEncodingType(is, SummingCombiner.Type.STRING);
-    c.tableOperations().attachIterator(tableName, is);
-    fs.copyFromLocalFile(new Path(new Path(System.getProperty("user.dir")).getParent(), "README.md"), new Path(dir + "/tmp/wc/README.md"));
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-u", user, "--keytab", keytab, "-z", keepers, "--input", dir + "/tmp/wc", "-t", tableName};
-    } else {
-      args = new String[] {"-i", instance, "-u", user, "-p", passwd, "-z", keepers, "--input", dir + "/tmp/wc", "-t", tableName};
-    }
-    goodExec(WordCount.class, args);
-  }
-
-  @Test
-  public void testInsertWithBatchWriterAndReadData() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName};
-    }
-    goodExec(InsertWithBatchWriter.class, args);
-    goodExec(ReadData.class, args);
-  }
-
-  @Test
-  public void testIsolatedScansWithInterference() throws Exception {
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"};
-    }
-    goodExec(InterferenceTest.class, args);
-  }
-
-  @Test
-  public void testScansWithInterference() throws Exception {
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", getUniqueNames(1)[0], "--iterations", "100000"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", getUniqueNames(1)[0], "--iterations", "100000"};
-    }
-    goodExec(InterferenceTest.class, args);
-  }
-
-  @Test
-  public void testRowOperations() throws Exception {
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd};
-    }
-    goodExec(RowOperations.class, args);
-  }
-
-  @Test
-  public void testBatchWriter() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--start", "0", "--num", "100000", "--size", "50",
-          "--batchMemory", "10000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--start", "0", "--num", "100000", "--size", "50",
-          "--batchMemory", "10000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
-    }
-    goodExec(SequentialBatchWriter.class, args);
-  }
-
-  @Test
-  public void testReadWriteAndDelete() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--auths", auths, "--table", tableName, "--createtable", "-c",
-          "--debug"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", tableName, "--createtable", "-c", "--debug"};
-    }
-    goodExec(ReadWriteExample.class, args);
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--auths", auths, "--table", tableName, "-d", "--debug"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", tableName, "-d", "--debug"};
-    }
-    goodExec(ReadWriteExample.class, args);
-  }
-
-  @Test
-  public void testRandomBatchesAndFlush() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName, "--num", "100000", "--min", "0", "--max",
-          "100000", "--size", "100", "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName, "--num", "100000", "--min", "0", "--max", "100000",
-          "--size", "100", "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
-    }
-    goodExec(RandomBatchWriter.class, args);
-
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName, "--num", "10000", "--min", "0", "--max",
-          "100000", "--size", "100", "--scanThreads", "4", "--auths", auths};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName, "--num", "10000", "--min", "0", "--max", "100000",
-          "--size", "100", "--scanThreads", "4", "--auths", auths};
-    }
-    goodExec(RandomBatchScanner.class, args);
-
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName};
-    }
-    goodExec(Flush.class, args);
-  }
-
-  private void goodExec(Class<?> theClass, String... args) throws InterruptedException, IOException {
-    Entry<Integer,String> pair;
-    if (Tool.class.isAssignableFrom(theClass) && ClusterType.STANDALONE == getClusterType()) {
-      StandaloneClusterControl control = (StandaloneClusterControl) getClusterControl();
-      pair = control.execMapreduceWithStdout(theClass, args);
-    } else {
-      // We're already slurping stdout into memory (not redirecting to file). Might as well add it to error message.
-      pair = getClusterControl().execWithStdout(theClass, args);
-    }
-    Assert.assertEquals("stdout=" + pair.getValue(), 0, pair.getKey().intValue());
-  }
-}

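Nearly every test in ExamplesIT above builds its argument array twice, once for the Kerberos path (--keytab) and once for the password path (-p). A minimal sketch of how that branching could be factored into a single helper; the ConnectionArgs name and shape are illustrative, not part of the removed file:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // Hypothetical helper, not from the removed ExamplesIT.java: collects the
    // connection flags once and appends either --keytab or -p per invocation.
    final class ConnectionArgs {
      private final String instance, keepers, user, passwd, keytab;
      private final boolean saslEnabled;

      ConnectionArgs(String instance, String keepers, String user, String passwd, String keytab, boolean saslEnabled) {
        this.instance = instance;
        this.keepers = keepers;
        this.user = user;
        this.passwd = passwd;
        this.keytab = keytab;
        this.saslEnabled = saslEnabled;
      }

      // Common -i/-z/-u flags, then the credential flag, then any per-test extras.
      String[] build(String... extra) {
        List<String> args = new ArrayList<>(Arrays.asList("-i", instance, "-z", keepers, "-u", user));
        args.addAll(saslEnabled ? Arrays.asList("--keytab", keytab) : Arrays.asList("-p", passwd));
        args.addAll(Arrays.asList(extra));
        return args.toArray(new String[0]);
      }
    }

With such a helper, a call site like the one in testRandomBatchesAndFlush would reduce to goodExec(Flush.class, connArgs.build("--table", tableName)).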
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java b/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
deleted file mode 100644
index b75a74e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-/**
- * See ACCUMULO-779
- */
-public class FateStarvationIT extends AccumuloClusterHarness {
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  @Test
-  public void run() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    Connector c = getConnector();
-    c.tableOperations().create(tableName);
-
-    c.tableOperations().addSplits(tableName, TestIngest.getSplitPoints(0, 100000, 50));
-
-    TestIngest.Opts opts = new TestIngest.Opts();
-    opts.random = 89;
-    opts.timestamp = 7;
-    opts.dataSize = 50;
-    opts.rows = 100000;
-    opts.cols = 1;
-    opts.setTableName(tableName);
-    ClientConfiguration clientConf = cluster.getClientConfig();
-    if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
-      opts.updateKerberosCredentials(clientConf);
-    } else {
-      opts.setPrincipal(getAdminPrincipal());
-    }
-    TestIngest.ingest(c, opts, new BatchWriterOpts());
-
-    c.tableOperations().flush(tableName, null, null, true);
-
-    List<Text> splits = new ArrayList<Text>(TestIngest.getSplitPoints(0, 100000, 67));
-    Random rand = new Random();
-
-    for (int i = 0; i < 100; i++) {
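-      // choose idx1 < idx2 so each requested compaction covers a non-empty range of splits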
-      int idx1 = rand.nextInt(splits.size() - 1);
-      int idx2 = rand.nextInt(splits.size() - (idx1 + 1)) + idx1 + 1;
-
-      c.tableOperations().compact(tableName, splits.get(idx1), splits.get(idx2), false, false);
-    }
-
-    c.tableOperations().offline(tableName);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java b/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
deleted file mode 100644
index 05d0562..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertFalse;
-
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl.LogWriter;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-
-import com.google.common.collect.Iterators;
-
-public class FunctionalTestUtils {
-
-  public static int countRFiles(Connector c, String tableName) throws Exception {
-    Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    String tableId = c.tableOperations().tableIdMap().get(tableName);
-    scanner.setRange(MetadataSchema.TabletsSection.getRange(tableId));
-    scanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-
-    return Iterators.size(scanner.iterator());
-  }
-
-  static void checkRFiles(Connector c, String tableName, int minTablets, int maxTablets, int minRFiles, int maxRFiles) throws Exception {
-    Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    String tableId = c.tableOperations().tableIdMap().get(tableName);
-    scanner.setRange(new Range(new Text(tableId + ";"), true, new Text(tableId + "<"), true));
-    scanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-    MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
-
-    HashMap<Text,Integer> tabletFileCounts = new HashMap<Text,Integer>();
-
-    for (Entry<Key,Value> entry : scanner) {
-
-      Text row = entry.getKey().getRow();
-
-      Integer count = tabletFileCounts.get(row);
-      if (count == null)
-        count = 0;
-      if (entry.getKey().getColumnFamily().equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME)) {
-        count = count + 1;
-      }
-
-      tabletFileCounts.put(row, count);
-    }
-
-    if (tabletFileCounts.size() < minTablets || tabletFileCounts.size() > maxTablets) {
-      throw new Exception("Did not find expected number of tablets " + tabletFileCounts.size());
-    }
-
-    Set<Entry<Text,Integer>> es = tabletFileCounts.entrySet();
-    for (Entry<Text,Integer> entry : es) {
-      if (entry.getValue() > maxRFiles || entry.getValue() < minRFiles) {
-        throw new Exception("tablet " + entry.getKey() + " has " + entry.getValue() + " map files");
-      }
-    }
-  }
-
-  static public void bulkImport(Connector c, FileSystem fs, String table, String dir) throws Exception {
-    String failDir = dir + "_failures";
-    Path failPath = new Path(failDir);
-    fs.delete(failPath, true);
-    fs.mkdirs(failPath);
-
-    // Ensure server can read/modify files
-    FsShell fsShell = new FsShell(fs.getConf());
-    Assert.assertEquals("Failed to chmod " + dir, 0, fsShell.run(new String[] {"-chmod", "-R", "777", dir}));
-    Assert.assertEquals("Failed to chmod " + failDir, 0, fsShell.run(new String[] {"-chmod", "-R", "777", failDir}));
-
-    c.tableOperations().importDirectory(table, dir, failDir, false);
-
-    if (fs.listStatus(failPath).length > 0) {
-      throw new Exception("Some files failed to bulk import");
-    }
-
-  }
-
-  static public void checkSplits(Connector c, String table, int min, int max) throws Exception {
-    Collection<Text> splits = c.tableOperations().listSplits(table);
-    if (splits.size() < min || splits.size() > max) {
-      throw new Exception("# of table splits points out of range, #splits=" + splits.size() + " table=" + table + " min=" + min + " max=" + max);
-    }
-  }
-
-  static public void createRFiles(final Connector c, FileSystem fs, String path, int rows, int splits, int threads) throws Exception {
-    fs.delete(new Path(path), true);
-    ExecutorService threadPool = Executors.newFixedThreadPool(threads);
-    final AtomicBoolean fail = new AtomicBoolean(false);
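-    // each chunk of rows is ingested to its own RFile (mf<startRow>) on a pool thread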
-    for (int i = 0; i < rows; i += rows / splits) {
-      final TestIngest.Opts opts = new TestIngest.Opts();
-      opts.outputFile = String.format("%s/mf%s", path, i);
-      opts.random = 56;
-      opts.timestamp = 1;
-      opts.dataSize = 50;
-      opts.rows = rows / splits;
-      opts.startRow = i;
-      opts.cols = 1;
-      threadPool.execute(new Runnable() {
-        @Override
-        public void run() {
-          try {
-            TestIngest.ingest(c, opts, new BatchWriterOpts());
-          } catch (Exception e) {
-            fail.set(true);
-          }
-        }
-      });
-    }
-    threadPool.shutdown();
-    threadPool.awaitTermination(1, TimeUnit.HOURS);
-    assertFalse(fail.get());
-  }
-
-  static public String readAll(InputStream is) throws IOException {
-    byte[] buffer = new byte[4096];
-    StringBuilder result = new StringBuilder();
-    while (true) {
-      int n = is.read(buffer);
-      if (n <= 0)
-        break;
-      result.append(new String(buffer, 0, n));
-    }
-    return result.toString();
-  }
-
-  public static String readAll(MiniAccumuloClusterImpl c, Class<?> klass, Process p) throws Exception {
-    for (LogWriter writer : c.getLogWriters())
-      writer.flush();
-    // close the log-file stream once it has been slurped
-    try (InputStream is = new FileInputStream(c.getConfig().getLogDir() + "/" + klass.getSimpleName() + "_" + p.hashCode() + ".out")) {
-      return readAll(is);
-    }
-  }
-
-  static Mutation nm(String row, String cf, String cq, Value value) {
-    Mutation m = new Mutation(new Text(row));
-    m.put(new Text(cf), new Text(cq), value);
-    return m;
-  }
-
-  static Mutation nm(String row, String cf, String cq, String value) {
-    return nm(row, cf, cq, new Value(value.getBytes()));
-  }
-
-  public static SortedSet<Text> splits(String[] splits) {
-    SortedSet<Text> result = new TreeSet<Text>();
-    for (String split : splits)
-      result.add(new Text(split));
-    return result;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
deleted file mode 100644
index a73f239..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.ServerServices;
-import org.apache.accumulo.core.util.ServerServices.Service;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooLock;
-import org.apache.accumulo.gc.SimpleGarbageCollector;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessNotFoundException;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.NoNodeException;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class GarbageCollectorIT extends ConfigurableMacBase {
-  private static final String OUR_SECRET = "itsreallysecret";
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 5 * 60;
-  }
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
-    cfg.setProperty(Property.INSTANCE_SECRET, OUR_SECRET);
-    cfg.setProperty(Property.GC_CYCLE_START, "1");
-    cfg.setProperty(Property.GC_CYCLE_DELAY, "1");
-    cfg.setProperty(Property.GC_PORT, "0");
-    cfg.setProperty(Property.TSERV_MAXMEM, "5K");
-    cfg.setProperty(Property.TSERV_MAJC_DELAY, "1");
-
-    // use raw local file system so walogs sync and flush will work
-    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
-  }
-
-  private void killMacGc() throws ProcessNotFoundException, InterruptedException, KeeperException {
-    // kill gc started by MAC
-    getCluster().killProcess(ServerType.GARBAGE_COLLECTOR, getCluster().getProcesses().get(ServerType.GARBAGE_COLLECTOR).iterator().next());
-    // delete lock in zookeeper if there, this will allow next GC to start quickly
-    String path = ZooUtil.getRoot(new ZooKeeperInstance(getCluster().getClientConfig())) + Constants.ZGC_LOCK;
-    ZooReaderWriter zk = new ZooReaderWriter(cluster.getZooKeepers(), 30000, OUR_SECRET);
-    try {
-      ZooLock.deleteLock(zk, path);
-    } catch (IllegalStateException e) {
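-      // the lock was not there (see comment above); nothing to clean up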
-
-    }
-
-    assertNull(getCluster().getProcesses().get(ServerType.GARBAGE_COLLECTOR));
-  }
-
-  @Test
-  public void gcTest() throws Exception {
-    killMacGc();
-    Connector c = getConnector();
-    c.tableOperations().create("test_ingest");
-    c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K");
-    TestIngest.Opts opts = new TestIngest.Opts();
-    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
-    vopts.rows = opts.rows = 10000;
-    vopts.cols = opts.cols = 1;
-    opts.setPrincipal("root");
-    vopts.setPrincipal("root");
-    TestIngest.ingest(c, opts, new BatchWriterOpts());
-    c.tableOperations().compact("test_ingest", null, null, true, true);
-    int before = countFiles();
-    while (true) {
-      UtilWaitThread.sleep(1000);
-      int more = countFiles();
-      if (more <= before)
-        break;
-      before = more;
-    }
-
-    // restart GC
-    getCluster().start();
-    UtilWaitThread.sleep(15 * 1000);
-    int after = countFiles();
-    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
-    assertTrue(after < before);
-  }
-
-  @Test
-  public void gcLotsOfCandidatesIT() throws Exception {
-    killMacGc();
-
-    log.info("Filling metadata table with bogus delete flags");
-    Connector c = getConnector();
-    addEntries(c, new BatchWriterOpts());
-    cluster.getConfig().setDefaultMemory(10, MemoryUnit.MEGABYTE);
-    Process gc = cluster.exec(SimpleGarbageCollector.class);
-    UtilWaitThread.sleep(20 * 1000);
-    String output = FunctionalTestUtils.readAll(cluster, SimpleGarbageCollector.class, gc);
-    gc.destroy();
-    assertTrue(output.contains("delete candidates has exceeded"));
-  }
-
-  @Test
-  public void dontGCRootLog() throws Exception {
-    killMacGc();
-    // dirty metadata
-    Connector c = getConnector();
-    String table = getUniqueNames(1)[0];
-    c.tableOperations().create(table);
-    // let gc run for a bit
-    cluster.start();
-    UtilWaitThread.sleep(20 * 1000);
-    killMacGc();
-    // kill tservers
-    for (ProcessReference ref : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
-      cluster.killProcess(ServerType.TABLET_SERVER, ref);
-    }
-    // run recovery
-    cluster.start();
-    // did it recover?
-    Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    Iterators.size(scanner.iterator());
-  }
-
-  private Mutation createDelMutation(String path, String cf, String cq, String val) {
-    Text row = new Text(MetadataSchema.DeletesSection.getRowPrefix() + path);
-    Mutation delFlag = new Mutation(row);
-    delFlag.put(cf, cq, val);
-    return delFlag;
-  }
-
-  @Test
-  public void testInvalidDelete() throws Exception {
-    killMacGc();
-
-    String table = getUniqueNames(1)[0];
-    getConnector().tableOperations().create(table);
-
-    BatchWriter bw2 = getConnector().createBatchWriter(table, new BatchWriterConfig());
-    Mutation m1 = new Mutation("r1");
-    m1.put("cf1", "cq1", "v1");
-    bw2.addMutation(m1);
-    bw2.close();
-
-    getConnector().tableOperations().flush(table, null, null, true);
-
-    // ensure an invalid delete entry does not cause GC to go berserk ACCUMULO-2520
-    getConnector().securityOperations().grantTablePermission(getConnector().whoami(), MetadataTable.NAME, TablePermission.WRITE);
-    BatchWriter bw3 = getConnector().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
-    bw3.addMutation(createDelMutation("", "", "", ""));
-    bw3.addMutation(createDelMutation("", "testDel", "test", "valueTest"));
-    bw3.addMutation(createDelMutation("/", "", "", ""));
-    bw3.close();
-
-    Process gc = cluster.exec(SimpleGarbageCollector.class);
-    try {
-      String output = "";
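-      // "Ingoring" (sic) must match the misspelled message the garbage collector actually logs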
-      while (!output.contains("Ingoring invalid deletion candidate")) {
-        UtilWaitThread.sleep(250);
-        try {
-          output = FunctionalTestUtils.readAll(cluster, SimpleGarbageCollector.class, gc);
-        } catch (IOException ioe) {
-          log.error("Could not read all from cluster.", ioe);
-        }
-      }
-    } finally {
-      gc.destroy();
-    }
-
-    Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
-    Iterator<Entry<Key,Value>> iter = scanner.iterator();
-    assertTrue(iter.hasNext());
-    Entry<Key,Value> entry = iter.next();
-    Assert.assertEquals("r1", entry.getKey().getRow().toString());
-    Assert.assertEquals("cf1", entry.getKey().getColumnFamily().toString());
-    Assert.assertEquals("cq1", entry.getKey().getColumnQualifier().toString());
-    Assert.assertEquals("v1", entry.getValue().toString());
-    Assert.assertFalse(iter.hasNext());
-  }
-
-  @Test
-  public void testProperPortAdvertisement() throws Exception {
-
-    Connector conn = getConnector();
-    Instance instance = conn.getInstance();
-
-    ZooReaderWriter zk = new ZooReaderWriter(cluster.getZooKeepers(), 30000, OUR_SECRET);
-    String path = ZooUtil.getRoot(instance) + Constants.ZGC_LOCK;
-    for (int i = 0; i < 5; i++) {
-      List<String> locks;
-      try {
-        locks = zk.getChildren(path, null);
-      } catch (NoNodeException e) {
-        Thread.sleep(5000);
-        continue;
-      }
-
-      if (locks != null && locks.size() > 0) {
-        Collections.sort(locks);
-
-        String lockPath = path + "/" + locks.get(0);
-
-        String gcLoc = new String(zk.getData(lockPath, null));
-
-        Assert.assertTrue("Found unexpected data in zookeeper for GC location: " + gcLoc, gcLoc.startsWith(Service.GC_CLIENT.name()));
-        int loc = gcLoc.indexOf(ServerServices.SEPARATOR_CHAR);
-        Assert.assertNotEquals("Could not find split point of GC location for: " + gcLoc, -1, loc);
-        String addr = gcLoc.substring(loc + 1);
-
-        int addrSplit = addr.indexOf(':');
-        Assert.assertNotEquals("Could not find split of GC host:port for: " + addr, -1, addrSplit);
-
-        String host = addr.substring(0, addrSplit), port = addr.substring(addrSplit + 1);
-        // We shouldn't have the "bindall" address in zk
-        Assert.assertNotEquals("0.0.0.0", host);
-        // Nor should we have the "random port" in zk
-        Assert.assertNotEquals(0, Integer.parseInt(port));
-        return;
-      }
-
-      Thread.sleep(5000);
-    }
-
-    Assert.fail("Could not find advertised GC address");
-  }
-
-  private int countFiles() throws Exception {
-    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
-    Path path = new Path(cluster.getConfig().getDir() + "/accumulo/tables/1/*/*.rf");
-    return Iterators.size(Arrays.asList(fs.globStatus(path)).iterator());
-  }
-
-  public static void addEntries(Connector conn, BatchWriterOpts bwOpts) throws Exception {
-    conn.securityOperations().grantTablePermission(conn.whoami(), MetadataTable.NAME, TablePermission.WRITE);
-    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, bwOpts.getBatchWriterConfig());
-
-    for (int i = 0; i < 100000; ++i) {
-      final Text emptyText = new Text("");
-      Text row = new Text(String.format("%s/%020d/%s", MetadataSchema.DeletesSection.getRowPrefix(), i,
-          "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj"));
-      Mutation delFlag = new Mutation(row);
-      delFlag.put(emptyText, emptyText, new Value(new byte[] {}));
-      bw.addMutation(delFlag);
-    }
-    bw.close();
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java b/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
deleted file mode 100644
index 59d8259..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Map;
-
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.Daemon;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.start.Main;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.accumulo.tserver.TabletServer;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-public class HalfDeadTServerIT extends ConfigurableMacBase {
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setNumTservers(1);
-    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s");
-    cfg.setProperty(Property.GENERAL_RPC_TIMEOUT, "5s");
-    cfg.useMiniDFS(true);
-  }
-
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 4 * 60;
-  }
-
-  class DumpOutput extends Daemon {
-
-    private final BufferedReader rdr;
-    private final StringBuilder output;
-
-    DumpOutput(InputStream is) {
-      rdr = new BufferedReader(new InputStreamReader(is));
-      output = new StringBuilder();
-    }
-
-    @Override
-    public void run() {
-      try {
-        while (true) {
-          String line = rdr.readLine();
-          if (line == null)
-            break;
-          System.out.println(line);
-          output.append(line);
-          output.append("\n");
-        }
-      } catch (IOException ex) {
-        log.error("IOException", ex);
-      }
-    }
-
-    @Override
-    public String toString() {
-      return output.toString();
-    }
-  }
-
-  @Test
-  public void testRecover() throws Exception {
-    test(10);
-  }
-
-  @Test
-  public void testTimeout() throws Exception {
-    String results = test(20, true);
-    if (results != null) {
-      if (!results.contains("Session expired")) {
-        log.info("Failed to find 'Session expired' in output, but TServer did die which is expected");
-      }
-    }
-  }
-
-  public String test(int seconds) throws Exception {
-    return test(seconds, false);
-  }
-
-  public String test(int seconds, boolean expectTserverDied) throws Exception {
-    if (!makeDiskFailureLibrary())
-      return null;
-    Connector c = getConnector();
-    assertEquals(1, c.instanceOperations().getTabletServers().size());
-
-    // create our own tablet server with the special test library
-    String javaHome = System.getProperty("java.home");
-    String javaBin = javaHome + File.separator + "bin" + File.separator + "java";
-    String classpath = System.getProperty("java.class.path");
-    classpath = new File(cluster.getConfig().getDir(), "conf") + File.pathSeparator + classpath;
-    String className = TabletServer.class.getName();
-    ArrayList<String> argList = new ArrayList<String>();
-    argList.addAll(Arrays.asList(javaBin, "-cp", classpath));
-    argList.addAll(Arrays.asList(Main.class.getName(), className));
-    ProcessBuilder builder = new ProcessBuilder(argList);
-    Map<String,String> env = builder.environment();
-    env.put("ACCUMULO_HOME", cluster.getConfig().getDir().getAbsolutePath());
-    env.put("ACCUMULO_LOG_DIR", cluster.getConfig().getLogDir().getAbsolutePath());
-    String trickFilename = cluster.getConfig().getLogDir().getAbsolutePath() + "/TRICK_FILE";
-    env.put("TRICK_FILE", trickFilename);
-    String libPath = System.getProperty("user.dir") + "/target/fake_disk_failure.so";
-    env.put("LD_PRELOAD", libPath);
-    env.put("DYLD_INSERT_LIBRARIES", libPath);
-    env.put("DYLD_FORCE_FLAT_NAMESPACE", "true");
-    Process ingest = null;
-    Process tserver = builder.start();
-    DumpOutput t = new DumpOutput(tserver.getInputStream());
-    try {
-      t.start();
-      UtilWaitThread.sleep(1000);
-      // don't need the regular tablet server
-      cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
-      UtilWaitThread.sleep(1000);
-      c.tableOperations().create("test_ingest");
-      assertEquals(1, c.instanceOperations().getTabletServers().size());
-      int rows = 100 * 1000;
-      ingest = cluster.exec(TestIngest.class, "-u", "root", "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", ROOT_PASSWORD, "--rows", rows
-          + "");
-      UtilWaitThread.sleep(500);
-
-      // block I/O with some side-channel trickiness
-      File trickFile = new File(trickFilename);
-      try {
-        assertTrue(trickFile.createNewFile());
-        UtilWaitThread.sleep(seconds * 1000);
-      } finally {
-        if (!trickFile.delete()) {
-          log.error("Couldn't delete " + trickFile);
-        }
-      }
-
-      if (seconds <= 10) {
-        assertEquals(0, ingest.waitFor());
-        VerifyIngest.Opts vopts = new VerifyIngest.Opts();
-        vopts.rows = rows;
-        vopts.setPrincipal("root");
-        VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
-      } else {
-        UtilWaitThread.sleep(5 * 1000);
-        tserver.waitFor();
-        t.join();
-        tserver = null;
-      }
-      // verify the process was blocked
-      String results = t.toString();
-      assertTrue(results.contains("sleeping\nsleeping\nsleeping\n"));
-      return results;
-    } finally {
-      if (ingest != null) {
-        ingest.destroy();
-        ingest.waitFor();
-      }
-      if (tserver != null) {
-        try {
-          if (expectTserverDied) {
-            try {
-              tserver.exitValue();
-            } catch (IllegalThreadStateException e) {
-              fail("Expected TServer to kill itself, but it is still running");
-            }
-          }
-        } finally {
-          tserver.destroy();
-          tserver.waitFor();
-          t.join();
-        }
-      }
-    }
-  }
-
-  private boolean makeDiskFailureLibrary() throws Exception {
-    String root = System.getProperty("user.dir");
-    String source = root + "/src/test/c/fake_disk_failure.c";
-    String lib = root + "/target/fake_disk_failure.so";
-    String platform = System.getProperty("os.name");
-    String[] cmd;
-    if (platform.equals("Darwin")) {
-      cmd = new String[] {"gcc", "-arch", "x86_64", "-arch", "i386", "-dynamiclib", "-O3", "-fPIC", source, "-o", lib};
-    } else {
-      cmd = new String[] {"gcc", "-D_GNU_SOURCE", "-Wall", "-fPIC", source, "-shared", "-o", lib, "-ldl"};
-    }
-    Process gcc = Runtime.getRuntime().exec(cmd);
-    return gcc.waitFor() == 0;
-  }
-
-}
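[Annotation] HalfDeadTServerIT's central trick, visible above, is launching a second tserver JVM with a fault-injection library preloaded (LD_PRELOAD on Linux, DYLD_INSERT_LIBRARIES on OS X) and then stalling its I/O by creating a "trick file" the library polls. A minimal sketch of just those launch mechanics; the main class, library path, and trick file here are illustrative placeholders supplied by the caller.

import java.io.File;
import java.util.Arrays;

public class PreloadLauncher {
  /**
   * Starts a child JVM running mainClass with the interposing library
   * preloaded. While trickFile exists, the library stalls libc I/O calls,
   * simulating a disk that hangs without the process dying.
   */
  public static Process launch(String mainClass, File preloadLib, File trickFile) throws Exception {
    String java = System.getProperty("java.home") + File.separator + "bin" + File.separator + "java";
    ProcessBuilder pb = new ProcessBuilder(
        Arrays.asList(java, "-cp", System.getProperty("java.class.path"), mainClass));
    pb.environment().put("LD_PRELOAD", preloadLib.getAbsolutePath());            // Linux
    pb.environment().put("DYLD_INSERT_LIBRARIES", preloadLib.getAbsolutePath()); // OS X
    pb.environment().put("DYLD_FORCE_FLAT_NAMESPACE", "true");                   // OS X flat namespace
    pb.environment().put("TRICK_FILE", trickFile.getAbsolutePath());             // side channel the library polls
    pb.redirectErrorStream(true);
    return pb.start();
  }
}

Creating the trick file then freezes the child's reads and writes until it is deleted, which is how the test holds a tserver "half dead" long enough for its ZooKeeper session to expire.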

http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/KerberosIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/KerberosIT.java b/test/src/test/java/org/apache/accumulo/test/functional/KerberosIT.java
deleted file mode 100644
index aa8313e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/KerberosIT.java
+++ /dev/null
@@ -1,573 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.admin.CompactionConfig;
-import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
-import org.apache.accumulo.core.client.impl.AuthenticationTokenIdentifier;
-import org.apache.accumulo.core.client.impl.DelegationTokenImpl;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.security.SystemPermission;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.harness.AccumuloITBase;
-import org.apache.accumulo.harness.MiniClusterConfigurationCallback;
-import org.apache.accumulo.harness.MiniClusterHarness;
-import org.apache.accumulo.harness.TestingKdc;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.minikdc.MiniKdc;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Sets;
-
-/**
- * MAC test which uses {@link MiniKdc} to simulate a secure environment. Can be used as a sanity check for Kerberos/SASL testing.
- */
-public class KerberosIT extends AccumuloITBase {
-  private static final Logger log = LoggerFactory.getLogger(KerberosIT.class);
-
-  private static TestingKdc kdc;
-  private static String krbEnabledForITs = null;
-  private static ClusterUser rootUser;
-
-  @BeforeClass
-  public static void startKdc() throws Exception {
-    kdc = new TestingKdc();
-    kdc.start();
-    krbEnabledForITs = System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION);
-    if (null == krbEnabledForITs || !Boolean.parseBoolean(krbEnabledForITs)) {
-      System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, "true");
-    }
-    rootUser = kdc.getRootUser();
-  }
-
-  @AfterClass
-  public static void stopKdc() throws Exception {
-    if (null != kdc) {
-      kdc.stop();
-    }
-    if (null != krbEnabledForITs) {
-      System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, krbEnabledForITs);
-    }
-  }
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 60 * 5;
-  }
-
-  private MiniAccumuloClusterImpl mac;
-
-  @Before
-  public void startMac() throws Exception {
-    MiniClusterHarness harness = new MiniClusterHarness();
-    mac = harness.create(this, new PasswordToken("unused"), kdc, new MiniClusterConfigurationCallback() {
-
-      @Override
-      public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
-        Map<String,String> site = cfg.getSiteConfig();
-        site.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "10s");
-        cfg.setSiteConfig(site);
-      }
-
-    });
-
-    mac.getConfig().setNumTservers(1);
-    mac.start();
-    // Enable Kerberos auth
-    Configuration conf = new Configuration(false);
-    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
-    UserGroupInformation.setConfiguration(conf);
-  }
-
-  @After
-  public void stopMac() throws Exception {
-    if (null != mac) {
-      mac.stop();
-    }
-  }
-
-  @Test
-  public void testAdminUser() throws Exception {
-    // Login as the client (provided to `accumulo init` as the "root" user)
-    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
-
-    final Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
-
-    // The "root" user should have all system permissions
-    for (SystemPermission perm : SystemPermission.values()) {
-      assertTrue("Expected user to have permission: " + perm, conn.securityOperations().hasSystemPermission(conn.whoami(), perm));
-    }
-
-    // and the ability to modify the root and metadata tables
-    for (String table : Arrays.asList(RootTable.NAME, MetadataTable.NAME)) {
-      assertTrue(conn.securityOperations().hasTablePermission(conn.whoami(), table, TablePermission.ALTER_TABLE));
-    }
-  }
-
-  @Test
-  public void testNewUser() throws Exception {
-    String newUser = testName.getMethodName();
-    final File newUserKeytab = new File(kdc.getKeytabDir(), newUser + ".keytab");
-    if (newUserKeytab.exists() && !newUserKeytab.delete()) {
-      log.warn("Unable to delete {}", newUserKeytab);
-    }
-
-    // Create a new user
-    kdc.createPrincipal(newUserKeytab, newUser);
-
-    newUser = kdc.qualifyUser(newUser);
-
-    // Login as the "root" user
-    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
-    log.info("Logged in as {}", rootUser.getPrincipal());
-
-    Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
-    log.info("Created connector as {}", rootUser.getPrincipal());
-    assertEquals(rootUser.getPrincipal(), conn.whoami());
-
-    // Force some RPC to happen server-side, which will create the system user if it doesn't already exist
-    createTableWithDataAndCompact(conn);
-
-    HashSet<String> users = Sets.newHashSet(rootUser.getPrincipal());
-    assertEquals(users, conn.securityOperations().listLocalUsers());
-
-    // Switch to a new user
-    UserGroupInformation.loginUserFromKeytab(newUser, newUserKeytab.getAbsolutePath());
-    log.info("Logged in as {}", newUser);
-
-    conn = mac.getConnector(newUser, new KerberosToken());
-    log.info("Created connector as {}", newUser);
-    assertEquals(newUser, conn.whoami());
-
-    // The new user should have no system permissions
-    for (SystemPermission perm : SystemPermission.values()) {
-      assertFalse(conn.securityOperations().hasSystemPermission(newUser, perm));
-    }
-
-    users.add(newUser);
-
-    // Same users as before, plus the new user we just created
-    assertEquals(users, conn.securityOperations().listLocalUsers());
-  }
-
-  @Test
-  public void testUserPrivilegesThroughGrant() throws Exception {
-    String user1 = testName.getMethodName();
-    final File user1Keytab = new File(kdc.getKeytabDir(), user1 + ".keytab");
-    if (user1Keytab.exists() && !user1Keytab.delete()) {
-      log.warn("Unable to delete {}", user1Keytab);
-    }
-
-    // Create a new user
-    kdc.createPrincipal(user1Keytab, user1);
-
-    user1 = kdc.qualifyUser(user1);
-
-    // Log in as user1
-    UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
-    log.info("Logged in as {}", user1);
-
-    // Indirectly creates this user when we use it
-    Connector conn = mac.getConnector(user1, new KerberosToken());
-    log.info("Created connector as {}", user1);
-
-    // The new user should have no system permissions
-    for (SystemPermission perm : SystemPermission.values()) {
-      assertFalse(conn.securityOperations().hasSystemPermission(user1, perm));
-    }
-
-    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
-    conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
-
-    conn.securityOperations().grantSystemPermission(user1, SystemPermission.CREATE_TABLE);
-
-    // Switch back to the original user
-    UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
-    conn = mac.getConnector(user1, new KerberosToken());
-
-    // Shouldn't throw an exception since we granted the create table permission
-    final String table = testName.getMethodName() + "_user_table";
-    conn.tableOperations().create(table);
-
-    // Make sure we can actually use the table we made
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
-    Mutation m = new Mutation("a");
-    m.put("b", "c", "d");
-    bw.addMutation(m);
-    bw.close();
-
-    conn.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
-  }
-
-  @Test
-  public void testUserPrivilegesForTable() throws Exception {
-    String user1 = testName.getMethodName();
-    final File user1Keytab = new File(kdc.getKeytabDir(), user1 + ".keytab");
-    if (user1Keytab.exists() && !user1Keytab.delete()) {
-      log.warn("Unable to delete {}", user1Keytab);
-    }
-
-    // Create a new user -- the principal cannot contain the realm
-    kdc.createPrincipal(user1Keytab, user1);
-
-    user1 = kdc.qualifyUser(user1);
-
-    // Log in as user1
-    UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
-    log.info("Logged in as {}", user1);
-
-    // Indirectly creates this user when we use it
-    Connector conn = mac.getConnector(user1, new KerberosToken());
-    log.info("Created connector as {}", user1);
-
-    // The new user should have no system permissions
-    for (SystemPermission perm : SystemPermission.values()) {
-      assertFalse(conn.securityOperations().hasSystemPermission(user1, perm));
-    }
-
-    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
-    conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
-
-    final String table = testName.getMethodName() + "_user_table";
-    conn.tableOperations().create(table);
-
-    final String viz = "viz";
-
-    // Give our unprivileged user permission on the table we made for them
-    conn.securityOperations().grantTablePermission(user1, table, TablePermission.READ);
-    conn.securityOperations().grantTablePermission(user1, table, TablePermission.WRITE);
-    conn.securityOperations().grantTablePermission(user1, table, TablePermission.ALTER_TABLE);
-    conn.securityOperations().grantTablePermission(user1, table, TablePermission.DROP_TABLE);
-    conn.securityOperations().changeUserAuthorizations(user1, new Authorizations(viz));
-
-    // Switch back to the original user
-    UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
-    conn = mac.getConnector(user1, new KerberosToken());
-
-    // Make sure we can actually use the table we made
-
-    // Write data
-    final long ts = 1000L;
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
-    Mutation m = new Mutation("a");
-    m.put("b", "c", new ColumnVisibility(viz.getBytes()), ts, "d");
-    bw.addMutation(m);
-    bw.close();
-
-    // Compact
-    conn.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
-
-    // Alter
-    conn.tableOperations().setProperty(table, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-
-    // Read (and proper authorizations)
-    Scanner s = conn.createScanner(table, new Authorizations(viz));
-    Iterator<Entry<Key,Value>> iter = s.iterator();
-    assertTrue("No results from iterator", iter.hasNext());
-    Entry<Key,Value> entry = iter.next();
-    assertEquals(new Key("a", "b", "c", viz, ts), entry.getKey());
-    assertEquals(new Value("d".getBytes()), entry.getValue());
-    assertFalse("Had more results from iterator", iter.hasNext());
-  }
-
-  @Test
-  public void testDelegationToken() throws Exception {
-    final String tableName = getUniqueNames(1)[0];
-
-    // Login as the "root" user
-    UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
-    log.info("Logged in as {}", rootUser.getPrincipal());
-
-    final int numRows = 100, numColumns = 10;
-
-    // As the "root" user, open up the connection and get a delegation token
-    final AuthenticationToken delegationToken = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
-      @Override
-      public AuthenticationToken run() throws Exception {
-        Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
-        log.info("Created connector as {}", rootUser.getPrincipal());
-        assertEquals(rootUser.getPrincipal(), conn.whoami());
-
-        conn.tableOperations().create(tableName);
-        BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
-        for (int r = 0; r < numRows; r++) {
-          Mutation m = new Mutation(Integer.toString(r));
-          for (int c = 0; c < numColumns; c++) {
-            String col = Integer.toString(c);
-            m.put(col, col, col);
-          }
-          bw.addMutation(m);
-        }
-        bw.close();
-
-        return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
-      }
-    });
-
-    // The above login with keytab doesn't have a way to log out, so make a fake user that won't have krb credentials
-    UserGroupInformation userWithoutPrivs = UserGroupInformation.createUserForTesting("fake_user", new String[0]);
-    int recordsSeen = userWithoutPrivs.doAs(new PrivilegedExceptionAction<Integer>() {
-      @Override
-      public Integer run() throws Exception {
-        Connector conn = mac.getConnector(rootUser.getPrincipal(), delegationToken);
-
-        BatchScanner bs = conn.createBatchScanner(tableName, Authorizations.EMPTY, 2);
-        bs.setRanges(Collections.singleton(new Range()));
-        int recordsSeen = Iterables.size(bs);
-        bs.close();
-        return recordsSeen;
-      }
-    });
-
-    assertEquals(numRows * numColumns, recordsSeen);
-  }
-
-  @Test
-  public void testDelegationTokenAsDifferentUser() throws Exception {
-    // Login as the "root" user
-    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
-    log.info("Logged in as {}", rootUser.getPrincipal());
-
-    // As the "root" user, open up the connection and get a delegation token
-    Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
-    log.info("Created connector as {}", rootUser.getPrincipal());
-    assertEquals(rootUser.getPrincipal(), conn.whoami());
-    final AuthenticationToken delegationToken = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
-
-    // The above login with keytab doesn't have a way to log out, so make a fake user that won't have krb credentials
-    UserGroupInformation userWithoutPrivs = UserGroupInformation.createUserForTesting("fake_user", new String[0]);
-    try {
-      // Use the delegation token to try to log in as a different user
-      userWithoutPrivs.doAs(new PrivilegedExceptionAction<Void>() {
-        @Override
-        public Void run() throws Exception {
-          mac.getConnector("some_other_user", delegationToken);
-          return null;
-        }
-      });
-      fail("Using a delegation token as a different user should throw an exception");
-    } catch (UndeclaredThrowableException e) {
-      Throwable cause = e.getCause();
-      assertNotNull(cause);
-      // We should get an AccumuloSecurityException from trying to use a delegation token for the wrong user
-      assertTrue("Expected cause to be AccumuloSecurityException, but was " + cause.getClass(), cause instanceof AccumuloSecurityException);
-    }
-  }
-
-  @Test(expected = AccumuloSecurityException.class)
-  public void testGetDelegationTokenDenied() throws Exception {
-    String newUser = testName.getMethodName();
-    final File newUserKeytab = new File(kdc.getKeytabDir(), newUser + ".keytab");
-    if (newUserKeytab.exists() && !newUserKeytab.delete()) {
-      log.warn("Unable to delete {}", newUserKeytab);
-    }
-
-    // Create a new user
-    kdc.createPrincipal(newUserKeytab, newUser);
-
-    newUser = kdc.qualifyUser(newUser);
-
-    // Login as a normal user
-    UserGroupInformation.loginUserFromKeytab(newUser, newUserKeytab.getAbsolutePath());
-
-    // As the "root" user, open up the connection and get a delegation token
-    Connector conn = mac.getConnector(newUser, new KerberosToken());
-    log.info("Created connector as {}", newUser);
-    assertEquals(newUser, conn.whoami());
-
-    conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
-  }
-
-  @Test
-  public void testRestartedMasterReusesSecretKey() throws Exception {
-    // Login as the "root" user
-    UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
-    log.info("Logged in as {}", rootUser.getPrincipal());
-
-    // As the "root" user, open up the connection and get a delegation token
-    final AuthenticationToken delegationToken1 = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
-      @Override
-      public AuthenticationToken run() throws Exception {
-        Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
-        log.info("Created connector as {}", rootUser.getPrincipal());
-        assertEquals(rootUser.getPrincipal(), conn.whoami());
-
-        AuthenticationToken token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
-
-        assertTrue("Could not get tables with delegation token", mac.getConnector(rootUser.getPrincipal(), token).tableOperations().list().size() > 0);
-
-        return token;
-      }
-    });
-
-    log.info("Stopping master");
-    mac.getClusterControl().stop(ServerType.MASTER);
-    Thread.sleep(5000);
-    log.info("Restarting master");
-    mac.getClusterControl().start(ServerType.MASTER);
-
-    // Make sure our original token is still good
-    root.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-        Connector conn = mac.getConnector(rootUser.getPrincipal(), delegationToken1);
-
-        assertTrue("Could not get tables with delegation token", conn.tableOperations().list().size() > 0);
-
-        return null;
-      }
-    });
-
-    // Get a new token, so we can compare the keyId on the second to the first
-    final AuthenticationToken delegationToken2 = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
-      @Override
-      public AuthenticationToken run() throws Exception {
-        Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
-        log.info("Created connector as {}", rootUser.getPrincipal());
-        assertEquals(rootUser.getPrincipal(), conn.whoami());
-
-        AuthenticationToken token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
-
-        assertTrue("Could not get tables with delegation token", mac.getConnector(rootUser.getPrincipal(), token).tableOperations().list().size() > 0);
-
-        return token;
-      }
-    });
-
-    // After a restart, the master should reuse the same secret key as long as it hasn't expired (1 day by default)
-    DelegationTokenImpl dt1 = (DelegationTokenImpl) delegationToken1;
-    DelegationTokenImpl dt2 = (DelegationTokenImpl) delegationToken2;
-    assertEquals(dt1.getIdentifier().getKeyId(), dt2.getIdentifier().getKeyId());
-  }
-
-  @Test(expected = AccumuloException.class)
-  public void testDelegationTokenWithInvalidLifetime() throws Throwable {
-    // Login as the "root" user
-    UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
-    log.info("Logged in as {}", rootUser.getPrincipal());
-
-    // As the "root" user, open up the connection and get a delegation token
-    try {
-      root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
-        @Override
-        public AuthenticationToken run() throws Exception {
-          Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
-          log.info("Created connector as {}", rootUser.getPrincipal());
-          assertEquals(rootUser.getPrincipal(), conn.whoami());
-
-          // Should fail
-          return conn.securityOperations().getDelegationToken(new DelegationTokenConfig().setTokenLifetime(Long.MAX_VALUE, TimeUnit.MILLISECONDS));
-        }
-      });
-    } catch (UndeclaredThrowableException e) {
-      Throwable cause = e.getCause();
-      if (null != cause) {
-        throw cause;
-      } else {
-        throw e;
-      }
-    }
-  }
-
-  @Test
-  public void testDelegationTokenWithReducedLifetime() throws Throwable {
-    // Login as the "root" user
-    UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
-    log.info("Logged in as {}", rootUser.getPrincipal());
-
-    // As the "root" user, open up the connection and get a delegation token
-    final AuthenticationToken dt = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
-      @Override
-      public AuthenticationToken run() throws Exception {
-        Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
-        log.info("Created connector as {}", rootUser.getPrincipal());
-        assertEquals(rootUser.getPrincipal(), conn.whoami());
-
-        return conn.securityOperations().getDelegationToken(new DelegationTokenConfig().setTokenLifetime(5, TimeUnit.MINUTES));
-      }
-    });
-
-    AuthenticationTokenIdentifier identifier = ((DelegationTokenImpl) dt).getIdentifier();
-    assertTrue("Expected identifier to expire in no more than 5 minutes: " + identifier,
-        identifier.getExpirationDate() - identifier.getIssueDate() <= (5 * 60 * 1000));
-  }
-
-  /**
-   * Creates a table, adds a record to it, and then compacts the table. A simple way to make sure that the system user exists (since the master does an RPC to
-   * the tserver, which will create the system user if it doesn't already exist).
-   */
-  private void createTableWithDataAndCompact(Connector conn) throws TableNotFoundException, AccumuloSecurityException, AccumuloException, TableExistsException {
-    final String table = testName.getMethodName() + "_table";
-    conn.tableOperations().create(table);
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
-    Mutation m = new Mutation("a");
-    m.put("b", "c", "d");
-    bw.addMutation(m);
-    bw.close();
-    conn.tableOperations().compact(table, new CompactionConfig().setFlush(true).setWait(true));
-  }
-}
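[Annotation] The delegation-token tests above all follow the same handoff shape: a Kerberos-authenticated principal obtains a token over its authenticated connection, and a UGI holding no Kerberos credentials then authenticates with that token alone. A condensed sketch of that flow; the ConnectorFactory below is an illustrative stand-in for however the caller reaches the instance (mac.getConnector in the tests).

import java.security.PrivilegedExceptionAction;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.KerberosToken;
import org.apache.hadoop.security.UserGroupInformation;

public class DelegationTokenHandoff {

  /** Illustrative stand-in for however the caller connects to the instance. */
  public interface ConnectorFactory {
    Connector connect(String principal, AuthenticationToken token) throws Exception;
  }

  public static void handoff(final String principal, String keytabPath,
      final ConnectorFactory factory) throws Exception {
    // Phase 1: a Kerberos-authenticated login requests the delegation token.
    UserGroupInformation ugi =
        UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytabPath);
    final AuthenticationToken token = ugi.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
      @Override
      public AuthenticationToken run() throws Exception {
        Connector conn = factory.connect(principal, new KerberosToken());
        return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
      }
    });
    // Phase 2: a UGI with no Kerberos credentials authenticates with the token alone.
    UserGroupInformation noCreds = UserGroupInformation.createUserForTesting("no_creds", new String[0]);
    noCreds.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        factory.connect(principal, token).tableOperations().list();
        return null;
      }
    });
  }
}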

