accumulo-commits mailing list archives

From mwa...@apache.org
Subject [1/7] accumulo git commit: ACCUMULO-4511 Removed Accumulo Examples
Date Fri, 09 Dec 2016 17:16:47 GMT
Repository: accumulo
Updated Branches:
  refs/heads/master 13201a814 -> 8e0f19a1c


http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/examples/simple/filedata/ChunkInputStreamIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/examples/simple/filedata/ChunkInputStreamIT.java b/test/src/main/java/org/apache/accumulo/test/examples/simple/filedata/ChunkInputStreamIT.java
deleted file mode 100644
index 5b956d7..0000000
--- a/test/src/main/java/org/apache/accumulo/test/examples/simple/filedata/ChunkInputStreamIT.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.test.examples.simple.filedata;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyValue;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.PeekingIterator;
-import org.apache.accumulo.examples.simple.filedata.ChunkInputStream;
-import org.apache.accumulo.examples.simple.filedata.FileDataIngest;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Before;
-import org.junit.Test;
-
-public class ChunkInputStreamIT extends AccumuloClusterHarness {
-
-  private static final Authorizations AUTHS = new Authorizations("A", "B", "C", "D");
-
-  private Connector conn;
-  private String tableName;
-  private List<Entry<Key,Value>> data;
-  private List<Entry<Key,Value>> baddata;
-  private List<Entry<Key,Value>> multidata;
-
-  @Before
-  public void setupInstance() throws Exception {
-    conn = getConnector();
-    tableName = getUniqueNames(1)[0];
-    conn.securityOperations().changeUserAuthorizations(conn.whoami(), AUTHS);
-  }
-
-  @Before
-  public void setupData() {
-    data = new ArrayList<>();
-    addData(data, "a", "refs", "id\0ext", "A&B", "ext");
-    addData(data, "a", "refs", "id\0name", "A&B", "name");
-    addData(data, "a", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(data, "a", "~chunk", 100, 1, "A&B", "");
-    addData(data, "b", "refs", "id\0ext", "A&B", "ext");
-    addData(data, "b", "refs", "id\0name", "A&B", "name");
-    addData(data, "b", "~chunk", 100, 0, "A&B", "qwertyuiop");
-    addData(data, "b", "~chunk", 100, 0, "B&C", "qwertyuiop");
-    addData(data, "b", "~chunk", 100, 1, "A&B", "");
-    addData(data, "b", "~chunk", 100, 1, "B&C", "");
-    addData(data, "b", "~chunk", 100, 1, "D", "");
-    addData(data, "c", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(data, "c", "~chunk", 100, 1, "A&B", "asdfjkl;");
-    addData(data, "c", "~chunk", 100, 2, "A&B", "");
-    addData(data, "d", "~chunk", 100, 0, "A&B", "");
-    addData(data, "e", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(data, "e", "~chunk", 100, 1, "A&B", "");
-    baddata = new ArrayList<>();
-    addData(baddata, "a", "~chunk", 100, 0, "A", "asdfjkl;");
-    addData(baddata, "b", "~chunk", 100, 0, "B", "asdfjkl;");
-    addData(baddata, "b", "~chunk", 100, 2, "C", "");
-    addData(baddata, "c", "~chunk", 100, 0, "D", "asdfjkl;");
-    addData(baddata, "c", "~chunk", 100, 2, "E", "");
-    addData(baddata, "d", "~chunk", 100, 0, "F", "asdfjkl;");
-    addData(baddata, "d", "~chunk", 100, 1, "G", "");
-    addData(baddata, "d", "~zzzzz", "colq", "H", "");
-    addData(baddata, "e", "~chunk", 100, 0, "I", "asdfjkl;");
-    addData(baddata, "e", "~chunk", 100, 1, "J", "");
-    addData(baddata, "e", "~chunk", 100, 2, "I", "asdfjkl;");
-    addData(baddata, "f", "~chunk", 100, 2, "K", "asdfjkl;");
-    addData(baddata, "g", "~chunk", 100, 0, "L", "");
-    multidata = new ArrayList<>();
-    addData(multidata, "a", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(multidata, "a", "~chunk", 100, 1, "A&B", "");
-    addData(multidata, "a", "~chunk", 200, 0, "B&C", "asdfjkl;");
-    addData(multidata, "b", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(multidata, "b", "~chunk", 200, 0, "B&C", "asdfjkl;");
-    addData(multidata, "b", "~chunk", 200, 1, "B&C", "asdfjkl;");
-    addData(multidata, "c", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(multidata, "c", "~chunk", 100, 1, "B&C", "");
-  }
-
-  static void addData(List<Entry<Key,Value>> data, String row, String cf, String cq, String vis, String value) {
-    data.add(new KeyValue(new Key(new Text(row), new Text(cf), new Text(cq), new Text(vis)), value.getBytes()));
-  }
-
-  static void addData(List<Entry<Key,Value>> data, String row, String cf, int chunkSize, int chunkCount, String vis, String value) {
-    Text chunkCQ = new Text(FileDataIngest.intToBytes(chunkSize));
-    chunkCQ.append(FileDataIngest.intToBytes(chunkCount), 0, 4);
-    data.add(new KeyValue(new Key(new Text(row), new Text(cf), chunkCQ, new Text(vis)), value.getBytes()));
-  }
-
-  @Test
-  public void testWithAccumulo() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, IOException {
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
-
-    for (Entry<Key,Value> e : data) {
-      Key k = e.getKey();
-      Mutation m = new Mutation(k.getRow());
-      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), e.getValue());
-      bw.addMutation(m);
-    }
-    bw.close();
-
-    Scanner scan = conn.createScanner(tableName, AUTHS);
-
-    ChunkInputStream cis = new ChunkInputStream();
-    byte[] b = new byte[20];
-    int read;
-    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<>(scan.iterator());
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 8);
-    assertEquals(new String(b, 0, read), "asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 10);
-    assertEquals(new String(b, 0, read), "qwertyuiop");
-    assertEquals(read = cis.read(b), -1);
-    assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 16);
-    assertEquals(new String(b, 0, read), "asdfjkl;asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-    assertEquals(cis.getVisibilities().toString(), "[A&B]");
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), -1);
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 8);
-    assertEquals(new String(b, 0, read), "asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-    cis.close();
-
-    assertFalse(pi.hasNext());
-  }
-
-}
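
For reference, the test removed above exercised the examples' ChunkInputStream by pointing it at a peeking iterator over scan results and reading until EOF, one chunked file per setSource() call. A minimal sketch of that read loop, assuming the now-removed org.apache.accumulo.examples.simple.filedata API as used in the test:

    // Sketch only: ChunkInputStream lived in the removed examples module.
    Scanner scan = conn.createScanner(tableName, AUTHS);
    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<>(scan.iterator());
    ChunkInputStream cis = new ChunkInputStream();
    byte[] buf = new byte[20];
    cis.setSource(pi);                            // position on the next chunked file
    for (int n = cis.read(buf); n != -1; n = cis.read(buf)) {
      System.out.write(buf, 0, n);                // consume one file's chunk data
    }
    cis.close();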

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
index 27d84de..181ac08 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
@@ -37,8 +37,8 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint;
-import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
+import org.apache.accumulo.test.constraints.AlphaNumKeyConstraint;
+import org.apache.accumulo.test.constraints.NumericValueConstraint;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
@@ -295,8 +295,8 @@ public class ConstraintIT extends AccumuloClusterHarness {
 
       HashMap<String,Integer> expected = new HashMap<>();
 
-      expected.put("org.apache.accumulo.examples.simple.constraints.NumericValueConstraint", numericErrors);
-      expected.put("org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint", 1);
+      expected.put("org.apache.accumulo.test.constraints.NumericValueConstraint", numericErrors);
+      expected.put("org.apache.accumulo.test.constraints.AlphaNumKeyConstraint", 1);
 
       for (ConstraintViolationSummary cvs : cvsl) {
         if (expected.get(cvs.constrainClass) != cvs.numberOfViolatingMutations) {
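
The relocated constraint classes keep the same contract. For reference, a value-checking constraint in the style of NumericValueConstraint reduces to the following sketch (hypothetical class, written against the stock org.apache.accumulo.core.constraints.Constraint interface; the real implementation now lives in org.apache.accumulo.test.constraints):

    public class NumericValueSketch implements Constraint {
      private static final short NON_NUMERIC = 1;

      @Override
      public String getViolationDescription(short violationCode) {
        return violationCode == NON_NUMERIC ? "Value is not numeric" : null;
      }

      @Override
      public List<Short> check(Environment env, Mutation mutation) {
        for (ColumnUpdate update : mutation.getUpdates())
          for (byte b : update.getValue())
            if (b < '0' || b > '9')
              return Collections.singletonList(NON_NUMERIC); // rejects the whole mutation
        return null; // no violations
      }
    }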

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java
deleted file mode 100644
index a69f4a5..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java
+++ /dev/null
@@ -1,673 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
-import org.apache.accumulo.cluster.standalone.StandaloneClusterControl;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.user.AgeOffFilter;
-import org.apache.accumulo.core.iterators.user.SummingCombiner;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.examples.simple.client.Flush;
-import org.apache.accumulo.examples.simple.client.RandomBatchScanner;
-import org.apache.accumulo.examples.simple.client.RandomBatchWriter;
-import org.apache.accumulo.examples.simple.client.ReadWriteExample;
-import org.apache.accumulo.examples.simple.client.RowOperations;
-import org.apache.accumulo.examples.simple.client.SequentialBatchWriter;
-import org.apache.accumulo.examples.simple.client.TraceDumpExample;
-import org.apache.accumulo.examples.simple.client.TracingExample;
-import org.apache.accumulo.examples.simple.combiner.StatsCombiner;
-import org.apache.accumulo.examples.simple.constraints.MaxMutationSize;
-import org.apache.accumulo.examples.simple.dirlist.Ingest;
-import org.apache.accumulo.examples.simple.dirlist.QueryUtil;
-import org.apache.accumulo.examples.simple.helloworld.InsertWithBatchWriter;
-import org.apache.accumulo.examples.simple.helloworld.ReadData;
-import org.apache.accumulo.examples.simple.isolation.InterferenceTest;
-import org.apache.accumulo.examples.simple.mapreduce.RegexExample;
-import org.apache.accumulo.examples.simple.mapreduce.RowHash;
-import org.apache.accumulo.examples.simple.mapreduce.TableToFile;
-import org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest;
-import org.apache.accumulo.examples.simple.mapreduce.WordCount;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.BulkIngestExample;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.GenerateTestData;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.SetupTable;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.VerifyIngest;
-import org.apache.accumulo.examples.simple.shard.ContinuousQuery;
-import org.apache.accumulo.examples.simple.shard.Index;
-import org.apache.accumulo.examples.simple.shard.Query;
-import org.apache.accumulo.examples.simple.shard.Reverse;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl.LogWriter;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.start.Main;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.categories.StandaloneCapableClusterTests;
-import org.apache.accumulo.test.categories.SunnyDayTests;
-import org.apache.accumulo.tracer.TraceServer;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.util.Tool;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-@Category({StandaloneCapableClusterTests.class, SunnyDayTests.class})
-public class ExamplesIT extends AccumuloClusterHarness {
-  private static final Logger log = LoggerFactory.getLogger(ExamplesIT.class);
-  private static final BatchWriterOpts bwOpts = new BatchWriterOpts();
-  private static final BatchWriterConfig bwc = new BatchWriterConfig();
-  private static final String visibility = "A|B";
-  private static final String auths = "A,B";
-
-  Connector c;
-  String instance;
-  String keepers;
-  String user;
-  String passwd;
-  String keytab;
-  BatchWriter bw;
-  IteratorSetting is;
-  String dir;
-  FileSystem fs;
-  Authorizations origAuths;
-  boolean saslEnabled;
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopConf) {
-    // 128MB * 3
-    cfg.setDefaultMemory(cfg.getDefaultMemory() * 3, MemoryUnit.BYTE);
-  }
-
-  @Before
-  public void getClusterInfo() throws Exception {
-    c = getConnector();
-    user = getAdminPrincipal();
-    AuthenticationToken token = getAdminToken();
-    if (token instanceof KerberosToken) {
-      keytab = getAdminUser().getKeytab().getAbsolutePath();
-      saslEnabled = true;
-    } else if (token instanceof PasswordToken) {
-      passwd = new String(((PasswordToken) getAdminToken()).getPassword(), UTF_8);
-      saslEnabled = false;
-    } else {
-      Assert.fail("Unknown token type: " + token);
-    }
-    fs = getCluster().getFileSystem();
-    instance = c.getInstance().getInstanceName();
-    keepers = c.getInstance().getZooKeepers();
-    dir = new Path(cluster.getTemporaryPath(), getClass().getName()).toString();
-
-    origAuths = c.securityOperations().getUserAuthorizations(user);
-    c.securityOperations().changeUserAuthorizations(user, new Authorizations(auths.split(",")));
-  }
-
-  @After
-  public void resetAuths() throws Exception {
-    if (null != origAuths) {
-      getConnector().securityOperations().changeUserAuthorizations(getAdminPrincipal(), origAuths);
-    }
-  }
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 6 * 60;
-  }
-
-  @Test
-  public void testTrace() throws Exception {
-    Process trace = null;
-    if (ClusterType.MINI == getClusterType()) {
-      MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
-      trace = impl.exec(TraceServer.class);
-      while (!c.tableOperations().exists("trace"))
-        sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-    }
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-C", "-D", "-c"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-C", "-D", "-c"};
-    }
-    Entry<Integer,String> pair = cluster.getClusterControl().execWithStdout(TracingExample.class, args);
-    Assert.assertEquals("Expected return code of zero. STDOUT=" + pair.getValue(), 0, pair.getKey().intValue());
-    String result = pair.getValue();
-    Pattern pattern = Pattern.compile("TraceID: ([0-9a-f]+)");
-    Matcher matcher = pattern.matcher(result);
-    int count = 0;
-    while (matcher.find()) {
-      if (saslEnabled) {
-        args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--traceid", matcher.group(1)};
-      } else {
-        args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--traceid", matcher.group(1)};
-      }
-      pair = cluster.getClusterControl().execWithStdout(TraceDumpExample.class, args);
-      count++;
-    }
-    assertTrue(count > 0);
-    assertTrue("Output did not contain myApp@myHost", pair.getValue().contains("myApp@myHost"));
-    if (ClusterType.MINI == getClusterType() && null != trace) {
-      trace.destroy();
-    }
-  }
-
-  @Test
-  public void testClasspath() throws Exception {
-    Entry<Integer,String> entry = getCluster().getClusterControl().execWithStdout(Main.class, new String[] {"classpath"});
-    assertEquals(0, entry.getKey().intValue());
-    String result = entry.getValue();
-    int level1 = result.indexOf("Level 1");
-    int level2 = result.indexOf("Level 2");
-    int level3 = result.indexOf("Level 3");
-    int level4 = result.indexOf("Level 4");
-    assertTrue("Level 1 classloader not present.", level1 >= 0);
-    assertTrue("Level 2 classloader not present.", level2 > 0);
-    assertTrue("Level 3 classloader not present.", level3 > 0);
-    assertTrue("Level 4 classloader not present.", level4 > 0);
-    assertTrue(level1 < level2);
-    assertTrue(level2 < level3);
-    assertTrue(level3 < level4);
-  }
-
-  @Test
-  public void testDirList() throws Exception {
-    String[] names = getUniqueNames(3);
-    String dirTable = names[0], indexTable = names[1], dataTable = names[2];
-    String[] args;
-    String dirListDirectory;
-    switch (getClusterType()) {
-      case MINI:
-        dirListDirectory = ((MiniAccumuloClusterImpl) getCluster()).getConfig().getDir().getAbsolutePath();
-        break;
-      case STANDALONE:
-        dirListDirectory = ((StandaloneAccumuloCluster) getCluster()).getAccumuloHome();
-        break;
-      default:
-        throw new RuntimeException("Unknown cluster type");
-    }
-    assumeTrue(new File(dirListDirectory).exists());
-    // Index a directory listing on /tmp. If this is running against a standalone cluster, we can't guarantee Accumulo source will be there.
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable",
-          dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), dirListDirectory};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable",
-          dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), dirListDirectory};
-    }
-    Entry<Integer,String> entry = getClusterControl().execWithStdout(Ingest.class, args);
-    assertEquals("Got non-zero return code. Stdout=" + entry.getValue(), 0, entry.getKey().intValue());
-
-    String expectedFile;
-    switch (getClusterType()) {
-      case MINI:
-        // Should be present in a minicluster dir
-        expectedFile = "accumulo-site.xml";
-        break;
-      case STANDALONE:
-        // Should be in place on standalone installs (not having to follow symlinks)
-        expectedFile = "LICENSE";
-        break;
-      default:
-        throw new RuntimeException("Unknown cluster type");
-    }
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "--keytab", keytab, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path",
-          expectedFile};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-p", passwd, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path", expectedFile};
-    }
-    entry = getClusterControl().execWithStdout(QueryUtil.class, args);
-    if (ClusterType.MINI == getClusterType()) {
-      MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
-      for (LogWriter writer : impl.getLogWriters()) {
-        writer.flush();
-      }
-    }
-
-    log.info("result " + entry.getValue());
-    assertEquals(0, entry.getKey().intValue());
-    assertTrue(entry.getValue().contains(expectedFile));
-  }
-
-  @Test
-  public void testAgeoffFilter() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    is = new IteratorSetting(10, AgeOffFilter.class);
-    AgeOffFilter.setTTL(is, 1000L);
-    c.tableOperations().attachIterator(tableName, is);
-    sleepUninterruptibly(500, TimeUnit.MILLISECONDS); // let zookeeper updates propagate.
-    bw = c.createBatchWriter(tableName, bwc);
-    Mutation m = new Mutation("foo");
-    m.put("a", "b", "c");
-    bw.addMutation(m);
-    bw.close();
-    sleepUninterruptibly(1, TimeUnit.SECONDS);
-    assertEquals(0, Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator()));
-  }
-
-  @Test
-  public void testStatsCombiner() throws Exception {
-    String table = getUniqueNames(1)[0];
-    c.tableOperations().create(table);
-    is = new IteratorSetting(10, StatsCombiner.class);
-    StatsCombiner.setCombineAllColumns(is, true);
-
-    c.tableOperations().attachIterator(table, is);
-    bw = c.createBatchWriter(table, bwc);
-    // Write two mutations otherwise the NativeMap would dedupe them into a single update
-    Mutation m = new Mutation("foo");
-    m.put("a", "b", "1");
-    bw.addMutation(m);
-    m = new Mutation("foo");
-    m.put("a", "b", "3");
-    bw.addMutation(m);
-    bw.flush();
-
-    Iterator<Entry<Key,Value>> iter = c.createScanner(table, Authorizations.EMPTY).iterator();
-    assertTrue("Iterator had no results", iter.hasNext());
-    Entry<Key,Value> e = iter.next();
-    assertEquals("Results ", "1,3,4,2", e.getValue().toString());
-    assertFalse("Iterator had additional results", iter.hasNext());
-
-    m = new Mutation("foo");
-    m.put("a", "b", "0,20,20,2");
-    bw.addMutation(m);
-    bw.close();
-
-    iter = c.createScanner(table, Authorizations.EMPTY).iterator();
-    assertTrue("Iterator had no results", iter.hasNext());
-    e = iter.next();
-    assertEquals("Results ", "0,20,24,4", e.getValue().toString());
-    assertFalse("Iterator had additional results", iter.hasNext());
-  }
-
-  @Test
-  public void testBloomFilters() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "100000", "--min", "0", "--max",
-          "1000000000", "--size", "50", "--batchMemory", "2M", "--batchLatency", "60s", "--batchThreads", "3", "-t", tableName};
-    } else {
-      args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "100000", "--min", "0", "--max", "1000000000",
-          "--size", "50", "--batchMemory", "2M", "--batchLatency", "60s", "--batchThreads", "3", "-t", tableName};
-    }
-    goodExec(RandomBatchWriter.class, args);
-    c.tableOperations().flush(tableName, null, null, true);
-    long diff = 0, diff2 = 0;
-    // try the speed test a couple times in case the system is loaded with other tests
-    for (int i = 0; i < 2; i++) {
-      long now = System.currentTimeMillis();
-      if (saslEnabled) {
-        args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "10000", "--min", "0", "--max",
-            "1000000000", "--size", "50", "--scanThreads", "4", "-t", tableName};
-      } else {
-        args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "10000", "--min", "0", "--max", "1000000000",
-            "--size", "50", "--scanThreads", "4", "-t", tableName};
-      }
-      goodExec(RandomBatchScanner.class, args);
-      diff = System.currentTimeMillis() - now;
-      now = System.currentTimeMillis();
-      if (saslEnabled) {
-        args = new String[] {"--seed", "8", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "10000", "--min", "0", "--max",
-            "1000000000", "--size", "50", "--scanThreads", "4", "-t", tableName};
-      } else {
-        args = new String[] {"--seed", "8", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "10000", "--min", "0", "--max", "1000000000",
-            "--size", "50", "--scanThreads", "4", "-t", tableName};
-      }
-      int retCode = getClusterControl().exec(RandomBatchScanner.class, args);
-      assertEquals(1, retCode);
-      diff2 = System.currentTimeMillis() - now;
-      if (diff2 < diff)
-        break;
-    }
-    assertTrue(diff2 < diff);
-  }
-
-  @Test
-  public void testShardedIndex() throws Exception {
-    File src = new File(System.getProperty("user.dir") + "/src");
-    assumeTrue(src.exists());
-    String[] names = getUniqueNames(3);
-    final String shard = names[0], index = names[1];
-    c.tableOperations().create(shard);
-    c.tableOperations().create(index);
-    bw = c.createBatchWriter(shard, bwc);
-    Index.index(30, src, "\\W+", bw);
-    bw.close();
-    BatchScanner bs = c.createBatchScanner(shard, Authorizations.EMPTY, 4);
-    List<String> found = Query.query(bs, Arrays.asList("foo", "bar"), null);
-    bs.close();
-    // should find ourselves
-    boolean thisFile = false;
-    for (String file : found) {
-      if (file.endsWith("/ExamplesIT.java"))
-        thisFile = true;
-    }
-    assertTrue(thisFile);
-
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "--keytab", keytab};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", getAdminPrincipal(), "-p", passwd};
-    }
-    // create a reverse index
-    goodExec(Reverse.class, args);
-
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "--keytab", keytab, "--terms", "5",
-          "--count", "1000"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "-p", passwd, "--terms", "5", "--count",
-          "1000"};
-    }
-    // run some queries
-    goodExec(ContinuousQuery.class, args);
-  }
-
-  @Test
-  public void testMaxMutationConstraint() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    c.tableOperations().addConstraint(tableName, MaxMutationSize.class.getName());
-    TestIngest.Opts opts = new TestIngest.Opts();
-    opts.rows = 1;
-    opts.cols = 1000;
-    opts.setTableName(tableName);
-    if (saslEnabled) {
-      opts.updateKerberosCredentials(cluster.getClientConfig());
-    } else {
-      opts.setPrincipal(getAdminPrincipal());
-    }
-    try {
-      TestIngest.ingest(c, opts, bwOpts);
-    } catch (MutationsRejectedException ex) {
-      assertEquals(1, ex.getConstraintViolationSummaries().size());
-    }
-  }
-
-  @Test
-  public void testBulkIngest() throws Exception {
-    // TODO Figure out a way to run M/R with Kerberos
-    assumeTrue(getAdminToken() instanceof PasswordToken);
-    String tableName = getUniqueNames(1)[0];
-    FileSystem fs = getFileSystem();
-    Path p = new Path(dir, "tmp");
-    if (fs.exists(p)) {
-      fs.delete(p, true);
-    }
-    goodExec(GenerateTestData.class, "--start-row", "0", "--count", "10000", "--output", dir + "/tmp/input/data");
-
-    List<String> commonArgs = new ArrayList<>(Arrays.asList(new String[] {"-i", instance, "-z", keepers, "-u", user, "--table", tableName}));
-    if (saslEnabled) {
-      commonArgs.add("--keytab");
-      commonArgs.add(keytab);
-    } else {
-      commonArgs.add("-p");
-      commonArgs.add(passwd);
-    }
-
-    List<String> args = new ArrayList<>(commonArgs);
-    goodExec(SetupTable.class, args.toArray(new String[0]));
-
-    args = new ArrayList<>(commonArgs);
-    args.addAll(Arrays.asList(new String[] {"--inputDir", dir + "/tmp/input", "--workDir", dir + "/tmp"}));
-    goodExec(BulkIngestExample.class, args.toArray(new String[0]));
-
-    args = new ArrayList<>(commonArgs);
-    args.addAll(Arrays.asList(new String[] {"--start-row", "0", "--count", "10000"}));
-    goodExec(VerifyIngest.class, args.toArray(new String[0]));
-  }
-
-  @Test
-  public void testTeraSortAndRead() throws Exception {
-    // TODO Figure out a way to run M/R with Kerberos
-    assumeTrue(getAdminToken() instanceof PasswordToken);
-    String tableName = getUniqueNames(1)[0];
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-i", instance, "-z", keepers,
-          "-u", user, "--keytab", keytab, "--splits", "4"};
-    } else {
-      args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-i", instance, "-z", keepers,
-          "-u", user, "-p", passwd, "--splits", "4"};
-    }
-    goodExec(TeraSortIngest.class, args);
-    Path output = new Path(dir, "tmp/nines");
-    if (fs.exists(output)) {
-      fs.delete(output, true);
-    }
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--rowRegex", ".*999.*", "--output",
-          output.toString()};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--rowRegex", ".*999.*", "--output", output.toString()};
-    }
-    goodExec(RegexExample.class, args);
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--column", "c:"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--column", "c:"};
-    }
-    goodExec(RowHash.class, args);
-    output = new Path(dir, "tmp/tableFile");
-    if (fs.exists(output)) {
-      fs.delete(output, true);
-    }
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--output", output.toString()};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--output", output.toString()};
-    }
-    goodExec(TableToFile.class, args);
-  }
-
-  @Test
-  public void testWordCount() throws Exception {
-    // TODO Figure out a way to run M/R with Kerberos
-    assumeTrue(getAdminToken() instanceof PasswordToken);
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    is = new IteratorSetting(10, SummingCombiner.class);
-    SummingCombiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column(new Text("count"))));
-    SummingCombiner.setEncodingType(is, SummingCombiner.Type.STRING);
-    c.tableOperations().attachIterator(tableName, is);
-    Path readme = new Path(new Path(System.getProperty("user.dir")).getParent(), "README.md");
-    if (!new File(readme.toString()).exists()) {
-      log.info("Not running test: README.md does not exist)");
-      return;
-    }
-    fs.copyFromLocalFile(readme, new Path(dir + "/tmp/wc/README.md"));
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-u", user, "--keytab", keytab, "-z", keepers, "--input", dir + "/tmp/wc", "-t", tableName};
-    } else {
-      args = new String[] {"-i", instance, "-u", user, "-p", passwd, "-z", keepers, "--input", dir + "/tmp/wc", "-t", tableName};
-    }
-    goodExec(WordCount.class, args);
-  }
-
-  @Test
-  public void testInsertWithBatchWriterAndReadData() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName};
-    }
-    goodExec(InsertWithBatchWriter.class, args);
-    goodExec(ReadData.class, args);
-  }
-
-  @Test
-  public void testIsolatedScansWithInterference() throws Exception {
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"};
-    }
-    goodExec(InterferenceTest.class, args);
-  }
-
-  @Test
-  public void testScansWithInterference() throws Exception {
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", getUniqueNames(1)[0], "--iterations", "100000"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", getUniqueNames(1)[0], "--iterations", "100000"};
-    }
-    goodExec(InterferenceTest.class, args);
-  }
-
-  @Test
-  public void testRowOperations() throws Exception {
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd};
-    }
-    goodExec(RowOperations.class, args);
-  }
-
-  @Test
-  public void testBatchWriter() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--start", "0", "--num", "100000", "--size", "50",
-          "--batchMemory", "10000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--start", "0", "--num", "100000", "--size", "50",
-          "--batchMemory", "10000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
-    }
-    goodExec(SequentialBatchWriter.class, args);
-
-  }
-
-  @Test
-  public void testReadWriteAndDelete() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--auths", auths, "--table", tableName, "--createtable", "-c",
-          "--debug"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", tableName, "--createtable", "-c", "--debug"};
-    }
-    goodExec(ReadWriteExample.class, args);
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--auths", auths, "--table", tableName, "-d", "--debug"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", tableName, "-d", "--debug"};
-    }
-    goodExec(ReadWriteExample.class, args);
-
-  }
-
-  @Test
-  public void testRandomBatchesAndFlush() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName, "--num", "100000", "--min", "0", "--max",
-          "100000", "--size", "100", "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName, "--num", "100000", "--min", "0", "--max", "100000",
-          "--size", "100", "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
-    }
-    goodExec(RandomBatchWriter.class, args);
-
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName, "--num", "10000", "--min", "0", "--max",
-          "100000", "--size", "100", "--scanThreads", "4", "--auths", auths};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName, "--num", "10000", "--min", "0", "--max", "100000",
-          "--size", "100", "--scanThreads", "4", "--auths", auths};
-    }
-    goodExec(RandomBatchScanner.class, args);
-
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName};
-    }
-    goodExec(Flush.class, args);
-  }
-
-  private void goodExec(Class<?> theClass, String... args) throws InterruptedException, IOException {
-    Entry<Integer,String> pair;
-    if (Tool.class.isAssignableFrom(theClass) && ClusterType.STANDALONE == getClusterType()) {
-      StandaloneClusterControl control = (StandaloneClusterControl) getClusterControl();
-      pair = control.execMapreduceWithStdout(theClass, args);
-    } else {
-      // We're already slurping stdout into memory (not redirecting to file). Might as well add it to error message.
-      pair = getClusterControl().execWithStdout(theClass, args);
-    }
-    Assert.assertEquals("stdout=" + pair.getValue(), 0, pair.getKey().intValue());
-  }
-}
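
One detail worth keeping from the removed testStatsCombiner: the examples' StatsCombiner encodes each aggregate as a min,max,sum,count tuple, which is what the assertions above decode. An illustrative merge (a sketch of the semantics, not the combiner's actual code):

    // Each stat value is {min, max, sum, count}.
    static long[] merge(long[] a, long[] b) {
      return new long[] {Math.min(a[0], b[0]), Math.max(a[1], b[1]), a[2] + b[2], a[3] + b[3]};
    }
    // writing "1" then "3"        -> "1,3,4,2"  (min 1, max 3, sum 4, count 2)
    // then merging in "0,20,20,2" -> "0,20,24,4"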

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java
index 3797e5b..c690baf 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java
@@ -38,7 +38,7 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.examples.simple.mapreduce.RowHash;
+import org.apache.accumulo.test.mapreduce.RowHash;
 import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/mapreduce/RowHash.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/mapreduce/RowHash.java b/test/src/main/java/org/apache/accumulo/test/mapreduce/RowHash.java
new file mode 100644
index 0000000..48b5b33
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/mapreduce/RowHash.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.mapreduce;
+
+import java.io.IOException;
+import java.util.Base64;
+import java.util.Collections;
+
+import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
+import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
+import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import com.beust.jcommander.Parameter;
+
+public class RowHash extends Configured implements Tool {
+  /**
+   * The Mapper class that, given an input key/value pair, emits a mutation containing the Base64-encoded MD5 hash of the value.
+   */
+  public static class HashDataMapper extends Mapper<Key,Value,Text,Mutation> {
+    @Override
+    public void map(Key row, Value data, Context context) throws IOException, InterruptedException {
+      Mutation m = new Mutation(row.getRow());
+      m.put(new Text("cf-HASHTYPE"), new Text("cq-MD5BASE64"), new Value(Base64.getEncoder().encode(MD5Hash.digest(data.toString()).getDigest())));
+      context.write(null, m);
+      context.progress();
+    }
+
+    @Override
+    public void setup(Context job) {}
+  }
+
+  private static class Opts extends MapReduceClientOnRequiredTable {
+    @Parameter(names = "--column", required = true)
+    String column;
+  }
+
+  @Override
+  public int run(String[] args) throws Exception {
+    Job job = Job.getInstance(getConf());
+    job.setJobName(this.getClass().getName());
+    job.setJarByClass(this.getClass());
+    Opts opts = new Opts();
+    opts.parseArgs(RowHash.class.getName(), args);
+    job.setInputFormatClass(AccumuloInputFormat.class);
+    opts.setAccumuloConfigs(job);
+
+    String col = opts.column;
+    int idx = col.indexOf(":");
+    Text cf = new Text(idx < 0 ? col : col.substring(0, idx));
+    Text cq = idx < 0 ? null : new Text(col.substring(idx + 1));
+    if (cf.getLength() > 0)
+      AccumuloInputFormat.fetchColumns(job, Collections.singleton(new Pair<>(cf, cq)));
+
+    job.setMapperClass(HashDataMapper.class);
+    job.setMapOutputKeyClass(Text.class);
+    job.setMapOutputValueClass(Mutation.class);
+
+    job.setNumReduceTasks(0);
+
+    job.setOutputFormatClass(AccumuloOutputFormat.class);
+
+    job.waitForCompletion(true);
+    return job.isSuccessful() ? 0 : 1;
+  }
+
+  public static void main(String[] args) throws Exception {
+    ToolRunner.run(new Configuration(), new RowHash(), args);
+  }
+}
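
Driving the relocated RowHash matches how ExamplesIT invoked its predecessor; a minimal sketch (instance, zookeepers, credentials, and table name are placeholders; "-t" and "--column" come from the Opts above, the remaining flags from MapReduceClientOnRequiredTable):

    // Hypothetical invocation of the tool above.
    String[] args = {"-i", "instance", "-z", "zk1:2181", "-u", "root", "-p", "secret",
        "-t", "mytable", "--column", "c:"};
    int rc = ToolRunner.run(new Configuration(), new RowHash(), args);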

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/mapreduce/TeraSortIngest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/mapreduce/TeraSortIngest.java b/test/src/main/java/org/apache/accumulo/test/mapreduce/TeraSortIngest.java
new file mode 100644
index 0000000..28762e0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/mapreduce/TeraSortIngest.java
@@ -0,0 +1,399 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.accumulo.test.mapreduce;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import com.beust.jcommander.Parameter;
+
+/**
+ * Generate the *almost* official terasort input data set. (See below) The user specifies the number of rows and the output directory and this class runs a
+ * map/reduce program to generate the data. The format of the data is:
+ * <ul>
+ * <li>(10 bytes key) (10 bytes rowid) (78 bytes filler) \r \n
+ * <li>The keys are random characters from the set ' ' .. '~'.
+ * <li>The rowid is the right-justified row id as an int.
+ * <li>The filler consists of 7 runs of 10 characters from 'A' to 'Z'.
+ * </ul>
+ *
+ * This TeraSort is slightly modified to allow for variable-length key and value sizes; the row length isn't variable. To generate a terabyte of data the
+ * same way TeraSort does, use 10000000000 rows with a 10/10 byte key length and a 78/78 byte value length. Along with the 10 byte row id and \r\n, this
+ * gives 100 bytes per row * 10000000000 rows = 1 TB. The min/max ranges for the key and value parameters are both inclusive.
+ */
+public class TeraSortIngest extends Configured implements Tool {
+  /**
+   * An input format that assigns ranges of longs to each mapper.
+   */
+  static class RangeInputFormat extends InputFormat<LongWritable,NullWritable> {
+    /**
+     * An input split consisting of a range of numbers.
+     */
+    static class RangeInputSplit extends InputSplit implements Writable {
+      long firstRow;
+      long rowCount;
+
+      public RangeInputSplit() {}
+
+      public RangeInputSplit(long offset, long length) {
+        firstRow = offset;
+        rowCount = length;
+      }
+
+      @Override
+      public long getLength() throws IOException {
+        return 0;
+      }
+
+      @Override
+      public String[] getLocations() throws IOException {
+        return new String[] {};
+      }
+
+      @Override
+      public void readFields(DataInput in) throws IOException {
+        firstRow = WritableUtils.readVLong(in);
+        rowCount = WritableUtils.readVLong(in);
+      }
+
+      @Override
+      public void write(DataOutput out) throws IOException {
+        WritableUtils.writeVLong(out, firstRow);
+        WritableUtils.writeVLong(out, rowCount);
+      }
+    }
+
+    /**
+     * A record reader that will generate a range of numbers.
+     */
+    static class RangeRecordReader extends RecordReader<LongWritable,NullWritable> {
+      long startRow;
+      long finishedRows;
+      long totalRows;
+
+      public RangeRecordReader(RangeInputSplit split) {
+        startRow = split.firstRow;
+        finishedRows = 0;
+        totalRows = split.rowCount;
+      }
+
+      @Override
+      public void close() throws IOException {}
+
+      @Override
+      public float getProgress() throws IOException {
+        return finishedRows / (float) totalRows;
+      }
+
+      @Override
+      public LongWritable getCurrentKey() throws IOException, InterruptedException {
+        return new LongWritable(startRow + finishedRows);
+      }
+
+      @Override
+      public NullWritable getCurrentValue() throws IOException, InterruptedException {
+        return NullWritable.get();
+      }
+
+      @Override
+      public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {}
+
+      @Override
+      public boolean nextKeyValue() throws IOException, InterruptedException {
+        if (finishedRows < totalRows) {
+          ++finishedRows;
+          return true;
+        }
+        return false;
+      }
+    }
+
+    @Override
+    public RecordReader<LongWritable,NullWritable> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
+      // reporter.setStatus("Creating record reader");
+      return new RangeRecordReader((RangeInputSplit) split);
+    }
+
+    /**
+     * Create the desired number of splits, dividing the number of rows between the mappers.
+     */
+    @Override
+    public List<InputSplit> getSplits(JobContext job) {
+      long totalRows = job.getConfiguration().getLong(NUMROWS, 0);
+      int numSplits = job.getConfiguration().getInt(NUMSPLITS, 1);
+      long rowsPerSplit = totalRows / numSplits;
+      System.out.println("Generating " + totalRows + " using " + numSplits + " maps with step of " + rowsPerSplit);
+      ArrayList<InputSplit> splits = new ArrayList<>(numSplits);
+      long currentRow = 0;
+      for (int split = 0; split < numSplits - 1; ++split) {
+        splits.add(new RangeInputSplit(currentRow, rowsPerSplit));
+        currentRow += rowsPerSplit;
+      }
+      splits.add(new RangeInputSplit(currentRow, totalRows - currentRow));
+      System.out.println("Done Generating.");
+      return splits;
+    }
+
+  }
+
+  private static String NUMSPLITS = "terasort.overridesplits";
+  private static String NUMROWS = "terasort.numrows";
+
+  static class RandomGenerator {
+    private long seed = 0;
+    private static final long mask32 = (1l << 32) - 1;
+    /**
+     * The number of iterations separating the precomputed seeds.
+     */
+    private static final int seedSkip = 128 * 1024 * 1024;
+    /**
+     * The precomputed seed values after every seedSkip iterations. There should be enough values that 2**32 iterations are covered.
+     */
+    private static final long[] seeds = new long[] {0L, 4160749568L, 4026531840L, 3892314112L, 3758096384L, 3623878656L, 3489660928L, 3355443200L, 3221225472L,
+        3087007744L, 2952790016L, 2818572288L, 2684354560L, 2550136832L, 2415919104L, 2281701376L, 2147483648L, 2013265920L, 1879048192L, 1744830464L,
+        1610612736L, 1476395008L, 1342177280L, 1207959552L, 1073741824L, 939524096L, 805306368L, 671088640L, 536870912L, 402653184L, 268435456L, 134217728L,};
+
+    /**
+     * Start the random number generator on the given iteration.
+     *
+     * @param initialIteration
+     *          the iteration number to start on
+     */
+    RandomGenerator(long initialIteration) {
+      int baseIndex = (int) ((initialIteration & mask32) / seedSkip);
+      seed = seeds[baseIndex];
+      for (int i = 0; i < initialIteration % seedSkip; ++i) {
+        next();
+      }
+    }
+
+    RandomGenerator() {
+      this(0);
+    }
+
+    long next() {
+      seed = (seed * 3141592621l + 663896637) & mask32;
+      return seed;
+    }
+  }
+
+  /**
+   * The Mapper class that, given a row number, will generate the appropriate output line.
+   */
+  public static class SortGenMapper extends Mapper<LongWritable,NullWritable,Text,Mutation> {
+    private Text tableName = null;
+    private int minkeylength = 0;
+    private int maxkeylength = 0;
+    private int minvaluelength = 0;
+    private int maxvaluelength = 0;
+
+    private Text key = new Text();
+    private Text value = new Text();
+    private RandomGenerator rand;
+    private byte[] keyBytes; // = new byte[12];
+    private byte[] spaces = "          ".getBytes();
+    private byte[][] filler = new byte[26][];
+    {
+      for (int i = 0; i < 26; ++i) {
+        filler[i] = new byte[10];
+        for (int j = 0; j < 10; ++j) {
+          filler[i][j] = (byte) ('A' + i);
+        }
+      }
+    }
+
+    private Random random = new Random();
+
+    /**
+     * Add a random key to the text.
+     */
+    private void addKey() {
+      int range = random.nextInt(maxkeylength - minkeylength + 1);
+      int keylen = range + minkeylength;
+      int keyceil = keylen + (4 - (keylen % 4)); // pad keylen up to a multiple of four (always adds 1-4 bytes)
+      keyBytes = new byte[keyceil];
+
+      long temp = 0;
+      // unpack each 32-bit random word into four printable ASCII characters (base 95, offset from ' ')
+      for (int i = 0; i < keyceil / 4; i++) {
+        temp = rand.next() / 52;
+        keyBytes[3 + 4 * i] = (byte) (' ' + (temp % 95));
+        temp /= 95;
+        keyBytes[2 + 4 * i] = (byte) (' ' + (temp % 95));
+        temp /= 95;
+        keyBytes[1 + 4 * i] = (byte) (' ' + (temp % 95));
+        temp /= 95;
+        keyBytes[4 * i] = (byte) (' ' + (temp % 95));
+      }
+      key.set(keyBytes, 0, keylen);
+    }
+
+    /**
+     * Return the row id as a ten-character, space-padded string.
+     */
+    private Text getRowIdString(long rowId) {
+      Text paddedRowIdString = new Text();
+      byte[] rowid = Long.toString(rowId).getBytes();
+      int padSpace = 10 - rowid.length;
+      if (padSpace > 0) {
+        paddedRowIdString.append(spaces, 0, padSpace);
+      }
+      paddedRowIdString.append(rowid, 0, Math.min(rowid.length, 10));
+      return paddedRowIdString;
+    }
+
+    /**
+     * Add the required filler bytes. In the classic 100-byte TeraSort layout the value is 7 blocks of 10 characters plus 1 block of 8 characters; here the length is drawn from [minvaluelength, maxvaluelength].
+     *
+     * @param rowId
+     *          the current row number
+     */
+    private void addFiller(long rowId) {
+      int base = (int) ((rowId * 8) % 26);
+
+      // derive the value length deterministically from the current generator state
+      Random random = new Random(rand.seed);
+
+      int range = random.nextInt(maxvaluelength - minvaluelength + 1);
+      int valuelen = range + minvaluelength;
+
+      while (valuelen > 10) {
+        value.append(filler[(base + valuelen) % 26], 0, 10);
+        valuelen -= 10;
+      }
+
+      if (valuelen > 0)
+        value.append(filler[(base + valuelen) % 26], 0, valuelen);
+    }
+
+    @Override
+    public void map(LongWritable row, NullWritable ignored, Context context) throws IOException, InterruptedException {
+      context.setStatus("Entering");
+      long rowId = row.get();
+      if (rand == null) {
+        // we use 3 random numbers per row
+        rand = new RandomGenerator(rowId * 3);
+      }
+      addKey();
+      value.clear();
+      addFiller(rowId);
+
+      // emit the generated row: random key, fixed column family "c",
+      // padded row number as the qualifier, filler text as the value
+      Mutation m = new Mutation(key);
+      m.put(new Text("c"), // column family
+          getRowIdString(rowId), // column qualifier
+          new Value(value.toString().getBytes())); // data
+
+      context.setStatus("About to add to accumulo");
+      context.write(tableName, m);
+      context.setStatus("Added to accumulo " + key.toString());
+    }
+
+    @Override
+    public void setup(Context job) {
+      minkeylength = job.getConfiguration().getInt("cloudgen.minkeylength", 0);
+      maxkeylength = job.getConfiguration().getInt("cloudgen.maxkeylength", 0);
+      minvaluelength = job.getConfiguration().getInt("cloudgen.minvaluelength", 0);
+      maxvaluelength = job.getConfiguration().getInt("cloudgen.maxvaluelength", 0);
+      tableName = new Text(job.getConfiguration().get("cloudgen.tablename"));
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    ToolRunner.run(new Configuration(), new TeraSortIngest(), args);
+  }
+
+  static class Opts extends MapReduceClientOnRequiredTable {
+    @Parameter(names = "--count", description = "number of rows to ingest", required = true)
+    long numRows;
+    @Parameter(names = {"-nk", "--minKeySize"}, description = "miniumum key size", required = true)
+    int minKeyLength;
+    @Parameter(names = {"-xk", "--maxKeySize"}, description = "maximum key size", required = true)
+    int maxKeyLength;
+    @Parameter(names = {"-nv", "--minValueSize"}, description = "minimum key size", required = true)
+    int minValueLength;
+    @Parameter(names = {"-xv", "--maxValueSize"}, description = "maximum key size", required = true)
+    int maxValueLength;
+    @Parameter(names = "--splits", description = "number of splits to create in the table")
+    int splits = 0;
+  }
+
+  @Override
+  public int run(String[] args) throws Exception {
+    Job job = Job.getInstance(getConf());
+    job.setJobName("TeraSortCloud");
+    job.setJarByClass(this.getClass());
+    Opts opts = new Opts();
+    opts.parseArgs(TeraSortIngest.class.getName(), args);
+
+    job.setInputFormatClass(RangeInputFormat.class);
+    job.setMapperClass(SortGenMapper.class);
+    job.setMapOutputKeyClass(Text.class);
+    job.setMapOutputValueClass(Mutation.class);
+
+    job.setNumReduceTasks(0);
+
+    job.setOutputFormatClass(AccumuloOutputFormat.class);
+    opts.setAccumuloConfigs(job);
+    BatchWriterConfig bwConfig = new BatchWriterConfig().setMaxMemory(10L * 1000 * 1000);
+    AccumuloOutputFormat.setBatchWriterOptions(job, bwConfig);
+
+    Configuration conf = job.getConfiguration();
+    conf.setLong(NUMROWS, opts.numRows);
+    conf.setInt("cloudgen.minkeylength", opts.minKeyLength);
+    conf.setInt("cloudgen.maxkeylength", opts.maxKeyLength);
+    conf.setInt("cloudgen.minvaluelength", opts.minValueLength);
+    conf.setInt("cloudgen.maxvaluelength", opts.maxValueLength);
+    conf.set("cloudgen.tablename", opts.getTableName());
+
+    // only override the default split count when one was requested
+    if (opts.splits > 0)
+      conf.setInt(NUMSPLITS, opts.splits);
+
+    job.waitForCompletion(true);
+    return job.isSuccessful() ? 0 : 1;
+  }
+}
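
For reference, the skip-ahead table in RandomGenerator can be sanity-checked outside the job. A minimal standalone sketch (an illustration, not part of this commit; the class name is invented) that replays the generator from iteration zero and compares the result to the first precomputed seed:

    public class SeedTableCheck {
      public static void main(String[] args) {
        final long mask32 = (1L << 32) - 1;
        final int seedSkip = 128 * 1024 * 1024;
        long seed = 0; // seeds[0]
        // replay seedSkip steps of the same linear congruential generator
        for (int i = 0; i < seedSkip; i++) {
          seed = (seed * 3141592621L + 663896637L) & mask32;
        }
        // if the table is consistent, this prints the value of seeds[1]
        System.out.println(seed == 4160749568L ? "matches seeds[1]" : "unexpected: " + seed);
      }
    }

And a hypothetical launch of the relocated job; the -i/-z/-u/-p/-t client options come from MapReduceClientOnRequiredTable and every value below is a placeholder, not taken from this diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.ToolRunner;

    public class TeraSortIngestLaunch {
      public static void main(String[] args) throws Exception {
        String[] jobArgs = {
            "-i", "instance", "-z", "zkhost:2181", "-u", "root", "-p", "secret", // assumed client options
            "-t", "terasort", // destination table
            "--count", "1000000", // rows to generate
            "-nk", "10", "-xk", "10", // fixed 10-byte keys
            "-nv", "78", "-xv", "78", // fixed 78-byte values, the classic ~100-byte TeraSort row
            "--splits", "4"}; // number of map tasks
        System.exit(ToolRunner.run(new Configuration(), new org.apache.accumulo.test.mapreduce.TeraSortIngest(), jobArgs));
      }
    }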

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java b/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
index 5bb4ad6..0ba8bf2 100644
--- a/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
@@ -64,8 +64,8 @@ import org.apache.accumulo.core.iterators.user.VersioningIterator;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.ByteBufferUtil;
-import org.apache.accumulo.examples.simple.constraints.MaxMutationSize;
-import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
+import org.apache.accumulo.test.constraints.MaxMutationSize;
+import org.apache.accumulo.test.constraints.NumericValueConstraint;
 import org.apache.accumulo.harness.MiniClusterHarness;
 import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.accumulo.harness.TestingKdc;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyNamespaceOperations.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyNamespaceOperations.java b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyNamespaceOperations.java
index 8dc2990..301145d 100644
--- a/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyNamespaceOperations.java
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyNamespaceOperations.java
@@ -148,12 +148,11 @@ public class TestProxyNamespaceOperations {
 
   @Test
   public void namespaceConstraints() throws TException {
-    int constraintId = tpc.proxy().addNamespaceConstraint(userpass, testnamespace, "org.apache.accumulo.examples.simple.constraints.MaxMutationSize");
-    assertTrue(tpc.proxy().listNamespaceConstraints(userpass, testnamespace).containsKey("org.apache.accumulo.examples.simple.constraints.MaxMutationSize"));
-    assertEquals(constraintId,
-        (int) tpc.proxy().listNamespaceConstraints(userpass, testnamespace).get("org.apache.accumulo.examples.simple.constraints.MaxMutationSize"));
+    int constraintId = tpc.proxy().addNamespaceConstraint(userpass, testnamespace, "org.apache.accumulo.test.constraints.MaxMutationSize");
+    assertTrue(tpc.proxy().listNamespaceConstraints(userpass, testnamespace).containsKey("org.apache.accumulo.test.constraints.MaxMutationSize"));
+    assertEquals(constraintId, (int) tpc.proxy().listNamespaceConstraints(userpass, testnamespace).get("org.apache.accumulo.test.constraints.MaxMutationSize"));
     tpc.proxy().removeNamespaceConstraint(userpass, testnamespace, constraintId);
-    assertFalse(tpc.proxy().listNamespaceConstraints(userpass, testnamespace).containsKey("org.apache.accumulo.examples.simple.constraints.MaxMutationSize"));
+    assertFalse(tpc.proxy().listNamespaceConstraints(userpass, testnamespace).containsKey("org.apache.accumulo.test.constraints.MaxMutationSize"));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/test/java/org/apache/accumulo/test/constraints/AlphaNumKeyConstraintTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/constraints/AlphaNumKeyConstraintTest.java b/test/src/test/java/org/apache/accumulo/test/constraints/AlphaNumKeyConstraintTest.java
new file mode 100644
index 0000000..7729743
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/constraints/AlphaNumKeyConstraintTest.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.constraints;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableList;
+
+public class AlphaNumKeyConstraintTest {
+
+  private AlphaNumKeyConstraint ankc = new AlphaNumKeyConstraint();
+
+  @Test
+  public void test() {
+    Mutation goodMutation = new Mutation(new Text("Row1"));
+    goodMutation.put(new Text("Colf2"), new Text("ColQ3"), new Value("value".getBytes()));
+    assertNull(ankc.check(null, goodMutation));
+
+    // Check that violations are in row, cf, cq order
+    Mutation badMutation = new Mutation(new Text("Row#1"));
+    badMutation.put(new Text("Colf$2"), new Text("Colq%3"), new Value("value".getBytes()));
+    assertEquals(ImmutableList.of(AlphaNumKeyConstraint.NON_ALPHA_NUM_ROW, AlphaNumKeyConstraint.NON_ALPHA_NUM_COLF, AlphaNumKeyConstraint.NON_ALPHA_NUM_COLQ),
+        ankc.check(null, badMutation));
+  }
+
+  @Test
+  public void testGetViolationDescription() {
+    assertEquals(AlphaNumKeyConstraint.ROW_VIOLATION_MESSAGE, ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_ROW));
+    assertEquals(AlphaNumKeyConstraint.COLF_VIOLATION_MESSAGE, ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_COLF));
+    assertEquals(AlphaNumKeyConstraint.COLQ_VIOLATION_MESSAGE, ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_COLQ));
+    assertNull(ankc.getViolationDescription((short) 4));
+  }
+}
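
The AlphaNumKeyConstraint class under test is not part of this diff. Below is a sketch reconstructed purely from the test's expectations, assuming the standard org.apache.accumulo.core.constraints.Constraint interface; the violation codes and null-for-clean behavior are implied by the test, while the message strings and internals are guesses and the real class may differ:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.accumulo.core.constraints.Constraint;
    import org.apache.accumulo.core.data.ColumnUpdate;
    import org.apache.accumulo.core.data.Mutation;

    public class AlphaNumKeyConstraint implements Constraint {

      static final short NON_ALPHA_NUM_ROW = 1;
      static final short NON_ALPHA_NUM_COLF = 2;
      static final short NON_ALPHA_NUM_COLQ = 3;

      static final String ROW_VIOLATION_MESSAGE = "Row was not alphanumeric"; // guessed wording
      static final String COLF_VIOLATION_MESSAGE = "Column family was not alphanumeric"; // guessed wording
      static final String COLQ_VIOLATION_MESSAGE = "Column qualifier was not alphanumeric"; // guessed wording

      private boolean isAlphaNum(byte[] bytes) {
        for (byte b : bytes) {
          boolean alphaNum = (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9');
          if (!alphaNum)
            return false;
        }
        return true;
      }

      @Override
      public List<Short> check(Environment env, Mutation mutation) {
        List<Short> violations = new ArrayList<>();
        // row first, then cf, then cq, matching the order the test asserts
        if (!isAlphaNum(mutation.getRow()))
          violations.add(NON_ALPHA_NUM_ROW);
        for (ColumnUpdate update : mutation.getUpdates()) {
          if (!isAlphaNum(update.getColumnFamily()) && !violations.contains(NON_ALPHA_NUM_COLF))
            violations.add(NON_ALPHA_NUM_COLF);
          if (!isAlphaNum(update.getColumnQualifier()) && !violations.contains(NON_ALPHA_NUM_COLQ))
            violations.add(NON_ALPHA_NUM_COLQ);
        }
        return violations.isEmpty() ? null : violations; // null signals a clean mutation
      }

      @Override
      public String getViolationDescription(short violationCode) {
        switch (violationCode) {
          case NON_ALPHA_NUM_ROW:
            return ROW_VIOLATION_MESSAGE;
          case NON_ALPHA_NUM_COLF:
            return COLF_VIOLATION_MESSAGE;
          case NON_ALPHA_NUM_COLQ:
            return COLQ_VIOLATION_MESSAGE;
          default:
            return null; // unknown codes, e.g. the test's (short) 4
        }
      }
    }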

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/test/java/org/apache/accumulo/test/constraints/NumericValueConstraintTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/constraints/NumericValueConstraintTest.java b/test/src/test/java/org/apache/accumulo/test/constraints/NumericValueConstraintTest.java
new file mode 100644
index 0000000..f13fd28
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/constraints/NumericValueConstraintTest.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.constraints;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.Iterables;
+
+public class NumericValueConstraintTest {
+
+  private NumericValueConstraint nvc = new NumericValueConstraint();
+
+  @Test
+  public void testCheck() {
+    Mutation goodMutation = new Mutation(new Text("r"));
+    goodMutation.put(new Text("cf"), new Text("cq"), new Value("1234".getBytes()));
+    assertNull(nvc.check(null, goodMutation));
+
+    // Check that multiple bad values in a single mutation result in only one violation
+    Mutation badMutation = new Mutation(new Text("r"));
+    badMutation.put(new Text("cf"), new Text("cq"), new Value("foo1234".getBytes()));
+    badMutation.put(new Text("cf2"), new Text("cq2"), new Value("foo1234".getBytes()));
+    assertEquals(NumericValueConstraint.NON_NUMERIC_VALUE, Iterables.getOnlyElement(nvc.check(null, badMutation)).shortValue());
+  }
+
+  @Test
+  public void testGetViolationDescription() {
+    assertEquals(NumericValueConstraint.VIOLATION_MESSAGE, nvc.getViolationDescription(NumericValueConstraint.NON_NUMERIC_VALUE));
+    assertNull(nvc.getViolationDescription((short) 2));
+  }
+}
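
Likewise for NumericValueConstraint: the test pins down a single violation code even when several values in one mutation are bad, which a sketch can satisfy by returning on the first offending update. Everything below beyond that behavior is an assumption, not the class from this commit:

    import java.util.Collections;
    import java.util.List;

    import org.apache.accumulo.core.constraints.Constraint;
    import org.apache.accumulo.core.data.ColumnUpdate;
    import org.apache.accumulo.core.data.Mutation;

    public class NumericValueConstraint implements Constraint {

      static final short NON_NUMERIC_VALUE = 1;
      static final String VIOLATION_MESSAGE = "Value is not numeric"; // guessed wording

      private boolean isNumeric(byte[] bytes) {
        for (byte b : bytes) {
          if (b < '0' || b > '9')
            return false;
        }
        return true;
      }

      @Override
      public List<Short> check(Environment env, Mutation mutation) {
        for (ColumnUpdate update : mutation.getUpdates()) {
          // report one violation code no matter how many values are bad
          if (!isNumeric(update.getValue()))
            return Collections.singletonList(NON_NUMERIC_VALUE);
        }
        return null; // null signals a clean mutation
      }

      @Override
      public String getViolationDescription(short violationCode) {
        return violationCode == NON_NUMERIC_VALUE ? VIOLATION_MESSAGE : null;
      }
    }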

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/system/bench/lib/Benchmark.py
----------------------------------------------------------------------
diff --git a/test/system/bench/lib/Benchmark.py b/test/system/bench/lib/Benchmark.py
index d205e10..1481ccf 100755
--- a/test/system/bench/lib/Benchmark.py
+++ b/test/system/bench/lib/Benchmark.py
@@ -94,9 +94,9 @@ class Benchmark(unittest.TestCase):
         globjar = [ j for j in glob.glob(path) if j.find('javadoc') == -1 and j.find('sources') == -1 ]
         return globjar[0]
         
-    # Returns the location of the local examples jar
-    def getexamplejar(self):
-        return self.findjar(accumulo() + '/lib/accumulo-examples-simple.jar')
+    # Returns the location of the local test jar
+    def gettestjar(self):
+        return self.findjar(accumulo() + '/lib/accumulo-test.jar')
     
     # Returns a string of core, thrift and zookeeper jars with a specified delim
     def getjars(self, delim=','):

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/system/bench/lib/RowHashBenchmark.py
----------------------------------------------------------------------
diff --git a/test/system/bench/lib/RowHashBenchmark.py b/test/system/bench/lib/RowHashBenchmark.py
index 89b0fbb..34009d6 100755
--- a/test/system/bench/lib/RowHashBenchmark.py
+++ b/test/system/bench/lib/RowHashBenchmark.py
@@ -55,7 +55,7 @@ class RowHashBenchmark(Benchmark):
                     self.output_table) 
             self.sleep(15)
         code, out, err = cloudshell.run(self.username, self.password, "createtable %s -sf %s\n" % (self.output_table, file))
-        command = self.buildcommand('org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest',
+        command = self.buildcommand('org.apache.accumulo.test.mapreduce.TeraSortIngest',
                                     '--count', self.numrows(),
                                     '-nk', self.keysizemin(),
                                     '-xk', self.keysizemax(),
@@ -102,7 +102,7 @@ class RowHashBenchmark(Benchmark):
         return self.valmax
         
     def runTest(self):   
-        command = self.buildcommand('org.apache.accumulo.examples.simple.mapreduce.RowHash',
+        command = self.buildcommand('org.apache.accumulo.test.mapreduce.RowHash',
                                     self.getInstance(),
                                     self.getZookeepers(),
                                     self.getUsername(),

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/system/bench/lib/TeraSortBenchmark.py
----------------------------------------------------------------------
diff --git a/test/system/bench/lib/TeraSortBenchmark.py b/test/system/bench/lib/TeraSortBenchmark.py
index 0b1d9f4..f9984b2 100755
--- a/test/system/bench/lib/TeraSortBenchmark.py
+++ b/test/system/bench/lib/TeraSortBenchmark.py
@@ -72,7 +72,7 @@ class TeraSortBenchmark(Benchmark):
         dir = os.path.dirname(os.path.realpath(__file__))
         file = os.path.join( dir, 'splits' )
         code, out, err = cloudshell.run(self.username, self.password, "createtable %s -sf %s\n" % (self.tablename, file))
-        command = self.buildcommand('org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest',
+        command = self.buildcommand('org.apache.accumulo.test.mapreduce.TeraSortIngest',
                                     '--count', self.numrows(),
                                     '-nk', self.keysizemin(),
                                     '-xk', self.keysizemax(),

