accumulo-commits mailing list archives

From: els...@apache.org
Subject: [21/48] Merge branch '1.5.1-SNAPSHOT' into 1.6.0-SNAPSHOT
Date: Tue, 04 Feb 2014 17:54:50 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java
index 1e824f8,0000000..bf7906f
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java
@@@ -1,311 -1,0 +1,312 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
 +import java.util.HashMap;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map.Entry;
 +
++import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.MutationsRejectedException;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.ConstraintViolationSummary;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint;
 +import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class ConstraintIT extends SimpleMacIT {
 +
 +  @Test(timeout = 30 * 1000)
 +  public void run() throws Exception {
 +    String[] tableNames = getTableNames(3);
 +    Connector c = getConnector();
 +    for (String table : tableNames) {
 +      c.tableOperations().create(table);
 +      c.tableOperations().addConstraint(table, NumericValueConstraint.class.getName());
 +      c.tableOperations().addConstraint(table, AlphaNumKeyConstraint.class.getName());
 +    }
 +
 +    // Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
 +    // logger.setLevel(Level.TRACE);
 +
 +    test1(tableNames[0]);
 +
 +    // logger.setLevel(Level.TRACE);
 +
 +    test2(tableNames[1], false);
 +    test2(tableNames[2], true);
 +  }
 +
 +  private void test1(String tableName) throws Exception {
 +    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
 +
 +    Mutation mut1 = new Mutation(new Text("r1"));
-     mut1.put(new Text("cf1"), new Text("cq1"), new Value("123".getBytes()));
++    mut1.put(new Text("cf1"), new Text("cq1"), new Value("123".getBytes(Constants.UTF8)));
 +
 +    bw.addMutation(mut1);
 +
 +    // should not throw any exceptions
 +    bw.close();
 +
 +    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
 +
 +    // create a mutation with a non numeric value
 +    Mutation mut2 = new Mutation(new Text("r1"));
-     mut2.put(new Text("cf1"), new Text("cq1"), new Value("123a".getBytes()));
++    mut2.put(new Text("cf1"), new Text("cq1"), new Value("123a".getBytes(Constants.UTF8)));
 +
 +    bw.addMutation(mut2);
 +
 +    boolean sawMRE = false;
 +
 +    try {
 +      bw.close();
 +      // should not get here
 +      throw new Exception("Test failed, constraint did not catch bad mutation");
 +    } catch (MutationsRejectedException mre) {
 +      sawMRE = true;
 +
 +      // verify constraint violation summary
 +      List<ConstraintViolationSummary> cvsl = mre.getConstraintViolationSummaries();
 +
 +      if (cvsl.size() != 1) {
 +        throw new Exception("Unexpected constraints");
 +      }
 +
 +      for (ConstraintViolationSummary cvs : cvsl) {
 +        if (!cvs.constrainClass.equals(NumericValueConstraint.class.getName())) {
 +          throw new Exception("Unexpected constraint class " + cvs.constrainClass);
 +        }
 +
 +        if (cvs.numberOfViolatingMutations != 1) {
 +          throw new Exception("Unexpected # violating mutations " + cvs.numberOfViolatingMutations);
 +        }
 +      }
 +    }
 +
 +    if (!sawMRE) {
 +      throw new Exception("Did not see MutationsRejectedException");
 +    }
 +
 +    // verify mutation did not go through
 +    Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
 +    scanner.setRange(new Range(new Text("r1")));
 +
 +    Iterator<Entry<Key,Value>> iter = scanner.iterator();
 +    Entry<Key,Value> entry = iter.next();
 +
 +    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
-         || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123".getBytes()))) {
++        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123".getBytes(Constants.UTF8)))) {
 +      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    if (iter.hasNext()) {
 +      entry = iter.next();
 +      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    // remove the numeric value constraint
 +    getConnector().tableOperations().removeConstraint(tableName, 2);
 +    UtilWaitThread.sleep(1000);
 +
 +    // now should be able to add a non numeric value
 +    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
 +    bw.addMutation(mut2);
 +    bw.close();
 +
 +    // verify mutation went through
 +    iter = scanner.iterator();
 +    entry = iter.next();
 +
 +    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
-         || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123a".getBytes()))) {
++        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123a".getBytes(Constants.UTF8)))) {
 +      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    if (iter.hasNext()) {
 +      entry = iter.next();
 +      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    // add a constraint that references a non-existant class
 +    getConnector().tableOperations().setProperty(tableName, Property.TABLE_CONSTRAINT_PREFIX + "1", "com.foobar.nonExistantClass");
 +    UtilWaitThread.sleep(1000);
 +
 +    // add a mutation
 +    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
 +
 +    Mutation mut3 = new Mutation(new Text("r1"));
-     mut3.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes()));
++    mut3.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes(Constants.UTF8)));
 +
 +    bw.addMutation(mut3);
 +
 +    sawMRE = false;
 +
 +    try {
 +      bw.close();
 +      // should not get here
 +      throw new Exception("Test failed, mutation went through when table had bad constraints");
 +    } catch (MutationsRejectedException mre) {
 +      sawMRE = true;
 +    }
 +
 +    if (!sawMRE) {
 +      throw new Exception("Did not see MutationsRejectedException");
 +    }
 +
 +    // verify the mutation did not go through
 +    iter = scanner.iterator();
 +    entry = iter.next();
 +
 +    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
-         || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123a".getBytes()))) {
++        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123a".getBytes(Constants.UTF8)))) {
 +      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    if (iter.hasNext()) {
 +      entry = iter.next();
 +      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    // remove the bad constraint
 +    getConnector().tableOperations().removeConstraint(tableName, 1);
 +    UtilWaitThread.sleep(1000);
 +
 +    // try the mutation again
 +    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
 +    bw.addMutation(mut3);
 +    bw.close();
 +
 +    // verify it went through
 +    iter = scanner.iterator();
 +    entry = iter.next();
 +
 +    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
-         || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("foo".getBytes()))) {
++        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("foo".getBytes(Constants.UTF8)))) {
 +      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    if (iter.hasNext()) {
 +      entry = iter.next();
 +      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +  }
 +
 +  private Mutation newMut(String row, String cf, String cq, String val) {
 +    Mutation mut1 = new Mutation(new Text(row));
-     mut1.put(new Text(cf), new Text(cq), new Value(val.getBytes()));
++    mut1.put(new Text(cf), new Text(cq), new Value(val.getBytes(Constants.UTF8)));
 +    return mut1;
 +  }
 +
 +  private void test2(String table, boolean doFlush) throws Exception {
 +    // test sending multiple mutations with multiple constrain violations... all of the non violating mutations
 +    // should go through
 +    int numericErrors = 2;
 +
 +    BatchWriter bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
 +    bw.addMutation(newMut("r1", "cf1", "cq1", "123"));
 +    bw.addMutation(newMut("r1", "cf1", "cq2", "I'm a bad value"));
 +    if (doFlush) {
 +      try {
 +        bw.flush();
 +        throw new Exception("Didn't find a bad mutation");
 +      } catch (MutationsRejectedException mre) {
 +        // ignored
 +        try {
 +          bw.close();
 +        } catch (MutationsRejectedException ex) {
 +          // ignored
 +        }
 +        bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
 +        numericErrors = 1;
 +      }
 +    }
 +    bw.addMutation(newMut("r1", "cf1", "cq3", "I'm a naughty value"));
 +    bw.addMutation(newMut("@bad row@", "cf1", "cq2", "456"));
 +    bw.addMutation(newMut("r1", "cf1", "cq4", "789"));
 +
 +    boolean sawMRE = false;
 +
 +    try {
 +      bw.close();
 +      // should not get here
 +      throw new Exception("Test failed, constraint did not catch bad mutation");
 +    } catch (MutationsRejectedException mre) {
 +      System.out.println(mre);
 +
 +      sawMRE = true;
 +
 +      // verify constraint violation summary
 +      List<ConstraintViolationSummary> cvsl = mre.getConstraintViolationSummaries();
 +
 +      if (cvsl.size() != 2) {
 +        throw new Exception("Unexpected constraints");
 +      }
 +
 +      HashMap<String,Integer> expected = new HashMap<String,Integer>();
 +
 +      expected.put("org.apache.accumulo.examples.simple.constraints.NumericValueConstraint", numericErrors);
 +      expected.put("org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint", 1);
 +
 +      for (ConstraintViolationSummary cvs : cvsl) {
 +        if (expected.get(cvs.constrainClass) != cvs.numberOfViolatingMutations) {
 +          throw new Exception("Unexpected " + cvs.constrainClass + " " + cvs.numberOfViolatingMutations);
 +        }
 +      }
 +    }
 +
 +    if (!sawMRE) {
 +      throw new Exception("Did not see MutationsRejectedException");
 +    }
 +
 +    Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
 +
 +    Iterator<Entry<Key,Value>> iter = scanner.iterator();
 +
 +    Entry<Key,Value> entry = iter.next();
 +
 +    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
-         || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123".getBytes()))) {
++        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123".getBytes(Constants.UTF8)))) {
 +      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    entry = iter.next();
 +
 +    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
-         || !entry.getKey().getColumnQualifier().equals(new Text("cq4")) || !entry.getValue().equals(new Value("789".getBytes()))) {
++        || !entry.getKey().getColumnQualifier().equals(new Text("cq4")) || !entry.getValue().equals(new Value("789".getBytes(Constants.UTF8)))) {
 +      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    if (iter.hasNext()) {
 +      entry = iter.next();
 +      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +  }
 +
 +}

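A note on the pattern above: the recurring change in this file (and in the files below) replaces platform-default getBytes() calls with charset-explicit ones via Constants.UTF8, so the bytes written to Accumulo no longer depend on the JVM's default encoding. A minimal standalone sketch of the same idea, assuming only JDK classes (java.nio.charset.StandardCharsets standing in for Accumulo's Constants.UTF8 helper):

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class ExplicitCharsetSketch {
      public static void main(String[] args) {
        String s = "123";

        // Platform-default encoding: the result depends on the JVM's file.encoding.
        byte[] defaultBytes = s.getBytes();

        // Charset-explicit encoding: always UTF-8, regardless of platform defaults.
        byte[] utf8Bytes = s.getBytes(StandardCharsets.UTF_8);

        // For ASCII content the two usually agree, but only the explicit form is guaranteed.
        System.out.println(Arrays.equals(defaultBytes, utf8Bytes));

        // Decoding should name the charset for the same reason.
        System.out.println(new String(utf8Bytes, StandardCharsets.UTF_8).equals(s));
      }
    }
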
http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
index 68a6a2e,0000000..e0e93d5
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
@@@ -1,128 -1,0 +1,129 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
 +import java.util.ArrayList;
 +import java.util.Map.Entry;
 +import java.util.SortedSet;
 +import java.util.TreeSet;
 +
++import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchScanner;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class CreateAndUseIT extends SimpleMacIT {
 +
 +  @Test(timeout = 2 * 60 * 1000)
 +  public void run() throws Exception {
 +    SortedSet<Text> splits = new TreeSet<Text>();
 +
 +    for (int i = 1; i < 256; i++) {
 +      splits.add(new Text(String.format("%08x", i << 8)));
 +    }
 +
 +    // TEST 1 create a table and immediately batch write to it
 +
 +    Text cf = new Text("cf1");
 +    Text cq = new Text("cq1");
 +
 +    String[] tableNames = getTableNames(3);
 +    String tableName = tableNames[0];
 +    getConnector().tableOperations().create(tableName);
 +    getConnector().tableOperations().addSplits(tableName, splits);
 +    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
 +
 +    for (int i = 1; i < 257; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%08x", (i << 8) - 16)));
-       m.put(cf, cq, new Value(("" + i).getBytes()));
++      m.put(cf, cq, new Value(Integer.toString(i).getBytes(Constants.UTF8)));
 +
 +      bw.addMutation(m);
 +    }
 +
 +    bw.close();
 +
 +    // verify data is there
 +    Scanner scanner1 = getConnector().createScanner(tableName, Authorizations.EMPTY);
 +
 +    int ei = 1;
 +
 +    for (Entry<Key,Value> entry : scanner1) {
 +      if (!entry.getKey().getRow().toString().equals(String.format("%08x", (ei << 8) - 16))) {
 +        throw new Exception("Expected row " + String.format("%08x", (ei << 8) - 16) + " saw " + entry.getKey().getRow());
 +      }
 +
 +      if (!entry.getValue().toString().equals("" + ei)) {
 +        throw new Exception("Expected value " + ei + " saw " + entry.getValue());
 +      }
 +
 +      ei++;
 +    }
 +
 +    if (ei != 257) {
 +      throw new Exception("Did not see expected number of rows, ei = " + ei);
 +    }
 +
 +    // TEST 2 create a table and immediately scan it
 +    String table2 = tableNames[1];
 +    getConnector().tableOperations().create(table2);
 +    getConnector().tableOperations().addSplits(table2, splits);
 +    Scanner scanner2 = getConnector().createScanner(table2, Authorizations.EMPTY);
 +    int count = 0;
 +    for (Entry<Key,Value> entry : scanner2) {
 +      if (entry != null)
 +        count++;
 +    }
 +
 +    if (count != 0) {
 +      throw new Exception("Did not see expected number of entries, count = " + count);
 +    }
 +
 +    // TEST 3 create a table and immediately batch scan it
 +
 +    ArrayList<Range> ranges = new ArrayList<Range>();
 +    for (int i = 1; i < 257; i++) {
 +      ranges.add(new Range(new Text(String.format("%08x", (i << 8) - 16))));
 +    }
 +
 +    String table3 = tableNames[2];
 +    getConnector().tableOperations().create(table3);
 +    getConnector().tableOperations().addSplits(table3, splits);
 +    BatchScanner bs = getConnector().createBatchScanner(table3, Authorizations.EMPTY, 3);
 +    bs.setRanges(ranges);
 +    count = 0;
 +    for (Entry<Key,Value> entry : bs) {
 +      if (entry != null)
 +        count++;
 +    }
 +
 +    if (count != 0) {
 +      throw new Exception("Did not see expected number of entries, count = " + count);
 +    }
 +
 +    bs.close();
 +
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/test/src/test/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
index 88f936f,0000000..f00a445
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
@@@ -1,93 -1,0 +1,94 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
 +import java.util.Collections;
 +import java.util.Map.Entry;
 +
++import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class DeleteEverythingIT extends ConfigurableMacIT {
 +  
 +  @Override
 +  public void configure(MiniAccumuloConfigImpl cfg) {
 +    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "1s"));
 +  }
 +  
 +  @Test(timeout = 60 * 1000)
 +  public void run() throws Exception {
 +    Connector c = getConnector();
 +    c.tableOperations().create("de");
 +    BatchWriter bw = getConnector().createBatchWriter("de", new BatchWriterConfig());
 +    Mutation m = new Mutation(new Text("foo"));
-     m.put(new Text("bar"), new Text("1910"), new Value("5".getBytes()));
++    m.put(new Text("bar"), new Text("1910"), new Value("5".getBytes(Constants.UTF8)));
 +    bw.addMutation(m);
 +    bw.flush();
 +    
 +    getConnector().tableOperations().flush("de", null, null, true);
 +    
 +    FunctionalTestUtils.checkRFiles(c, "de", 1, 1, 1, 1);
 +    
 +    m = new Mutation(new Text("foo"));
 +    m.putDelete(new Text("bar"), new Text("1910"));
 +    bw.addMutation(m);
 +    bw.flush();
 +    
 +    Scanner scanner = getConnector().createScanner("de", Authorizations.EMPTY);
 +    scanner.setRange(new Range());
 +    
 +    int count = 0;
 +    for (@SuppressWarnings("unused")
 +    Entry<Key,Value> entry : scanner) {
 +      count++;
 +    }
 +    
 +    if (count != 0)
 +      throw new Exception("count == " + count);
 +    
 +    getConnector().tableOperations().flush("de", null, null, true);
 +    
 +    getConnector().tableOperations().setProperty("de", Property.TABLE_MAJC_RATIO.getKey(), "1.0");
 +    UtilWaitThread.sleep(4000);
 +    
 +    FunctionalTestUtils.checkRFiles(c, "de", 1, 1, 0, 0);
 +    
 +    bw.close();
 +    
 +    count = 0;
 +    for (@SuppressWarnings("unused")
 +    Entry<Key,Value> entry : scanner) {
 +      count++;
 +    }
 +    
 +    if (count != 0)
 +      throw new Exception("count == " + count);
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
index a08aa2c,0000000..1c26808
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
@@@ -1,137 -1,0 +1,138 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
 +import static org.junit.Assert.assertTrue;
 +
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.List;
 +import java.util.Map.Entry;
 +import java.util.SortedSet;
 +import java.util.TreeSet;
 +
++import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.hadoop.io.Text;
 +import org.apache.log4j.Logger;
 +import org.junit.Test;
 +
 +// attempt to reproduce ACCUMULO-315
 +public class DeleteRowsSplitIT extends SimpleMacIT {
 +
 +  private static final Logger log = Logger.getLogger(DeleteRowsSplitIT.class);
 +
 +  private static final String LETTERS = "abcdefghijklmnopqrstuvwxyz";
 +  static final SortedSet<Text> SPLITS = new TreeSet<Text>();
 +  static final List<String> ROWS = new ArrayList<String>();
 +  static {
-     for (byte b : LETTERS.getBytes()) {
++    for (byte b : LETTERS.getBytes(Constants.UTF8)) {
 +      SPLITS.add(new Text(new byte[] {b}));
-       ROWS.add(new String(new byte[] {b}));
++      ROWS.add(new String(new byte[] {b}, Constants.UTF8));
 +    }
 +  }
 +
 +  @Test(timeout = 4 * 60 * 1000)
 +  public void run() throws Exception {
 +    // Delete ranges of rows, and verify the are removed
 +    // Do this while adding many splits
 +    final String tableName = getTableNames(1)[0];
 +
 +    // Eliminate whole tablets
 +    for (int test = 0; test < 10; test++) {
 +      // create a table
 +      log.info("Test " + test);
 +      getConnector().tableOperations().create(tableName);
 +
 +      // put some data in it
 +      fillTable(tableName);
 +
 +      // generate a random delete range
 +      final Text start = new Text();
 +      final Text end = new Text();
 +      generateRandomRange(start, end);
 +
 +      // initiate the delete range
 +      final boolean fail[] = {false};
 +      Thread t = new Thread() {
 +        @Override
 +        public void run() {
 +          try {
 +            // split the table
 +            final SortedSet<Text> afterEnd = SPLITS.tailSet(new Text(end.toString() + "\0"));
 +            getConnector().tableOperations().addSplits(tableName, afterEnd);
 +          } catch (Exception ex) {
 +            log.error(ex, ex);
 +            synchronized (fail) {
 +              fail[0] = true;
 +            }
 +          }
 +        }
 +      };
 +      t.start();
 +
 +      UtilWaitThread.sleep(test * 2);
 +
 +      getConnector().tableOperations().deleteRows(tableName, start, end);
 +
 +      t.join();
 +      synchronized (fail) {
 +        assertTrue(!fail[0]);
 +      }
 +
 +      // scan the table
 +      Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
 +      for (Entry<Key,Value> entry : scanner) {
 +        Text row = entry.getKey().getRow();
 +        assertTrue(row.compareTo(start) <= 0 || row.compareTo(end) > 0);
 +      }
 +
 +      // delete the table
 +      getConnector().tableOperations().delete(tableName);
 +    }
 +  }
 +
 +  private void generateRandomRange(Text start, Text end) {
 +    List<String> bunch = new ArrayList<String>(ROWS);
 +    Collections.shuffle(bunch);
 +    if (bunch.get(0).compareTo((bunch.get(1))) < 0) {
 +      start.set(bunch.get(0));
 +      end.set(bunch.get(1));
 +    } else {
 +      start.set(bunch.get(1));
 +      end.set(bunch.get(0));
 +    }
 +
 +  }
 +
 +  private void fillTable(String table) throws Exception {
 +    BatchWriter bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
 +    for (String row : ROWS) {
 +      Mutation m = new Mutation(row);
 +      m.put("cf", "cq", "value");
 +      bw.addMutation(m);
 +    }
 +    bw.close();
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
index 4d622a1,0000000..c311413
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
@@@ -1,168 -1,0 +1,169 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
 +import java.util.Collections;
 +import java.util.Map.Entry;
 +import java.util.Random;
 +import java.util.TreeSet;
 +
++import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.accumulo.test.TestIngest;
 +import org.apache.hadoop.io.Text;
 +import org.apache.log4j.Logger;
 +import org.junit.Test;
 +
 +public class LargeRowIT extends ConfigurableMacIT {
 +  
 +  @Override
 +  public void configure(MiniAccumuloConfigImpl cfg) {
 +    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "10ms"));
 +  }
 +
 +  private static final int SEED = 42;
 +  private static final String REG_TABLE_NAME = "lr";
 +  private static final String PRE_SPLIT_TABLE_NAME = "lrps";
 +  private static final int NUM_ROWS = 100;
 +  private static final int ROW_SIZE = 1 << 17;
 +  private static final int NUM_PRE_SPLITS = 9;
 +  private static final int SPLIT_THRESH = ROW_SIZE * NUM_ROWS / NUM_PRE_SPLITS;
 +  
 +  @Test(timeout = 4 * 60 * 1000)
 +  public void run() throws Exception {
 +    Random r = new Random();
 +    byte rowData[] = new byte[ROW_SIZE];
 +    r.setSeed(SEED + 1);
 +    TreeSet<Text> splitPoints = new TreeSet<Text>();
 +    for (int i = 0; i < NUM_PRE_SPLITS; i++) {
 +      r.nextBytes(rowData);
 +      TestIngest.toPrintableChars(rowData);
 +      splitPoints.add(new Text(rowData));
 +    }
 +    Connector c = getConnector();
 +    c.tableOperations().create(REG_TABLE_NAME);
 +    c.tableOperations().create(PRE_SPLIT_TABLE_NAME);
 +    c.tableOperations().addSplits(PRE_SPLIT_TABLE_NAME, splitPoints);
 +    test1(c);
 +    test2(c);
 +  }
 +  
 +  private void test1(Connector c) throws Exception {
 +    
 +    basicTest(c, REG_TABLE_NAME, 0);
 +    
 +    c.tableOperations().setProperty(REG_TABLE_NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "" + SPLIT_THRESH);
 +    
 +    UtilWaitThread.sleep(12000);
 +    Logger.getLogger(LargeRowIT.class).warn("checking splits");
 +    FunctionalTestUtils.checkSplits(c, REG_TABLE_NAME, NUM_PRE_SPLITS / 2, NUM_PRE_SPLITS * 4);
 +    
 +    verify(c, REG_TABLE_NAME);
 +  }
 +  
 +  private void test2(Connector c) throws Exception {
 +    basicTest(c, PRE_SPLIT_TABLE_NAME, NUM_PRE_SPLITS);
 +  }
 +  
 +  private void basicTest(Connector c, String table, int expectedSplits) throws Exception {
 +    BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
 +    
 +    Random r = new Random();
 +    byte rowData[] = new byte[ROW_SIZE];
 +    
 +    r.setSeed(SEED);
 +    
 +    for (int i = 0; i < NUM_ROWS; i++) {
 +      
 +      r.nextBytes(rowData);
 +      TestIngest.toPrintableChars(rowData);
 +      
 +      Mutation mut = new Mutation(new Text(rowData));
-       mut.put(new Text(""), new Text(""), new Value(("" + i).getBytes()));
++      mut.put(new Text(""), new Text(""), new Value(Integer.toString(i).getBytes(Constants.UTF8)));
 +      bw.addMutation(mut);
 +    }
 +    
 +    bw.close();
 +    
 +    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
 +    
 +    verify(c, table);
 +    
 +    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
 +    
 +    c.tableOperations().flush(table, null, null, false);
 +    
 +    // verify while table flush is running
 +    verify(c, table);
 +    
 +    // give split time to complete
 +    c.tableOperations().flush(table, null, null, true);
 +    
 +    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
 +    
 +    verify(c, table);
 +    
 +    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
 +  }
 +  
 +  private void verify(Connector c, String table) throws Exception {
 +    Random r = new Random();
 +    byte rowData[] = new byte[ROW_SIZE];
 +    
 +    r.setSeed(SEED);
 +    
 +    Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
 +    
 +    for (int i = 0; i < NUM_ROWS; i++) {
 +      
 +      r.nextBytes(rowData);
 +      TestIngest.toPrintableChars(rowData);
 +      
 +      scanner.setRange(new Range(new Text(rowData)));
 +      
 +      int count = 0;
 +      
 +      for (Entry<Key,Value> entry : scanner) {
 +        if (!entry.getKey().getRow().equals(new Text(rowData))) {
 +          throw new Exception("verification failed, unexpected row i =" + i);
 +        }
-         if (!entry.getValue().equals(Integer.toString(i).getBytes())) {
++        if (!entry.getValue().equals(Integer.toString(i).getBytes(Constants.UTF8))) {
 +          throw new Exception("verification failed, unexpected value i =" + i + " value = " + entry.getValue());
 +        }
 +        count++;
 +      }
 +      
 +      if (count != 1) {
 +        throw new Exception("verification failed, unexpected count i =" + i + " count=" + count);
 +      }
 +      
 +    }
 +    
 +  }
 +  
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
index 5a79286,0000000..3102f59
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
@@@ -1,612 -1,0 +1,612 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertNotNull;
 +import static org.junit.Assert.assertNull;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.io.File;
 +import java.io.IOException;
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.Comparator;
 +import java.util.Iterator;
 +import java.util.Map.Entry;
 +import java.util.NoSuchElementException;
 +import java.util.Random;
 +import java.util.TreeMap;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.accumulo.tserver.NativeMap;
 +import org.apache.hadoop.io.Text;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +public class NativeMapIT {
 +
 +  private Key nk(int r) {
 +    return new Key(new Text(String.format("r%09d", r)));
 +  }
 +
 +  private Key nk(int r, int cf, int cq, int cv, int ts, boolean deleted) {
 +    Key k = new Key(new Text(String.format("r%09d", r)), new Text(String.format("cf%09d", cf)), new Text(String.format("cq%09d", cq)), new Text(String.format(
 +        "cv%09d", cv)), ts);
 +
 +    k.setDeleted(deleted);
 +
 +    return k;
 +  }
 +
 +  private Value nv(int v) {
-     return new Value(String.format("r%09d", v).getBytes());
++    return new Value(String.format("r%09d", v).getBytes(Constants.UTF8));
 +  }
 +
 +  public static File nativeMapLocation() {
 +    File projectDir = new File(System.getProperty("user.dir")).getParentFile();
 +    File nativeMapDir = new File(projectDir, "server/native/target/accumulo-native-" + Constants.VERSION + "/accumulo-native-" + Constants.VERSION);
 +    return nativeMapDir;
 +  }
 +
 +  @BeforeClass
 +  public static void setUp() {
 +    NativeMap.loadNativeLib(Collections.singletonList(nativeMapLocation()));
 +  }
 +
 +  private void verifyIterator(int start, int end, int valueOffset, Iterator<Entry<Key,Value>> iter) {
 +    for (int i = start; i <= end; i++) {
 +      assertTrue(iter.hasNext());
 +      Entry<Key,Value> entry = iter.next();
 +      assertEquals(nk(i), entry.getKey());
 +      assertEquals(nv(i + valueOffset), entry.getValue());
 +    }
 +
 +    assertFalse(iter.hasNext());
 +  }
 +
 +  private void insertAndVerify(NativeMap nm, int start, int end, int valueOffset) {
 +    for (int i = start; i <= end; i++) {
 +      nm.put(nk(i), nv(i + valueOffset));
 +    }
 +
 +    for (int i = start; i <= end; i++) {
 +      Value v = nm.get(nk(i));
 +      assertNotNull(v);
 +      assertEquals(nv(i + valueOffset), v);
 +
 +      Iterator<Entry<Key,Value>> iter2 = nm.iterator(nk(i));
 +      assertTrue(iter2.hasNext());
 +      Entry<Key,Value> entry = iter2.next();
 +      assertEquals(nk(i), entry.getKey());
 +      assertEquals(nv(i + valueOffset), entry.getValue());
 +    }
 +
 +    assertNull(nm.get(nk(start - 1)));
 +
 +    assertNull(nm.get(nk(end + 1)));
 +
 +    Iterator<Entry<Key,Value>> iter = nm.iterator();
 +    verifyIterator(start, end, valueOffset, iter);
 +
 +    for (int i = start; i <= end; i++) {
 +      iter = nm.iterator(nk(i));
 +      verifyIterator(i, end, valueOffset, iter);
 +
 +      // lookup nonexistant key that falls after existing key
 +      iter = nm.iterator(nk(i, 1, 1, 1, 1, false));
 +      verifyIterator(i + 1, end, valueOffset, iter);
 +    }
 +
 +    assertEquals(end - start + 1, nm.size());
 +  }
 +
 +  private void insertAndVerifyExhaustive(NativeMap nm, int num, int run) {
 +    for (int i = 0; i < num; i++) {
 +      for (int j = 0; j < num; j++) {
 +        for (int k = 0; k < num; k++) {
 +          for (int l = 0; l < num; l++) {
 +            for (int ts = 0; ts < num; ts++) {
 +              Key key = nk(i, j, k, l, ts, true);
-               Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes());
++              Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(Constants.UTF8));
 +
 +              nm.put(key, value);
 +
 +              key = nk(i, j, k, l, ts, false);
-               value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes());
++              value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(Constants.UTF8));
 +
 +              nm.put(key, value);
 +            }
 +          }
 +        }
 +      }
 +    }
 +
 +    Iterator<Entry<Key,Value>> iter = nm.iterator();
 +
 +    for (int i = 0; i < num; i++) {
 +      for (int j = 0; j < num; j++) {
 +        for (int k = 0; k < num; k++) {
 +          for (int l = 0; l < num; l++) {
 +            for (int ts = num - 1; ts >= 0; ts--) {
 +              Key key = nk(i, j, k, l, ts, true);
-               Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes());
++              Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(Constants.UTF8));
 +
 +              assertTrue(iter.hasNext());
 +              Entry<Key,Value> entry = iter.next();
 +              assertEquals(key, entry.getKey());
 +              assertEquals(value, entry.getValue());
 +
 +              key = nk(i, j, k, l, ts, false);
-               value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes());
++              value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(Constants.UTF8));
 +
 +              assertTrue(iter.hasNext());
 +              entry = iter.next();
 +              assertEquals(key, entry.getKey());
 +              assertEquals(value, entry.getValue());
 +            }
 +          }
 +        }
 +      }
 +    }
 +
 +    assertFalse(iter.hasNext());
 +
 +    for (int i = 0; i < num; i++) {
 +      for (int j = 0; j < num; j++) {
 +        for (int k = 0; k < num; k++) {
 +          for (int l = 0; l < num; l++) {
 +            for (int ts = 0; ts < num; ts++) {
 +              Key key = nk(i, j, k, l, ts, true);
-               Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes());
++              Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(Constants.UTF8));
 +
 +              assertEquals(value, nm.get(key));
 +
 +              Iterator<Entry<Key,Value>> iter2 = nm.iterator(key);
 +              assertTrue(iter2.hasNext());
 +              Entry<Key,Value> entry = iter2.next();
 +              assertEquals(key, entry.getKey());
 +              assertEquals(value, entry.getValue());
 +
 +              key = nk(i, j, k, l, ts, false);
-               value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes());
++              value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(Constants.UTF8));
 +
 +              assertEquals(value, nm.get(key));
 +
 +              Iterator<Entry<Key,Value>> iter3 = nm.iterator(key);
 +              assertTrue(iter3.hasNext());
 +              Entry<Key,Value> entry2 = iter3.next();
 +              assertEquals(key, entry2.getKey());
 +              assertEquals(value, entry2.getValue());
 +            }
 +          }
 +        }
 +      }
 +    }
 +
 +    assertEquals(num * num * num * num * num * 2, nm.size());
 +  }
 +
 +  @Test
 +  public void test1() {
 +    NativeMap nm = new NativeMap();
 +    Iterator<Entry<Key,Value>> iter = nm.iterator();
 +    assertFalse(iter.hasNext());
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void test2() {
 +    NativeMap nm = new NativeMap();
 +
 +    insertAndVerify(nm, 1, 10, 0);
 +    insertAndVerify(nm, 1, 10, 1);
 +    insertAndVerify(nm, 1, 10, 2);
 +
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void test4() {
 +    NativeMap nm = new NativeMap();
 +
 +    insertAndVerifyExhaustive(nm, 3, 0);
 +    insertAndVerifyExhaustive(nm, 3, 1);
 +
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void test5() {
 +    NativeMap nm = new NativeMap();
 +
 +    insertAndVerify(nm, 1, 10, 0);
 +
 +    Iterator<Entry<Key,Value>> iter = nm.iterator();
 +    iter.next();
 +
 +    nm.delete();
 +
 +    try {
 +      nm.put(nk(1), nv(1));
 +      assertTrue(false);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +
 +    try {
 +      nm.get(nk(1));
 +      assertTrue(false);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +
 +    try {
 +      nm.iterator();
 +      assertTrue(false);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +
 +    try {
 +      nm.iterator(nk(1));
 +      assertTrue(false);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +
 +    try {
 +      nm.size();
 +      assertTrue(false);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +
 +    try {
 +      iter.next();
 +      assertTrue(false);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +
 +  }
 +
 +  @Test
 +  public void test7() {
 +    NativeMap nm = new NativeMap();
 +
 +    insertAndVerify(nm, 1, 10, 0);
 +
 +    nm.delete();
 +
 +    try {
 +      nm.delete();
 +      assertTrue(false);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +  }
 +
 +  @Test
 +  public void test8() {
 +    // test verifies that native map sorts keys sharing some common prefix properly
 +
 +    NativeMap nm = new NativeMap();
 +
 +    TreeMap<Key,Value> tm = new TreeMap<Key,Value>();
 +
-     tm.put(new Key(new Text("fo")), new Value("0".getBytes()));
-     tm.put(new Key(new Text("foo")), new Value("1".getBytes()));
-     tm.put(new Key(new Text("foo1")), new Value("2".getBytes()));
-     tm.put(new Key(new Text("foo2")), new Value("3".getBytes()));
- 
++    tm.put(new Key(new Text("fo")), new Value(new byte[] {'0'}));
++    tm.put(new Key(new Text("foo")), new Value(new byte[] {'1'}));
++    tm.put(new Key(new Text("foo1")), new Value(new byte[] {'2'}));
++    tm.put(new Key(new Text("foo2")), new Value(new byte[] {'3'}));
++ 
 +    for (Entry<Key,Value> entry : tm.entrySet()) {
 +      nm.put(entry.getKey(), entry.getValue());
 +    }
 +
 +    Iterator<Entry<Key,Value>> iter = nm.iterator();
 +
 +    for (Entry<Key,Value> entry : tm.entrySet()) {
 +      assertTrue(iter.hasNext());
 +      Entry<Key,Value> entry2 = iter.next();
 +
 +      assertEquals(entry.getKey(), entry2.getKey());
 +      assertEquals(entry.getValue(), entry2.getValue());
 +    }
 +
 +    assertFalse(iter.hasNext());
 +
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void test9() {
 +    NativeMap nm = new NativeMap();
 +
 +    Iterator<Entry<Key,Value>> iter = nm.iterator();
 +
 +    try {
 +      iter.next();
 +      assertTrue(false);
 +    } catch (NoSuchElementException e) {
 +
 +    }
 +
 +    insertAndVerify(nm, 1, 1, 0);
 +
 +    iter = nm.iterator();
 +    iter.next();
 +
 +    try {
 +      iter.next();
 +      assertTrue(false);
 +    } catch (NoSuchElementException e) {
 +
 +    }
 +
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void test10() {
 +    int start = 1;
 +    int end = 10000;
 +
 +    NativeMap nm = new NativeMap();
 +    for (int i = start; i <= end; i++) {
 +      nm.put(nk(i), nv(i));
 +    }
 +
 +    long mem1 = nm.getMemoryUsed();
 +
 +    for (int i = start; i <= end; i++) {
 +      nm.put(nk(i), nv(i));
 +    }
 +
 +    long mem2 = nm.getMemoryUsed();
 +
 +    if (mem1 != mem2) {
 +      throw new RuntimeException("Memory changed after inserting duplicate data " + mem1 + " " + mem2);
 +    }
 +
 +    for (int i = start; i <= end; i++) {
 +      nm.put(nk(i), nv(i));
 +    }
 +
 +    long mem3 = nm.getMemoryUsed();
 +
 +    if (mem1 != mem3) {
 +      throw new RuntimeException("Memory changed after inserting duplicate data " + mem1 + " " + mem3);
 +    }
 +
 +    byte bigrow[] = new byte[1000000];
 +    byte bigvalue[] = new byte[bigrow.length];
 +
 +    for (int i = 0; i < bigrow.length; i++) {
 +      bigrow[i] = (byte) (0xff & (i % 256));
 +      bigvalue[i] = bigrow[i];
 +    }
 +
 +    nm.put(new Key(new Text(bigrow)), new Value(bigvalue));
 +
 +    long mem4 = nm.getMemoryUsed();
 +
 +    Value val = nm.get(new Key(new Text(bigrow)));
 +    if (val == null || !val.equals(new Value(bigvalue))) {
 +      throw new RuntimeException("Did not get expected big value");
 +    }
 +
 +    nm.put(new Key(new Text(bigrow)), new Value(bigvalue));
 +
 +    long mem5 = nm.getMemoryUsed();
 +
 +    if (mem4 != mem5) {
 +      throw new RuntimeException("Memory changed after inserting duplicate data " + mem4 + " " + mem5);
 +    }
 +
 +    val = nm.get(new Key(new Text(bigrow)));
 +    if (val == null || !val.equals(new Value(bigvalue))) {
 +      throw new RuntimeException("Did not get expected big value");
 +    }
 +
 +    nm.delete();
 +  }
 +
 +  // random length random field
 +  private static byte[] rlrf(Random r, int maxLen) {
 +    int len = r.nextInt(maxLen);
 +
 +    byte f[] = new byte[len];
 +    r.nextBytes(f);
 +
 +    return f;
 +  }
 +
 +  @Test
 +  public void test11() {
 +    NativeMap nm = new NativeMap();
 +
 +    // insert things with varying field sizes and value sizes
 +
 +    // generate random data
 +    Random r = new Random(75);
 +
 +    ArrayList<Pair<Key,Value>> testData = new ArrayList<Pair<Key,Value>>();
 +
 +    for (int i = 0; i < 100000; i++) {
 +
 +      Key k = new Key(rlrf(r, 97), rlrf(r, 13), rlrf(r, 31), rlrf(r, 11), (r.nextLong() & 0x7fffffffffffffffl), false, false);
 +      Value v = new Value(rlrf(r, 511));
 +
 +      testData.add(new Pair<Key,Value>(k, v));
 +    }
 +
 +    // insert unsorted data
 +    for (Pair<Key,Value> pair : testData) {
 +      nm.put(pair.getFirst(), pair.getSecond());
 +    }
 +
 +    for (int i = 0; i < 2; i++) {
 +
 +      // sort data
 +      Collections.sort(testData, new Comparator<Pair<Key,Value>>() {
 +        @Override
 +        public int compare(Pair<Key,Value> o1, Pair<Key,Value> o2) {
 +          return o1.getFirst().compareTo(o2.getFirst());
 +        }
 +      });
 +
 +      // verify
 +      Iterator<Entry<Key,Value>> iter1 = nm.iterator();
 +      Iterator<Pair<Key,Value>> iter2 = testData.iterator();
 +
 +      while (iter1.hasNext() && iter2.hasNext()) {
 +        Entry<Key,Value> e = iter1.next();
 +        Pair<Key,Value> p = iter2.next();
 +
 +        if (!e.getKey().equals(p.getFirst()))
 +          throw new RuntimeException("Keys not equal");
 +
 +        if (!e.getValue().equals(p.getSecond()))
 +          throw new RuntimeException("Values not equal");
 +      }
 +
 +      if (iter1.hasNext())
 +        throw new RuntimeException("Not all of native map consumed");
 +
 +      if (iter2.hasNext())
 +        throw new RuntimeException("Not all of test data consumed");
 +
 +      System.out.println("test 11 nm mem " + nm.getMemoryUsed());
 +
 +      // insert data again w/ different value
 +      Collections.shuffle(testData, r);
 +      // insert unsorted data
 +      for (Pair<Key,Value> pair : testData) {
 +        pair.getSecond().set(rlrf(r, 511));
 +        nm.put(pair.getFirst(), pair.getSecond());
 +      }
 +    }
 +
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void testBinary() {
 +    NativeMap nm = new NativeMap();
 +
 +    byte emptyBytes[] = new byte[0];
 +
 +    for (int i = 0; i < 256; i++) {
 +      for (int j = 0; j < 256; j++) {
 +        byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
 +        byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
 +
 +        Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
 +        Value v = new Value(data);
 +
 +        nm.put(k, v);
 +      }
 +    }
 +
 +    Iterator<Entry<Key,Value>> iter = nm.iterator();
 +    for (int i = 0; i < 256; i++) {
 +      for (int j = 0; j < 256; j++) {
 +        byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
 +        byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
 +
 +        Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
 +        Value v = new Value(data);
 +
 +        assertTrue(iter.hasNext());
 +        Entry<Key,Value> entry = iter.next();
 +
 +        assertEquals(k, entry.getKey());
 +        assertEquals(v, entry.getValue());
 +
 +      }
 +    }
 +
 +    assertFalse(iter.hasNext());
 +
 +    for (int i = 0; i < 256; i++) {
 +      for (int j = 0; j < 256; j++) {
 +        byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
 +        byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
 +
 +        Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
 +        Value v = new Value(data);
 +
 +        Value v2 = nm.get(k);
 +
 +        assertEquals(v, v2);
 +      }
 +    }
 +
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void testEmpty() {
 +    NativeMap nm = new NativeMap();
 +
 +    assertTrue(nm.size() == 0);
 +    assertTrue(nm.getMemoryUsed() == 0);
 +
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void testConcurrentIter() throws IOException {
 +    NativeMap nm = new NativeMap();
 +
 +    nm.put(nk(0), nv(0));
 +    nm.put(nk(1), nv(1));
 +    nm.put(nk(3), nv(3));
 +
 +    SortedKeyValueIterator<Key,Value> iter = nm.skvIterator();
 +
 +    // modify map after iter created
 +    nm.put(nk(2), nv(2));
 +
 +    assertTrue(iter.hasTop());
 +    assertEquals(iter.getTopKey(), nk(0));
 +    iter.next();
 +
 +    assertTrue(iter.hasTop());
 +    assertEquals(iter.getTopKey(), nk(1));
 +    iter.next();
 +
 +    assertTrue(iter.hasTop());
 +    assertEquals(iter.getTopKey(), nk(2));
 +    iter.next();
 +
 +    assertTrue(iter.hasTop());
 +    assertEquals(iter.getTopKey(), nk(3));
 +    iter.next();
 +
 +    assertFalse(iter.hasTop());
 +
 +    nm.delete();
 +  }
 +
 +}

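One hunk in NativeMapIT (test8 above) takes a slightly different route: rather than adding Constants.UTF8, the single-character string values are replaced with literal byte arrays such as new Value(new byte[] {'0'}). For ASCII content the two forms produce identical bytes; a tiny sketch of that equivalence, again assuming plain JDK classes rather than Accumulo's Value type:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class LiteralByteSketch {
      public static void main(String[] args) {
        // Encoding the ASCII digit '0' as UTF-8 yields exactly one byte...
        byte[] encoded = "0".getBytes(StandardCharsets.UTF_8);

        // ...the same byte a character literal supplies directly, with no charset involved.
        byte[] literal = new byte[] {'0'};

        System.out.println(Arrays.equals(encoded, literal)); // prints true
      }
    }
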
http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
index 3264bd1,0000000..71b9964
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
@@@ -1,126 -1,0 +1,127 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashSet;
 +import java.util.Map.Entry;
 +
++import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchScanner;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.ScannerBase;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class ScanIteratorIT extends SimpleMacIT {
 +
 +  @Test(timeout = 30 * 1000)
 +  public void run() throws Exception {
 +    String tableName = getTableNames(1)[0];
 +    Connector c = getConnector();
 +    c.tableOperations().create(tableName);
 +
 +    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
 +
 +    for (int i = 0; i < 1000; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%06d", i)));
-       m.put(new Text("cf1"), new Text("cq1"), new Value(("" + (1000 - i)).getBytes()));
-       m.put(new Text("cf1"), new Text("cq2"), new Value(("" + (i - 1000)).getBytes()));
++      m.put(new Text("cf1"), new Text("cq1"), new Value(Integer.toString(1000 - i).getBytes(Constants.UTF8)));
++      m.put(new Text("cf1"), new Text("cq2"), new Value(Integer.toString(i - 1000).getBytes(Constants.UTF8)));
 +
 +      bw.addMutation(m);
 +    }
 +
 +    bw.close();
 +
 +    Scanner scanner = c.createScanner(tableName, new Authorizations());
 +
 +    setupIter(scanner);
 +    verify(scanner, 1, 999);
 +
 +    BatchScanner bscanner = c.createBatchScanner(tableName, new Authorizations(), 3);
 +    bscanner.setRanges(Collections.singleton(new Range((Key) null, null)));
 +
 +    setupIter(bscanner);
 +    verify(bscanner, 1, 999);
 +
 +    ArrayList<Range> ranges = new ArrayList<Range>();
 +    ranges.add(new Range(new Text(String.format("%06d", 1))));
 +    ranges.add(new Range(new Text(String.format("%06d", 6)), new Text(String.format("%06d", 16))));
 +    ranges.add(new Range(new Text(String.format("%06d", 20))));
 +    ranges.add(new Range(new Text(String.format("%06d", 23))));
 +    ranges.add(new Range(new Text(String.format("%06d", 56)), new Text(String.format("%06d", 61))));
 +    ranges.add(new Range(new Text(String.format("%06d", 501)), new Text(String.format("%06d", 504))));
 +    ranges.add(new Range(new Text(String.format("%06d", 998)), new Text(String.format("%06d", 1000))));
 +
 +    HashSet<Integer> got = new HashSet<Integer>();
 +    HashSet<Integer> expected = new HashSet<Integer>();
 +    for (int i : new int[] {1, 7, 9, 11, 13, 15, 23, 57, 59, 61, 501, 503, 999}) {
 +      expected.add(i);
 +    }
 +
 +    bscanner.setRanges(ranges);
 +
 +    for (Entry<Key,Value> entry : bscanner) {
 +      got.add(Integer.parseInt(entry.getKey().getRow().toString()));
 +    }
 +
 +    System.out.println("got : " + got);
 +
 +    if (!got.equals(expected)) {
 +      throw new Exception(got + " != " + expected);
 +    }
 +
 +    bscanner.close();
 +
 +  }
 +
 +  private void verify(Iterable<Entry<Key,Value>> scanner, int start, int finish) throws Exception {
 +
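 +    // only odd rows survive the scan iterator, so rows should advance in steps of two, each with cq1 and cq2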
 +    int expected = start;
 +    for (Entry<Key,Value> entry : scanner) {
 +      if (Integer.parseInt(entry.getKey().getRow().toString()) != expected) {
 +        throw new Exception("Saw unexpexted " + entry.getKey().getRow() + " " + expected);
 +      }
 +
 +      if (entry.getKey().getColumnQualifier().toString().equals("cq2")) {
 +        expected += 2;
 +      }
 +    }
 +
 +    if (expected != finish + 2) {
 +      throw new Exception("Ended at " + expected + " not " + (finish + 2));
 +    }
 +  }
 +
 +  private void setupIter(ScannerBase scanner) throws Exception {
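 +    // DropModIter with mod=2 and drop=0 drops even-numbered rows, so only odd rows come back from the scan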
 +    IteratorSetting dropMod = new IteratorSetting(50, "dropMod", "org.apache.accumulo.test.functional.DropModIter");
 +    dropMod.addOption("mod", "2");
 +    dropMod.addOption("drop", "0");
 +    scanner.addScanIterator(dropMod);
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
index e034740,0000000..dafbb7f
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
@@@ -1,236 -1,0 +1,237 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
 +import java.util.Map.Entry;
 +import java.util.TreeSet;
 +
++import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class ScanRangeIT extends SimpleMacIT {
 +
 +  private static final int TS_LIMIT = 1;
 +  private static final int CQ_LIMIT = 5;
 +  private static final int CF_LIMIT = 5;
 +  private static final int ROW_LIMIT = 100;
 +
 +  @Test(timeout = 2 * 60 * 1000)
 +  public void run() throws Exception {
 +    Connector c = getConnector();
 +    String[] tableNames = getTableNames(2);
 +    String table1 = tableNames[0];
 +    c.tableOperations().create(table1);
 +    String table2 = tableNames[1];
 +    c.tableOperations().create(table2);
 +    TreeSet<Text> splitRows = new TreeSet<Text>();
 +    int splits = 3;
 +    for (int i = (ROW_LIMIT / splits); i < ROW_LIMIT; i += (ROW_LIMIT / splits))
 +      splitRows.add(createRow(i));
 +    c.tableOperations().addSplits(table2, splitRows);
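 +    // table2 is pre-split so the same ranges are exercised against both an unsplit and a split table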
 +
 +    insertData(c, table1);
 +    scanTable(c, table1);
 +
 +    insertData(c, table2);
 +    scanTable(c, table2);
 +  }
 +
 +  private void scanTable(Connector c, String table) throws Exception {
 +    scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(1, 0, 0, 0));
 +
 +    scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(ROW_LIMIT - 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
 +
 +    scanRange(c, table, null, null);
 +
 +    for (int i = 0; i < ROW_LIMIT; i += (ROW_LIMIT / 3)) {
 +      for (int j = 0; j < CF_LIMIT; j += (CF_LIMIT / 2)) {
 +        for (int k = 1; k < CQ_LIMIT; k += (CQ_LIMIT / 2)) {
 +          scanRange(c, table, null, new IntKey(i, j, k, 0));
 +          scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(i, j, k, 0));
 +
 +          scanRange(c, table, new IntKey(i, j, k, 0), new IntKey(ROW_LIMIT - 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
 +
 +          scanRange(c, table, new IntKey(i, j, k, 0), null);
 +
 +        }
 +      }
 +    }
 +
 +    for (int i = 0; i < ROW_LIMIT; i++) {
 +      scanRange(c, table, new IntKey(i, 0, 0, 0), new IntKey(i, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
 +
 +      if (i > 0 && i < ROW_LIMIT - 1) {
 +        scanRange(c, table, new IntKey(i - 1, 0, 0, 0), new IntKey(i + 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
 +      }
 +    }
 +
 +  }
 +
 +  private static class IntKey {
 +    private int row;
 +    private int cf;
 +    private int cq;
 +    private long ts;
 +
 +    IntKey(IntKey ik) {
 +      this.row = ik.row;
 +      this.cf = ik.cf;
 +      this.cq = ik.cq;
 +      this.ts = ik.ts;
 +    }
 +
 +    IntKey(int row, int cf, int cq, long ts) {
 +      this.row = row;
 +      this.cf = cf;
 +      this.cq = cq;
 +      this.ts = ts;
 +    }
 +
 +    Key createKey() {
 +      Text trow = createRow(row);
 +      Text tcf = createCF(cf);
 +      Text tcq = createCQ(cq);
 +
 +      return new Key(trow, tcf, tcq, ts);
 +    }
 +
 +    IntKey increment() {
 +
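 +      // advance to the next key: increment ts, rolling over into cq, then cf, then row when each limit is reached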
 +      IntKey ik = new IntKey(this);
 +
 +      ik.ts++;
 +      if (ik.ts >= TS_LIMIT) {
 +        ik.ts = 0;
 +        ik.cq++;
 +        if (ik.cq >= CQ_LIMIT) {
 +          ik.cq = 0;
 +          ik.cf++;
 +          if (ik.cf >= CF_LIMIT) {
 +            ik.cf = 0;
 +            ik.row++;
 +          }
 +        }
 +      }
 +
 +      return ik;
 +    }
 +
 +  }
 +
 +  private void scanRange(Connector c, String table, IntKey ik1, IntKey ik2) throws Exception {
 +    scanRange(c, table, ik1, false, ik2, false);
 +    scanRange(c, table, ik1, false, ik2, true);
 +    scanRange(c, table, ik1, true, ik2, false);
 +    scanRange(c, table, ik1, true, ik2, true);
 +  }
 +
 +  private void scanRange(Connector c, String table, IntKey ik1, boolean inclusive1, IntKey ik2, boolean inclusive2) throws Exception {
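 +    // build a range from ik1/ik2 with the given inclusivity, scan it, and check each returned key against the expected IntKey sequence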
 +    Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
 +
 +    Key key1 = null;
 +    Key key2 = null;
 +
 +    IntKey expectedIntKey;
 +    IntKey expectedEndIntKey;
 +
 +    if (ik1 != null) {
 +      key1 = ik1.createKey();
 +      expectedIntKey = ik1;
 +
 +      if (!inclusive1) {
 +        expectedIntKey = expectedIntKey.increment();
 +      }
 +    } else {
 +      expectedIntKey = new IntKey(0, 0, 0, 0);
 +    }
 +
 +    if (ik2 != null) {
 +      key2 = ik2.createKey();
 +      expectedEndIntKey = ik2;
 +
 +      if (inclusive2) {
 +        expectedEndIntKey = expectedEndIntKey.increment();
 +      }
 +    } else {
 +      expectedEndIntKey = new IntKey(ROW_LIMIT, 0, 0, 0);
 +    }
 +
 +    Range range = new Range(key1, inclusive1, key2, inclusive2);
 +
 +    scanner.setRange(range);
 +
 +    for (Entry<Key,Value> entry : scanner) {
 +
 +      Key expectedKey = expectedIntKey.createKey();
 +      if (!expectedKey.equals(entry.getKey())) {
 +        throw new Exception(" " + expectedKey + " != " + entry.getKey());
 +      }
 +
 +      expectedIntKey = expectedIntKey.increment();
 +    }
 +
 +    if (!expectedIntKey.createKey().equals(expectedEndIntKey.createKey())) {
 +      throw new Exception(" " + expectedIntKey.createKey() + " != " + expectedEndIntKey.createKey());
 +    }
 +  }
 +
 +  private static Text createCF(int cf) {
 +    Text tcf = new Text(String.format("cf_%03d", cf));
 +    return tcf;
 +  }
 +
 +  private static Text createCQ(int cf) {
 +    Text tcf = new Text(String.format("cq_%03d", cf));
 +    return tcf;
 +  }
 +
 +  private static Text createRow(int row) {
 +    Text trow = new Text(String.format("r_%06d", row));
 +    return trow;
 +  }
 +
 +  private void insertData(Connector c, String table) throws Exception {
 +
 +    BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
 +
 +    for (int i = 0; i < ROW_LIMIT; i++) {
 +      Mutation m = new Mutation(createRow(i));
 +
 +      for (int j = 0; j < CF_LIMIT; j++) {
 +        for (int k = 0; k < CQ_LIMIT; k++) {
 +          for (int t = 0; t < TS_LIMIT; t++) {
-             m.put(createCF(j), createCQ(k), t, new Value(String.format("%06d_%03d_%03d_%03d", i, j, k, t).getBytes()));
++            m.put(createCF(j), createCQ(k), t, new Value(String.format("%06d_%03d_%03d_%03d", i, j, k, t).getBytes(Constants.UTF8)));
 +          }
 +        }
 +      }
 +
 +      bw.addMutation(m);
 +    }
 +
 +    bw.close();
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
index 9e114a0,0000000..b223845
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
@@@ -1,104 -1,0 +1,105 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
 +import java.util.Collections;
 +import java.util.Iterator;
 +import java.util.Map.Entry;
 +
++import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class ScanSessionTimeOutIT extends ConfigurableMacIT {
 +  
 +  @Override
 +  public void configure(MiniAccumuloConfigImpl cfg) {
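 +    // shorten the tserver scan session idle timeout so sessions expire during the test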
 +    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_SESSION_MAXIDLE.getKey(), "3"));
 +  }
 +
 +  @Test(timeout = 60 * 1000)
 +  public void run() throws Exception {
 +    Connector c = getConnector();
 +    c.tableOperations().create("abc");
 +    
 +    BatchWriter bw = c.createBatchWriter("abc", new BatchWriterConfig());
 +    
 +    for (int i = 0; i < 100000; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%08d", i)));
 +      for (int j = 0; j < 3; j++)
-         m.put(new Text("cf1"), new Text("cq" + j), new Value(("" + i + "_" + j).getBytes()));
++        m.put(new Text("cf1"), new Text("cq" + j), new Value((i + "_" + j).getBytes(Constants.UTF8)));
 +      
 +      bw.addMutation(m);
 +    }
 +    
 +    bw.close();
 +    
 +    Scanner scanner = c.createScanner("abc", new Authorizations());
 +    scanner.setBatchSize(1000);
 +    
 +    Iterator<Entry<Key,Value>> iter = scanner.iterator();
 +    
 +    verify(iter, 0, 200);
 +    
 +    // sleep three times the session timeout
 +    UtilWaitThread.sleep(9000);
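 +    // after the idle timeout the server-side session should be gone; continuing with the same iterator checks that the scan recovers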
 +    
 +    verify(iter, 200, 100000);
 +    
 +  }
 +  
 +  private void verify(Iterator<Entry<Key,Value>> iter, int start, int stop) throws Exception {
 +    for (int i = start; i < stop; i++) {
 +      
 +      Text er = new Text(String.format("%08d", i));
 +      
 +      for (int j = 0; j < 3; j++) {
 +        Entry<Key,Value> entry = iter.next();
 +        
 +        if (!entry.getKey().getRow().equals(er)) {
 +          throw new Exception("row " + entry.getKey().getRow() + " != " + er);
 +        }
 +        
 +        if (!entry.getKey().getColumnFamily().equals(new Text("cf1"))) {
 +          throw new Exception("cf " + entry.getKey().getColumnFamily() + " != cf1");
 +        }
 +        
 +        if (!entry.getKey().getColumnQualifier().equals(new Text("cq" + j))) {
 +          throw new Exception("cq " + entry.getKey().getColumnQualifier() + " != cq" + j);
 +        }
 +        
 +        if (!entry.getValue().toString().equals("" + i + "_" + j)) {
 +          throw new Exception("value " + entry.getValue() + " != " + i + "_" + j);
 +        }
 +        
 +      }
 +    }
 +    
 +  }
 +  
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7688eaf0/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
index cf7726e,0000000..e6cf72b
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
@@@ -1,122 -1,0 +1,122 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
 +import java.util.Collections;
 +import java.util.Map.Entry;
 +
 +import org.apache.accumulo.core.client.BatchScanner;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.admin.TableOperations;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.Combiner;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class ServerSideErrorIT extends SimpleMacIT {
 +
 +  @Test(timeout = 2 * 60 * 1000)
 +  public void run() throws Exception {
 +    Connector c = getConnector();
 +    String tableName = getTableNames(1)[0];
 +    c.tableOperations().create(tableName);
 +    IteratorSetting is = new IteratorSetting(5, "Bad Aggregator", BadCombiner.class);
 +    Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("acf")));
 +    c.tableOperations().attachIterator(tableName, is);
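 +    // any scan that touches the acf column family should now fail on the server side because of the bad combiner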
 +
 +    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
 +
 +    Mutation m = new Mutation(new Text("r1"));
-     m.put(new Text("acf"), new Text("foo"), new Value("1".getBytes()));
++    m.put(new Text("acf"), new Text("foo"), new Value(new byte[] {'1'}));
 +
 +    bw.addMutation(m);
 +
 +    bw.close();
 +
 +    // try to scan table
 +    Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
 +
 +    boolean caught = false;
 +    try {
 +      for (Entry<Key,Value> entry : scanner) {
 +        entry.getKey();
 +      }
 +    } catch (Exception e) {
 +      caught = true;
 +    }
 +
 +    if (!caught)
 +      throw new Exception("Scan did not fail");
 +
 +    // try to batch scan the table
 +    BatchScanner bs = c.createBatchScanner(tableName, Authorizations.EMPTY, 2);
 +    bs.setRanges(Collections.singleton(new Range()));
 +
 +    caught = false;
 +    try {
 +      for (Entry<Key,Value> entry : bs) {
 +        entry.getKey();
 +      }
 +    } catch (Exception e) {
 +      caught = true;
 +    } finally {
 +      bs.close();
 +    }
 +
 +    if (!caught)
 +      throw new Exception("batch scan did not fail");
 +
 +    // remove the bad aggregator so Accumulo can shut down
 +    TableOperations to = c.tableOperations();
 +    for (Entry<String,String> e : to.getProperties(tableName)) {
 +      to.removeProperty(tableName, e.getKey());
 +    }
 +
 +    UtilWaitThread.sleep(500);
 +
 +    // should be able to scan now
 +    scanner = c.createScanner(tableName, Authorizations.EMPTY);
 +    for (Entry<Key,Value> entry : scanner) {
 +      entry.getKey();
 +    }
 +
 +    // set a nonexistent iterator, which should cause the scan to fail on the server side
 +    scanner.addScanIterator(new IteratorSetting(100, "bogus", "com.bogus.iterator"));
 +
 +    caught = false;
 +    try {
 +      for (Entry<Key,Value> entry : scanner) {
 +        // should error
 +        entry.getKey();
 +      }
 +    } catch (Exception e) {
 +      caught = true;
 +    }
 +
 +    if (!caught)
 +      throw new Exception("Scan did not fail");
 +  }
 +}

