accumulo-commits mailing list archives

From ctubb...@apache.org
Subject [22/50] [abbrv] Merge branch '1.5' into 1.6
Date Sat, 01 Nov 2014 04:57:16 GMT
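
The recurring change in this merge swaps the Accumulo-internal Constants.UTF8 charset constant for a static import of Guava's Charsets.UTF_8. A minimal, self-contained sketch of the before/after pattern (assumes only Guava on the classpath; the class name is illustrative, not part of this commit):

    import static com.google.common.base.Charsets.UTF_8;

    public class CharsetMigrationSketch {
      public static void main(String[] args) {
        int i = 42;

        // 1.5 style, removed by this merge:
        //   byte[] bytes = Integer.toString(i).getBytes(Constants.UTF8);

        // 1.6 style, applied throughout the diffs below:
        byte[] bytes = Integer.toString(i).getBytes(UTF_8);

        System.out.println(bytes.length + " UTF-8 bytes for \"" + i + "\"");
      }
    }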
http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
index c07ed1c,0000000..d77d060
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
@@@ -1,193 -1,0 +1,194 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.util.Collections;
 +import java.util.Map.Entry;
 +import java.util.Random;
 +import java.util.TreeSet;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.accumulo.minicluster.MemoryUnit;
 +import org.apache.accumulo.minicluster.ServerType;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.accumulo.test.TestIngest;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.io.Text;
 +import org.apache.log4j.Logger;
 +import org.junit.Assert;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +public class LargeRowIT extends ConfigurableMacIT {
 +
 +  @Override
 +  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    cfg.setMemory(ServerType.TABLET_SERVER, cfg.getMemory(ServerType.TABLET_SERVER) * 2, MemoryUnit.BYTE);
 +    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "10ms"));
 +  }
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 4 * 60;
 +  }
 +
 +  private static final int SEED = 42;
 +  private static final String REG_TABLE_NAME = "lr";
 +  private static final String PRE_SPLIT_TABLE_NAME = "lrps";
 +  private static final int NUM_ROWS = 100;
 +  private static final int ROW_SIZE = 1 << 17;
 +  private static final int NUM_PRE_SPLITS = 9;
 +  private static final int SPLIT_THRESH = ROW_SIZE * NUM_ROWS / NUM_PRE_SPLITS;
 +
 +  private int timeoutFactor = 1;
 +
 +  @Before
 +  public void getTimeoutFactor() {
 +    try {
 +      timeoutFactor = Integer.parseInt(System.getProperty("timeout.factor"));
 +    } catch (NumberFormatException e) {
 +      log.warn("Could not parse property value for 'timeout.factor' as integer: " + System.getProperty("timeout.factor"));
 +    }
 +
 +    Assert.assertTrue("Timeout factor must be greater than or equal to 1", timeoutFactor >= 1);
 +  }
 +
 +  @Test
 +  public void run() throws Exception {
 +    Random r = new Random();
 +    byte rowData[] = new byte[ROW_SIZE];
 +    r.setSeed(SEED + 1);
 +    TreeSet<Text> splitPoints = new TreeSet<Text>();
 +    for (int i = 0; i < NUM_PRE_SPLITS; i++) {
 +      r.nextBytes(rowData);
 +      TestIngest.toPrintableChars(rowData);
 +      splitPoints.add(new Text(rowData));
 +    }
 +    Connector c = getConnector();
 +    c.tableOperations().create(REG_TABLE_NAME);
 +    c.tableOperations().create(PRE_SPLIT_TABLE_NAME);
 +    c.tableOperations().addSplits(PRE_SPLIT_TABLE_NAME, splitPoints);
 +    test1(c);
 +    test2(c);
 +  }
 +
 +  private void test1(Connector c) throws Exception {
 +
 +    basicTest(c, REG_TABLE_NAME, 0);
 +
 +    c.tableOperations().setProperty(REG_TABLE_NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "" + SPLIT_THRESH);
 +
 +    UtilWaitThread.sleep(timeoutFactor * 12000);
 +    Logger.getLogger(LargeRowIT.class).warn("checking splits");
 +    FunctionalTestUtils.checkSplits(c, REG_TABLE_NAME, NUM_PRE_SPLITS / 2, NUM_PRE_SPLITS * 4);
 +
 +    verify(c, REG_TABLE_NAME);
 +  }
 +
 +  private void test2(Connector c) throws Exception {
 +    basicTest(c, PRE_SPLIT_TABLE_NAME, NUM_PRE_SPLITS);
 +  }
 +
 +  private void basicTest(Connector c, String table, int expectedSplits) throws Exception {
 +    BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
 +
 +    Random r = new Random();
 +    byte rowData[] = new byte[ROW_SIZE];
 +
 +    r.setSeed(SEED);
 +
 +    for (int i = 0; i < NUM_ROWS; i++) {
 +
 +      r.nextBytes(rowData);
 +      TestIngest.toPrintableChars(rowData);
 +
 +      Mutation mut = new Mutation(new Text(rowData));
-       mut.put(new Text(""), new Text(""), new Value(Integer.toString(i).getBytes(Constants.UTF8)));
++      mut.put(new Text(""), new Text(""), new Value(Integer.toString(i).getBytes(UTF_8)));
 +      bw.addMutation(mut);
 +    }
 +
 +    bw.close();
 +
 +    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
 +
 +    verify(c, table);
 +
 +    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
 +
 +    c.tableOperations().flush(table, null, null, false);
 +
 +    // verify while table flush is running
 +    verify(c, table);
 +
 +    // give split time to complete
 +    c.tableOperations().flush(table, null, null, true);
 +
 +    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
 +
 +    verify(c, table);
 +
 +    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
 +  }
 +
 +  private void verify(Connector c, String table) throws Exception {
 +    Random r = new Random();
 +    byte rowData[] = new byte[ROW_SIZE];
 +
 +    r.setSeed(SEED);
 +
 +    Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
 +
 +    for (int i = 0; i < NUM_ROWS; i++) {
 +
 +      r.nextBytes(rowData);
 +      TestIngest.toPrintableChars(rowData);
 +
 +      scanner.setRange(new Range(new Text(rowData)));
 +
 +      int count = 0;
 +
 +      for (Entry<Key,Value> entry : scanner) {
 +        if (!entry.getKey().getRow().equals(new Text(rowData))) {
 +          throw new Exception("verification failed, unexpected row i =" + i);
 +        }
-         if (!entry.getValue().equals(Integer.toString(i).getBytes(Constants.UTF8))) {
++        if (!entry.getValue().equals(Integer.toString(i).getBytes(UTF_8))) {
 +          throw new Exception("verification failed, unexpected value i =" + i + " value = " + entry.getValue());
 +        }
 +        count++;
 +      }
 +
 +      if (count != 1) {
 +        throw new Exception("verification failed, unexpected count i =" + i + " count=" + count);
 +      }
 +
 +    }
 +
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
index 3102f59,0000000..f4bee97
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
@@@ -1,612 -1,0 +1,613 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.base.Charsets.UTF_8;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertNotNull;
 +import static org.junit.Assert.assertNull;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.io.File;
 +import java.io.IOException;
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.Comparator;
 +import java.util.Iterator;
 +import java.util.Map.Entry;
 +import java.util.NoSuchElementException;
 +import java.util.Random;
 +import java.util.TreeMap;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.accumulo.tserver.NativeMap;
 +import org.apache.hadoop.io.Text;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +public class NativeMapIT {
 +
 +  private Key nk(int r) {
 +    return new Key(new Text(String.format("r%09d", r)));
 +  }
 +
 +  private Key nk(int r, int cf, int cq, int cv, int ts, boolean deleted) {
 +    Key k = new Key(new Text(String.format("r%09d", r)), new Text(String.format("cf%09d", cf)), new Text(String.format("cq%09d", cq)), new Text(String.format(
 +        "cv%09d", cv)), ts);
 +
 +    k.setDeleted(deleted);
 +
 +    return k;
 +  }
 +
 +  private Value nv(int v) {
-     return new Value(String.format("r%09d", v).getBytes(Constants.UTF8));
++    return new Value(String.format("r%09d", v).getBytes(UTF_8));
 +  }
 +
 +  public static File nativeMapLocation() {
 +    File projectDir = new File(System.getProperty("user.dir")).getParentFile();
 +    File nativeMapDir = new File(projectDir, "server/native/target/accumulo-native-" + Constants.VERSION + "/accumulo-native-" + Constants.VERSION);
 +    return nativeMapDir;
 +  }
 +
 +  @BeforeClass
 +  public static void setUp() {
 +    NativeMap.loadNativeLib(Collections.singletonList(nativeMapLocation()));
 +  }
 +
 +  private void verifyIterator(int start, int end, int valueOffset, Iterator<Entry<Key,Value>> iter) {
 +    for (int i = start; i <= end; i++) {
 +      assertTrue(iter.hasNext());
 +      Entry<Key,Value> entry = iter.next();
 +      assertEquals(nk(i), entry.getKey());
 +      assertEquals(nv(i + valueOffset), entry.getValue());
 +    }
 +
 +    assertFalse(iter.hasNext());
 +  }
 +
 +  private void insertAndVerify(NativeMap nm, int start, int end, int valueOffset) {
 +    for (int i = start; i <= end; i++) {
 +      nm.put(nk(i), nv(i + valueOffset));
 +    }
 +
 +    for (int i = start; i <= end; i++) {
 +      Value v = nm.get(nk(i));
 +      assertNotNull(v);
 +      assertEquals(nv(i + valueOffset), v);
 +
 +      Iterator<Entry<Key,Value>> iter2 = nm.iterator(nk(i));
 +      assertTrue(iter2.hasNext());
 +      Entry<Key,Value> entry = iter2.next();
 +      assertEquals(nk(i), entry.getKey());
 +      assertEquals(nv(i + valueOffset), entry.getValue());
 +    }
 +
 +    assertNull(nm.get(nk(start - 1)));
 +
 +    assertNull(nm.get(nk(end + 1)));
 +
 +    Iterator<Entry<Key,Value>> iter = nm.iterator();
 +    verifyIterator(start, end, valueOffset, iter);
 +
 +    for (int i = start; i <= end; i++) {
 +      iter = nm.iterator(nk(i));
 +      verifyIterator(i, end, valueOffset, iter);
 +
 +      // lookup nonexistent key that falls after an existing key
 +      iter = nm.iterator(nk(i, 1, 1, 1, 1, false));
 +      verifyIterator(i + 1, end, valueOffset, iter);
 +    }
 +
 +    assertEquals(end - start + 1, nm.size());
 +  }
 +
 +  private void insertAndVerifyExhaustive(NativeMap nm, int num, int run) {
 +    for (int i = 0; i < num; i++) {
 +      for (int j = 0; j < num; j++) {
 +        for (int k = 0; k < num; k++) {
 +          for (int l = 0; l < num; l++) {
 +            for (int ts = 0; ts < num; ts++) {
 +              Key key = nk(i, j, k, l, ts, true);
-               Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(Constants.UTF8));
++              Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(UTF_8));
 +
 +              nm.put(key, value);
 +
 +              key = nk(i, j, k, l, ts, false);
-               value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(Constants.UTF8));
++              value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(UTF_8));
 +
 +              nm.put(key, value);
 +            }
 +          }
 +        }
 +      }
 +    }
 +
 +    Iterator<Entry<Key,Value>> iter = nm.iterator();
 +
 +    for (int i = 0; i < num; i++) {
 +      for (int j = 0; j < num; j++) {
 +        for (int k = 0; k < num; k++) {
 +          for (int l = 0; l < num; l++) {
 +            for (int ts = num - 1; ts >= 0; ts--) {
 +              Key key = nk(i, j, k, l, ts, true);
-               Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(Constants.UTF8));
++              Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(UTF_8));
 +
 +              assertTrue(iter.hasNext());
 +              Entry<Key,Value> entry = iter.next();
 +              assertEquals(key, entry.getKey());
 +              assertEquals(value, entry.getValue());
 +
 +              key = nk(i, j, k, l, ts, false);
-               value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(Constants.UTF8));
++              value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(UTF_8));
 +
 +              assertTrue(iter.hasNext());
 +              entry = iter.next();
 +              assertEquals(key, entry.getKey());
 +              assertEquals(value, entry.getValue());
 +            }
 +          }
 +        }
 +      }
 +    }
 +
 +    assertFalse(iter.hasNext());
 +
 +    for (int i = 0; i < num; i++) {
 +      for (int j = 0; j < num; j++) {
 +        for (int k = 0; k < num; k++) {
 +          for (int l = 0; l < num; l++) {
 +            for (int ts = 0; ts < num; ts++) {
 +              Key key = nk(i, j, k, l, ts, true);
-               Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(Constants.UTF8));
++              Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(UTF_8));
 +
 +              assertEquals(value, nm.get(key));
 +
 +              Iterator<Entry<Key,Value>> iter2 = nm.iterator(key);
 +              assertTrue(iter2.hasNext());
 +              Entry<Key,Value> entry = iter2.next();
 +              assertEquals(key, entry.getKey());
 +              assertEquals(value, entry.getValue());
 +
 +              key = nk(i, j, k, l, ts, false);
-               value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(Constants.UTF8));
++              value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(UTF_8));
 +
 +              assertEquals(value, nm.get(key));
 +
 +              Iterator<Entry<Key,Value>> iter3 = nm.iterator(key);
 +              assertTrue(iter3.hasNext());
 +              Entry<Key,Value> entry2 = iter3.next();
 +              assertEquals(key, entry2.getKey());
 +              assertEquals(value, entry2.getValue());
 +            }
 +          }
 +        }
 +      }
 +    }
 +
 +    assertEquals(num * num * num * num * num * 2, nm.size());
 +  }
 +
 +  @Test
 +  public void test1() {
 +    NativeMap nm = new NativeMap();
 +    Iterator<Entry<Key,Value>> iter = nm.iterator();
 +    assertFalse(iter.hasNext());
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void test2() {
 +    NativeMap nm = new NativeMap();
 +
 +    insertAndVerify(nm, 1, 10, 0);
 +    insertAndVerify(nm, 1, 10, 1);
 +    insertAndVerify(nm, 1, 10, 2);
 +
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void test4() {
 +    NativeMap nm = new NativeMap();
 +
 +    insertAndVerifyExhaustive(nm, 3, 0);
 +    insertAndVerifyExhaustive(nm, 3, 1);
 +
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void test5() {
 +    NativeMap nm = new NativeMap();
 +
 +    insertAndVerify(nm, 1, 10, 0);
 +
 +    Iterator<Entry<Key,Value>> iter = nm.iterator();
 +    iter.next();
 +
 +    nm.delete();
 +
 +    try {
 +      nm.put(nk(1), nv(1));
 +      assertTrue(false);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +
 +    try {
 +      nm.get(nk(1));
 +      assertTrue(false);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +
 +    try {
 +      nm.iterator();
 +      assertTrue(false);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +
 +    try {
 +      nm.iterator(nk(1));
 +      assertTrue(false);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +
 +    try {
 +      nm.size();
 +      assertTrue(false);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +
 +    try {
 +      iter.next();
 +      assertTrue(false);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +
 +  }
 +
 +  @Test
 +  public void test7() {
 +    NativeMap nm = new NativeMap();
 +
 +    insertAndVerify(nm, 1, 10, 0);
 +
 +    nm.delete();
 +
 +    try {
 +      nm.delete();
 +      assertTrue(false);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +  }
 +
 +  @Test
 +  public void test8() {
 +    // test verifies that native map sorts keys sharing some common prefix properly
 +
 +    NativeMap nm = new NativeMap();
 +
 +    TreeMap<Key,Value> tm = new TreeMap<Key,Value>();
 +
 +    tm.put(new Key(new Text("fo")), new Value(new byte[] {'0'}));
 +    tm.put(new Key(new Text("foo")), new Value(new byte[] {'1'}));
 +    tm.put(new Key(new Text("foo1")), new Value(new byte[] {'2'}));
 +    tm.put(new Key(new Text("foo2")), new Value(new byte[] {'3'}));
 + 
 +    for (Entry<Key,Value> entry : tm.entrySet()) {
 +      nm.put(entry.getKey(), entry.getValue());
 +    }
 +
 +    Iterator<Entry<Key,Value>> iter = nm.iterator();
 +
 +    for (Entry<Key,Value> entry : tm.entrySet()) {
 +      assertTrue(iter.hasNext());
 +      Entry<Key,Value> entry2 = iter.next();
 +
 +      assertEquals(entry.getKey(), entry2.getKey());
 +      assertEquals(entry.getValue(), entry2.getValue());
 +    }
 +
 +    assertFalse(iter.hasNext());
 +
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void test9() {
 +    NativeMap nm = new NativeMap();
 +
 +    Iterator<Entry<Key,Value>> iter = nm.iterator();
 +
 +    try {
 +      iter.next();
 +      assertTrue(false);
 +    } catch (NoSuchElementException e) {
 +
 +    }
 +
 +    insertAndVerify(nm, 1, 1, 0);
 +
 +    iter = nm.iterator();
 +    iter.next();
 +
 +    try {
 +      iter.next();
 +      assertTrue(false);
 +    } catch (NoSuchElementException e) {
 +
 +    }
 +
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void test10() {
 +    int start = 1;
 +    int end = 10000;
 +
 +    NativeMap nm = new NativeMap();
 +    for (int i = start; i <= end; i++) {
 +      nm.put(nk(i), nv(i));
 +    }
 +
 +    long mem1 = nm.getMemoryUsed();
 +
 +    for (int i = start; i <= end; i++) {
 +      nm.put(nk(i), nv(i));
 +    }
 +
 +    long mem2 = nm.getMemoryUsed();
 +
 +    if (mem1 != mem2) {
 +      throw new RuntimeException("Memory changed after inserting duplicate data " + mem1 + " " + mem2);
 +    }
 +
 +    for (int i = start; i <= end; i++) {
 +      nm.put(nk(i), nv(i));
 +    }
 +
 +    long mem3 = nm.getMemoryUsed();
 +
 +    if (mem1 != mem3) {
 +      throw new RuntimeException("Memory changed after inserting duplicate data " + mem1 + " " + mem3);
 +    }
 +
 +    byte bigrow[] = new byte[1000000];
 +    byte bigvalue[] = new byte[bigrow.length];
 +
 +    for (int i = 0; i < bigrow.length; i++) {
 +      bigrow[i] = (byte) (0xff & (i % 256));
 +      bigvalue[i] = bigrow[i];
 +    }
 +
 +    nm.put(new Key(new Text(bigrow)), new Value(bigvalue));
 +
 +    long mem4 = nm.getMemoryUsed();
 +
 +    Value val = nm.get(new Key(new Text(bigrow)));
 +    if (val == null || !val.equals(new Value(bigvalue))) {
 +      throw new RuntimeException("Did not get expected big value");
 +    }
 +
 +    nm.put(new Key(new Text(bigrow)), new Value(bigvalue));
 +
 +    long mem5 = nm.getMemoryUsed();
 +
 +    if (mem4 != mem5) {
 +      throw new RuntimeException("Memory changed after inserting duplicate data " + mem4 + " " + mem5);
 +    }
 +
 +    val = nm.get(new Key(new Text(bigrow)));
 +    if (val == null || !val.equals(new Value(bigvalue))) {
 +      throw new RuntimeException("Did not get expected big value");
 +    }
 +
 +    nm.delete();
 +  }
 +
 +  // random length random field
 +  private static byte[] rlrf(Random r, int maxLen) {
 +    int len = r.nextInt(maxLen);
 +
 +    byte f[] = new byte[len];
 +    r.nextBytes(f);
 +
 +    return f;
 +  }
 +
 +  @Test
 +  public void test11() {
 +    NativeMap nm = new NativeMap();
 +
 +    // insert things with varying field sizes and value sizes
 +
 +    // generate random data
 +    Random r = new Random(75);
 +
 +    ArrayList<Pair<Key,Value>> testData = new ArrayList<Pair<Key,Value>>();
 +
 +    for (int i = 0; i < 100000; i++) {
 +
 +      Key k = new Key(rlrf(r, 97), rlrf(r, 13), rlrf(r, 31), rlrf(r, 11), (r.nextLong() & 0x7fffffffffffffffL), false, false);
 +      Value v = new Value(rlrf(r, 511));
 +
 +      testData.add(new Pair<Key,Value>(k, v));
 +    }
 +
 +    // insert unsorted data
 +    for (Pair<Key,Value> pair : testData) {
 +      nm.put(pair.getFirst(), pair.getSecond());
 +    }
 +
 +    for (int i = 0; i < 2; i++) {
 +
 +      // sort data
 +      Collections.sort(testData, new Comparator<Pair<Key,Value>>() {
 +        @Override
 +        public int compare(Pair<Key,Value> o1, Pair<Key,Value> o2) {
 +          return o1.getFirst().compareTo(o2.getFirst());
 +        }
 +      });
 +
 +      // verify
 +      Iterator<Entry<Key,Value>> iter1 = nm.iterator();
 +      Iterator<Pair<Key,Value>> iter2 = testData.iterator();
 +
 +      while (iter1.hasNext() && iter2.hasNext()) {
 +        Entry<Key,Value> e = iter1.next();
 +        Pair<Key,Value> p = iter2.next();
 +
 +        if (!e.getKey().equals(p.getFirst()))
 +          throw new RuntimeException("Keys not equal");
 +
 +        if (!e.getValue().equals(p.getSecond()))
 +          throw new RuntimeException("Values not equal");
 +      }
 +
 +      if (iter1.hasNext())
 +        throw new RuntimeException("Not all of native map consumed");
 +
 +      if (iter2.hasNext())
 +        throw new RuntimeException("Not all of test data consumed");
 +
 +      System.out.println("test 11 nm mem " + nm.getMemoryUsed());
 +
 +      // insert data again w/ different value
 +      Collections.shuffle(testData, r);
 +      // insert unsorted data
 +      for (Pair<Key,Value> pair : testData) {
 +        pair.getSecond().set(rlrf(r, 511));
 +        nm.put(pair.getFirst(), pair.getSecond());
 +      }
 +    }
 +
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void testBinary() {
 +    NativeMap nm = new NativeMap();
 +
 +    byte emptyBytes[] = new byte[0];
 +
 +    for (int i = 0; i < 256; i++) {
 +      for (int j = 0; j < 256; j++) {
 +        byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
 +        byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
 +
 +        Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
 +        Value v = new Value(data);
 +
 +        nm.put(k, v);
 +      }
 +    }
 +
 +    Iterator<Entry<Key,Value>> iter = nm.iterator();
 +    for (int i = 0; i < 256; i++) {
 +      for (int j = 0; j < 256; j++) {
 +        byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
 +        byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
 +
 +        Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
 +        Value v = new Value(data);
 +
 +        assertTrue(iter.hasNext());
 +        Entry<Key,Value> entry = iter.next();
 +
 +        assertEquals(k, entry.getKey());
 +        assertEquals(v, entry.getValue());
 +
 +      }
 +    }
 +
 +    assertFalse(iter.hasNext());
 +
 +    for (int i = 0; i < 256; i++) {
 +      for (int j = 0; j < 256; j++) {
 +        byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
 +        byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
 +
 +        Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
 +        Value v = new Value(data);
 +
 +        Value v2 = nm.get(k);
 +
 +        assertEquals(v, v2);
 +      }
 +    }
 +
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void testEmpty() {
 +    NativeMap nm = new NativeMap();
 +
 +    assertTrue(nm.size() == 0);
 +    assertTrue(nm.getMemoryUsed() == 0);
 +
 +    nm.delete();
 +  }
 +
 +  @Test
 +  public void testConcurrentIter() throws IOException {
 +    NativeMap nm = new NativeMap();
 +
 +    nm.put(nk(0), nv(0));
 +    nm.put(nk(1), nv(1));
 +    nm.put(nk(3), nv(3));
 +
 +    SortedKeyValueIterator<Key,Value> iter = nm.skvIterator();
 +
 +    // modify map after iter created
 +    nm.put(nk(2), nv(2));
 +
 +    assertTrue(iter.hasTop());
 +    assertEquals(iter.getTopKey(), nk(0));
 +    iter.next();
 +
 +    assertTrue(iter.hasTop());
 +    assertEquals(iter.getTopKey(), nk(1));
 +    iter.next();
 +
 +    assertTrue(iter.hasTop());
 +    assertEquals(iter.getTopKey(), nk(2));
 +    iter.next();
 +
 +    assertTrue(iter.hasTop());
 +    assertEquals(iter.getTopKey(), nk(3));
 +    iter.next();
 +
 +    assertFalse(iter.hasTop());
 +
 +    nm.delete();
 +  }
 +
 +}
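
NativeMapIT asserts expected exceptions with the try { op; assertTrue(false); } catch (...) {} idiom. A hedged JUnit 4 sketch of the more conventional equivalents (opOnDeletedMap() is a stand-in for calls like nm.put() after nm.delete(); it is not part of this commit):

    import static org.junit.Assert.fail;

    import org.junit.Test;

    public class ExpectedExceptionSketch {

      // Stand-in for an operation on a deleted NativeMap, e.g. nm.put(...).
      private void opOnDeletedMap() {
        throw new IllegalStateException("map was deleted");
      }

      // Declarative form: JUnit verifies the exception type itself.
      @Test(expected = IllegalStateException.class)
      public void declarative() {
        opOnDeletedMap();
      }

      // Inline form: fail() reads more clearly than assertTrue(false).
      @Test
      public void inline() {
        try {
          opOnDeletedMap();
          fail("expected IllegalStateException");
        } catch (IllegalStateException e) {
          // expected
        }
      }
    }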

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
index 7e43812,0000000..189a55c
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
@@@ -1,132 -1,0 +1,133 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashSet;
 +import java.util.Map.Entry;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchScanner;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.ScannerBase;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class ScanIteratorIT extends SimpleMacIT {
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 30;
 +  }
 +
 +  @Test
 +  public void run() throws Exception {
 +    String tableName = getUniqueNames(1)[0];
 +    Connector c = getConnector();
 +    c.tableOperations().create(tableName);
 +
 +    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
 +
 +    for (int i = 0; i < 1000; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%06d", i)));
-       m.put(new Text("cf1"), new Text("cq1"), new Value(Integer.toString(1000 - i).getBytes(Constants.UTF8)));
-       m.put(new Text("cf1"), new Text("cq2"), new Value(Integer.toString(i - 1000).getBytes(Constants.UTF8)));
++      m.put(new Text("cf1"), new Text("cq1"), new Value(Integer.toString(1000 - i).getBytes(UTF_8)));
++      m.put(new Text("cf1"), new Text("cq2"), new Value(Integer.toString(i - 1000).getBytes(UTF_8)));
 +
 +      bw.addMutation(m);
 +    }
 +
 +    bw.close();
 +
 +    Scanner scanner = c.createScanner(tableName, new Authorizations());
 +
 +    setupIter(scanner);
 +    verify(scanner, 1, 999);
 +
 +    BatchScanner bscanner = c.createBatchScanner(tableName, new Authorizations(), 3);
 +    bscanner.setRanges(Collections.singleton(new Range((Key) null, null)));
 +
 +    setupIter(bscanner);
 +    verify(bscanner, 1, 999);
 +
 +    ArrayList<Range> ranges = new ArrayList<Range>();
 +    ranges.add(new Range(new Text(String.format("%06d", 1))));
 +    ranges.add(new Range(new Text(String.format("%06d", 6)), new Text(String.format("%06d", 16))));
 +    ranges.add(new Range(new Text(String.format("%06d", 20))));
 +    ranges.add(new Range(new Text(String.format("%06d", 23))));
 +    ranges.add(new Range(new Text(String.format("%06d", 56)), new Text(String.format("%06d", 61))));
 +    ranges.add(new Range(new Text(String.format("%06d", 501)), new Text(String.format("%06d", 504))));
 +    ranges.add(new Range(new Text(String.format("%06d", 998)), new Text(String.format("%06d", 1000))));
 +
 +    HashSet<Integer> got = new HashSet<Integer>();
 +    HashSet<Integer> expected = new HashSet<Integer>();
 +    for (int i : new int[] {1, 7, 9, 11, 13, 15, 23, 57, 59, 61, 501, 503, 999}) {
 +      expected.add(i);
 +    }
 +
 +    bscanner.setRanges(ranges);
 +
 +    for (Entry<Key,Value> entry : bscanner) {
 +      got.add(Integer.parseInt(entry.getKey().getRow().toString()));
 +    }
 +
 +    System.out.println("got : " + got);
 +
 +    if (!got.equals(expected)) {
 +      throw new Exception(got + " != " + expected);
 +    }
 +
 +    bscanner.close();
 +
 +  }
 +
 +  private void verify(Iterable<Entry<Key,Value>> scanner, int start, int finish) throws Exception {
 +
 +    int expected = start;
 +    for (Entry<Key,Value> entry : scanner) {
 +      if (Integer.parseInt(entry.getKey().getRow().toString()) != expected) {
 +        throw new Exception("Saw unexpected " + entry.getKey().getRow() + " " + expected);
 +      }
 +
 +      if (entry.getKey().getColumnQualifier().toString().equals("cq2")) {
 +        expected += 2;
 +      }
 +    }
 +
 +    if (expected != finish + 2) {
 +      throw new Exception("Ended at " + expected + " not " + (finish + 2));
 +    }
 +  }
 +
 +  private void setupIter(ScannerBase scanner) throws Exception {
 +    IteratorSetting dropMod = new IteratorSetting(50, "dropMod", "org.apache.accumulo.test.functional.DropModIter");
 +    dropMod.addOption("mod", "2");
 +    dropMod.addOption("drop", "0");
 +    scanner.addScanIterator(dropMod);
 +  }
 +
 +}
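
DropModIter's source is not part of this diff. Judging from how ScanIteratorIT configures it (options mod=2 and drop=0 against zero-padded numeric rows, so even rows are dropped), a rough sketch over Accumulo's Filter base class might look like the following; the class name and option handling are assumptions, not the project's actual implementation:

    import java.io.IOException;
    import java.util.Map;

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.Filter;
    import org.apache.accumulo.core.iterators.IteratorEnvironment;
    import org.apache.accumulo.core.iterators.SortedKeyValueIterator;

    public class DropModFilterSketch extends Filter {
      private int mod;
      private int drop;

      @Override
      public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
        super.init(source, options, env);
        mod = Integer.parseInt(options.get("mod"));
        drop = Integer.parseInt(options.get("drop"));
      }

      // Keep an entry unless its row, parsed as an integer, hits the dropped residue class.
      @Override
      public boolean accept(Key k, Value v) {
        return Integer.parseInt(k.getRow().toString()) % mod != drop;
      }
    }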

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
index ba03d00,0000000..90b881c
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
@@@ -1,242 -1,0 +1,243 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.util.Map.Entry;
 +import java.util.TreeSet;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class ScanRangeIT extends SimpleMacIT {
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 2 * 60;
 +  }
 +
 +  private static final int TS_LIMIT = 1;
 +  private static final int CQ_LIMIT = 5;
 +  private static final int CF_LIMIT = 5;
 +  private static final int ROW_LIMIT = 100;
 +
 +  @Test
 +  public void run() throws Exception {
 +    Connector c = getConnector();
 +    String[] tableNames = getUniqueNames(2);
 +    String table1 = tableNames[0];
 +    c.tableOperations().create(table1);
 +    String table2 = tableNames[1];
 +    c.tableOperations().create(table2);
 +    TreeSet<Text> splitRows = new TreeSet<Text>();
 +    int splits = 3;
 +    for (int i = (ROW_LIMIT / splits); i < ROW_LIMIT; i += (ROW_LIMIT / splits))
 +      splitRows.add(createRow(i));
 +    c.tableOperations().addSplits(table2, splitRows);
 +
 +    insertData(c, table1);
 +    scanTable(c, table1);
 +
 +    insertData(c, table2);
 +    scanTable(c, table2);
 +  }
 +
 +  private void scanTable(Connector c, String table) throws Exception {
 +    scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(1, 0, 0, 0));
 +
 +    scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(ROW_LIMIT - 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
 +
 +    scanRange(c, table, null, null);
 +
 +    for (int i = 0; i < ROW_LIMIT; i += (ROW_LIMIT / 3)) {
 +      for (int j = 0; j < CF_LIMIT; j += (CF_LIMIT / 2)) {
 +        for (int k = 1; k < CQ_LIMIT; k += (CQ_LIMIT / 2)) {
 +          scanRange(c, table, null, new IntKey(i, j, k, 0));
 +          scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(i, j, k, 0));
 +
 +          scanRange(c, table, new IntKey(i, j, k, 0), new IntKey(ROW_LIMIT - 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
 +
 +          scanRange(c, table, new IntKey(i, j, k, 0), null);
 +
 +        }
 +      }
 +    }
 +
 +    for (int i = 0; i < ROW_LIMIT; i++) {
 +      scanRange(c, table, new IntKey(i, 0, 0, 0), new IntKey(i, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
 +
 +      if (i > 0 && i < ROW_LIMIT - 1) {
 +        scanRange(c, table, new IntKey(i - 1, 0, 0, 0), new IntKey(i + 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
 +      }
 +    }
 +
 +  }
 +
 +  private static class IntKey {
 +    private int row;
 +    private int cf;
 +    private int cq;
 +    private long ts;
 +
 +    IntKey(IntKey ik) {
 +      this.row = ik.row;
 +      this.cf = ik.cf;
 +      this.cq = ik.cq;
 +      this.ts = ik.ts;
 +    }
 +
 +    IntKey(int row, int cf, int cq, long ts) {
 +      this.row = row;
 +      this.cf = cf;
 +      this.cq = cq;
 +      this.ts = ts;
 +    }
 +
 +    Key createKey() {
 +      Text trow = createRow(row);
 +      Text tcf = createCF(cf);
 +      Text tcq = createCQ(cq);
 +
 +      return new Key(trow, tcf, tcq, ts);
 +    }
 +
 +    IntKey increment() {
 +
 +      IntKey ik = new IntKey(this);
 +
 +      ik.ts++;
 +      if (ik.ts >= TS_LIMIT) {
 +        ik.ts = 0;
 +        ik.cq++;
 +        if (ik.cq >= CQ_LIMIT) {
 +          ik.cq = 0;
 +          ik.cf++;
 +          if (ik.cf >= CF_LIMIT) {
 +            ik.cf = 0;
 +            ik.row++;
 +          }
 +        }
 +      }
 +
 +      return ik;
 +    }
 +
 +  }
 +
 +  private void scanRange(Connector c, String table, IntKey ik1, IntKey ik2) throws Exception {
 +    scanRange(c, table, ik1, false, ik2, false);
 +    scanRange(c, table, ik1, false, ik2, true);
 +    scanRange(c, table, ik1, true, ik2, false);
 +    scanRange(c, table, ik1, true, ik2, true);
 +  }
 +
 +  private void scanRange(Connector c, String table, IntKey ik1, boolean inclusive1, IntKey ik2, boolean inclusive2) throws Exception {
 +    Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
 +
 +    Key key1 = null;
 +    Key key2 = null;
 +
 +    IntKey expectedIntKey;
 +    IntKey expectedEndIntKey;
 +
 +    if (ik1 != null) {
 +      key1 = ik1.createKey();
 +      expectedIntKey = ik1;
 +
 +      if (!inclusive1) {
 +        expectedIntKey = expectedIntKey.increment();
 +      }
 +    } else {
 +      expectedIntKey = new IntKey(0, 0, 0, 0);
 +    }
 +
 +    if (ik2 != null) {
 +      key2 = ik2.createKey();
 +      expectedEndIntKey = ik2;
 +
 +      if (inclusive2) {
 +        expectedEndIntKey = expectedEndIntKey.increment();
 +      }
 +    } else {
 +      expectedEndIntKey = new IntKey(ROW_LIMIT, 0, 0, 0);
 +    }
 +
 +    Range range = new Range(key1, inclusive1, key2, inclusive2);
 +
 +    scanner.setRange(range);
 +
 +    for (Entry<Key,Value> entry : scanner) {
 +
 +      Key expectedKey = expectedIntKey.createKey();
 +      if (!expectedKey.equals(entry.getKey())) {
 +        throw new Exception(" " + expectedKey + " != " + entry.getKey());
 +      }
 +
 +      expectedIntKey = expectedIntKey.increment();
 +    }
 +
 +    if (!expectedIntKey.createKey().equals(expectedEndIntKey.createKey())) {
 +      throw new Exception(" " + expectedIntKey.createKey() + " != " + expectedEndIntKey.createKey());
 +    }
 +  }
 +
 +  private static Text createCF(int cf) {
 +    Text tcf = new Text(String.format("cf_%03d", cf));
 +    return tcf;
 +  }
 +
 +  private static Text createCQ(int cq) {
 +    Text tcq = new Text(String.format("cq_%03d", cq));
 +    return tcq;
 +  }
 +
 +  private static Text createRow(int row) {
 +    Text trow = new Text(String.format("r_%06d", row));
 +    return trow;
 +  }
 +
 +  private void insertData(Connector c, String table) throws Exception {
 +
 +    BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
 +
 +    for (int i = 0; i < ROW_LIMIT; i++) {
 +      Mutation m = new Mutation(createRow(i));
 +
 +      for (int j = 0; j < CF_LIMIT; j++) {
 +        for (int k = 0; k < CQ_LIMIT; k++) {
 +          for (int t = 0; t < TS_LIMIT; t++) {
-             m.put(createCF(j), createCQ(k), t, new Value(String.format("%06d_%03d_%03d_%03d", i, j, k, t).getBytes(Constants.UTF8)));
++            m.put(createCF(j), createCQ(k), t, new Value(String.format("%06d_%03d_%03d_%03d", i, j, k, t).getBytes(UTF_8)));
 +          }
 +        }
 +      }
 +
 +      bw.addMutation(m);
 +    }
 +
 +    bw.close();
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
index ed2f0ba,0000000..3547b68
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
@@@ -1,111 -1,0 +1,112 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.util.Collections;
 +import java.util.Iterator;
 +import java.util.Map.Entry;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.util.UtilWaitThread;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class ScanSessionTimeOutIT extends ConfigurableMacIT {
 +  
 +  @Override
 +  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_SESSION_MAXIDLE.getKey(), "3"));
 +  }
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 60;
 +  }
 +
 +  @Test
 +  public void run() throws Exception {
 +    Connector c = getConnector();
 +    c.tableOperations().create("abc");
 +    
 +    BatchWriter bw = c.createBatchWriter("abc", new BatchWriterConfig());
 +    
 +    for (int i = 0; i < 100000; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%08d", i)));
 +      for (int j = 0; j < 3; j++)
-         m.put(new Text("cf1"), new Text("cq" + j), new Value((i + "_" + j).getBytes(Constants.UTF8)));
++        m.put(new Text("cf1"), new Text("cq" + j), new Value((i + "_" + j).getBytes(UTF_8)));
 +      
 +      bw.addMutation(m);
 +    }
 +    
 +    bw.close();
 +    
 +    Scanner scanner = c.createScanner("abc", new Authorizations());
 +    scanner.setBatchSize(1000);
 +    
 +    Iterator<Entry<Key,Value>> iter = scanner.iterator();
 +    
 +    verify(iter, 0, 200);
 +    
 +    // sleep three times the session timeout
 +    UtilWaitThread.sleep(9000);
 +    
 +    verify(iter, 200, 100000);
 +    
 +  }
 +  
 +  private void verify(Iterator<Entry<Key,Value>> iter, int start, int stop) throws Exception {
 +    for (int i = start; i < stop; i++) {
 +      
 +      Text er = new Text(String.format("%08d", i));
 +      
 +      for (int j = 0; j < 3; j++) {
 +        Entry<Key,Value> entry = iter.next();
 +        
 +        if (!entry.getKey().getRow().equals(er)) {
 +          throw new Exception("row " + entry.getKey().getRow() + " != " + er);
 +        }
 +        
 +        if (!entry.getKey().getColumnFamily().equals(new Text("cf1"))) {
 +          throw new Exception("cf " + entry.getKey().getColumnFamily() + " != cf1");
 +        }
 +        
 +        if (!entry.getKey().getColumnQualifier().equals(new Text("cq" + j))) {
 +          throw new Exception("cq " + entry.getKey().getColumnQualifier() + " != cq" + j);
 +        }
 +        
 +        if (!entry.getValue().toString().equals("" + i + "_" + j)) {
 +          throw new Exception("value " + entry.getValue() + " != " + i + "_" + j);
 +        }
 +        
 +      }
 +    }
 +    
 +  }
 +  
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
index ea92949,0000000..43d3a86
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
@@@ -1,272 -1,0 +1,273 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.base.Charsets.UTF_8;
 +import static org.junit.Assert.assertEquals;
 +
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.SortedMap;
 +import java.util.TreeMap;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.impl.ScannerImpl;
 +import org.apache.accumulo.core.client.impl.Writer;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.KeyExtent;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.file.rfile.RFile;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.schema.DataFileValue;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.util.ColumnFQ;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 +import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
 +import org.apache.accumulo.fate.zookeeper.ZooLock.LockWatcher;
 +import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 +import org.apache.accumulo.server.ServerConstants;
 +import org.apache.accumulo.server.client.HdfsZooInstance;
 +import org.apache.accumulo.server.fs.FileRef;
 +import org.apache.accumulo.server.master.state.Assignment;
 +import org.apache.accumulo.server.master.state.TServerInstance;
 +import org.apache.accumulo.server.security.SystemCredentials;
 +import org.apache.accumulo.server.tablets.TabletTime;
 +import org.apache.accumulo.server.util.FileUtil;
 +import org.apache.accumulo.server.util.MasterMetadataUtil;
 +import org.apache.accumulo.server.util.MetadataTableUtil;
 +import org.apache.accumulo.server.zookeeper.TransactionWatcher;
 +import org.apache.accumulo.server.zookeeper.ZooLock;
 +import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 +import org.apache.accumulo.tserver.TabletServer;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class SplitRecoveryIT extends ConfigurableMacIT {
 +  
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 60;
 +  }
 +  
 +  private KeyExtent nke(String table, String endRow, String prevEndRow) {
 +    return new KeyExtent(new Text(table), endRow == null ? null : new Text(endRow), prevEndRow == null ? null : new Text(prevEndRow));
 +  }
 +  
 +  private void run() throws Exception {
 +    String zPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + "/testLock";
 +    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
 +    zoo.putPersistentData(zPath, new byte[0], NodeExistsPolicy.OVERWRITE);
 +    ZooLock zl = new ZooLock(zPath);
 +    boolean gotLock = zl.tryLock(new LockWatcher() {
 +      
 +      @Override
 +      public void lostLock(LockLossReason reason) {
 +        System.exit(-1);
 +        
 +      }
 +      
 +      @Override
 +      public void unableToMonitorLockNode(Throwable e) {
 +        System.exit(-1);
 +      }
-     }, "foo".getBytes(Constants.UTF8));
++    }, "foo".getBytes(UTF_8));
 +    
 +    if (!gotLock) {
 +      System.err.println("Failed to get lock " + zPath);
 +    }
 +    
 +    // run test for a table with one tablet
 +    runSplitRecoveryTest(0, "sp", 0, zl, nke("foo0", null, null));
 +    runSplitRecoveryTest(1, "sp", 0, zl, nke("foo1", null, null));
 +    
 +    // run test for tables with two tablets, run test on first and last tablet
 +    runSplitRecoveryTest(0, "k", 0, zl, nke("foo2", "m", null), nke("foo2", null, "m"));
 +    runSplitRecoveryTest(1, "k", 0, zl, nke("foo3", "m", null), nke("foo3", null, "m"));
 +    runSplitRecoveryTest(0, "o", 1, zl, nke("foo4", "m", null), nke("foo4", null, "m"));
 +    runSplitRecoveryTest(1, "o", 1, zl, nke("foo5", "m", null), nke("foo5", null, "m"));
 +    
 +    // run test for table w/ three tablets, run test on middle tablet
 +    runSplitRecoveryTest(0, "o", 1, zl, nke("foo6", "m", null), nke("foo6", "r", "m"), nke("foo6", null, "r"));
 +    runSplitRecoveryTest(1, "o", 1, zl, nke("foo7", "m", null), nke("foo7", "r", "m"), nke("foo7", null, "r"));
 +    
 +    // run test for table w/ three tablets, run test on first
 +    runSplitRecoveryTest(0, "g", 0, zl, nke("foo8", "m", null), nke("foo8", "r", "m"), nke("foo8", null, "r"));
 +    runSplitRecoveryTest(1, "g", 0, zl, nke("foo9", "m", null), nke("foo9", "r", "m"), nke("foo9", null, "r"));
 +    
 +    // run test for table w/ three tablets, run test on last tablet
 +    runSplitRecoveryTest(0, "w", 2, zl, nke("fooa", "m", null), nke("fooa", "r", "m"), nke("fooa", null, "r"));
 +    runSplitRecoveryTest(1, "w", 2, zl, nke("foob", "m", null), nke("foob", "r", "m"), nke("foob", null, "r"));
 +  }
 +  
 +  private void runSplitRecoveryTest(int failPoint, String mr, int extentToSplit, ZooLock zl, KeyExtent... extents) throws Exception {
 +    
 +    Text midRow = new Text(mr);
 +    
 +    SortedMap<FileRef,DataFileValue> splitMapFiles = null;
 +    
 +    for (int i = 0; i < extents.length; i++) {
 +      KeyExtent extent = extents[i];
 +      
 +      String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId().toString() + "/dir_" + i;
 +      MetadataTableUtil.addTablet(extent, tdir, SystemCredentials.get(), TabletTime.LOGICAL_TIME_ID, zl);
 +      SortedMap<FileRef,DataFileValue> mapFiles = new TreeMap<FileRef,DataFileValue>();
 +      mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"), new DataFileValue(1000017 + i, 10000 + i));
 +      
 +      if (i == extentToSplit) {
 +        splitMapFiles = mapFiles;
 +      }
 +      int tid = 0;
 +      TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
 +      MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", SystemCredentials.get(), zl);
 +    }
 +    
 +    KeyExtent extent = extents[extentToSplit];
 +    
 +    KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);
 +    KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
 +    
 +    splitPartiallyAndRecover(extent, high, low, .4, splitMapFiles, midRow, "localhost:1234", failPoint, zl);
 +  }
 +  
 +  private void splitPartiallyAndRecover(KeyExtent extent, KeyExtent high, KeyExtent low, double splitRatio, SortedMap<FileRef,DataFileValue> mapFiles,
 +      Text midRow, String location, int steps, ZooLock zl) throws Exception {
 +    
 +    SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<FileRef,DataFileValue>();
 +    SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<FileRef,DataFileValue>();
 +    List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
 +    
 +    MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), mapFiles, lowDatafileSizes,
 +        highDatafileSizes, highDatafilesToRemove);
 +    
 +    MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SystemCredentials.get(), zl);
 +    TServerInstance instance = new TServerInstance(location, zl.getSessionId());
 +    Writer writer = new Writer(HdfsZooInstance.getInstance(), SystemCredentials.get(), MetadataTable.ID);
 +    Assignment assignment = new Assignment(high, instance);
 +    Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
 +    m.put(TabletsSection.FutureLocationColumnFamily.NAME, assignment.server.asColumnQualifier(), assignment.server.asMutationValue());
 +    writer.update(m);
 +    
 +    if (steps >= 1) {
 +      Map<FileRef,Long> bulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(), extent);
 +      MasterMetadataUtil.addNewTablet(low, "/lowDir", instance, lowDatafileSizes, bulkFiles, SystemCredentials.get(), TabletTime.LOGICAL_TIME_ID + "0", -1L,
 +          -1L, zl);
 +    }
 +    if (steps >= 2)
 +      MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SystemCredentials.get(), zl);
 +    
 +    TabletServer.verifyTabletInformation(high, instance, null, "127.0.0.1:0", zl);
 +    
 +    if (steps >= 1) {
 +      ensureTabletHasNoUnexpectedMetadataEntries(low, lowDatafileSizes);
 +      ensureTabletHasNoUnexpectedMetadataEntries(high, highDatafileSizes);
 +      
 +      Map<FileRef,Long> lowBulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(), low);
 +      Map<FileRef,Long> highBulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(), high);
 +      
 +      if (!lowBulkFiles.equals(highBulkFiles)) {
 +        throw new Exception(" " + lowBulkFiles + " != " + highBulkFiles + " " + low + " " + high);
 +      }
 +      
 +      if (lowBulkFiles.size() == 0) {
 +        throw new Exception(" no bulk files " + low);
 +      }
 +    } else {
 +      ensureTabletHasNoUnexpectedMetadataEntries(extent, mapFiles);
 +    }
 +  }
 +  
 +  private void ensureTabletHasNoUnexpectedMetadataEntries(KeyExtent extent, SortedMap<FileRef,DataFileValue> expectedMapFiles) throws Exception {
 +    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SystemCredentials.get(), MetadataTable.ID, Authorizations.EMPTY);
 +    scanner.setRange(extent.toMetadataRange());
 +    
 +    HashSet<ColumnFQ> expectedColumns = new HashSet<ColumnFQ>();
 +    expectedColumns.add(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN);
 +    expectedColumns.add(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
 +    expectedColumns.add(TabletsSection.ServerColumnFamily.TIME_COLUMN);
 +    expectedColumns.add(TabletsSection.ServerColumnFamily.LOCK_COLUMN);
 +    
 +    HashSet<Text> expectedColumnFamilies = new HashSet<Text>();
 +    expectedColumnFamilies.add(DataFileColumnFamily.NAME);
 +    expectedColumnFamilies.add(TabletsSection.FutureLocationColumnFamily.NAME);
 +    expectedColumnFamilies.add(TabletsSection.CurrentLocationColumnFamily.NAME);
 +    expectedColumnFamilies.add(TabletsSection.LastLocationColumnFamily.NAME);
 +    expectedColumnFamilies.add(TabletsSection.BulkFileColumnFamily.NAME);
 +    
 +    Iterator<Entry<Key,Value>> iter = scanner.iterator();
 +    while (iter.hasNext()) {
 +      Key key = iter.next().getKey();
 +      
 +      if (!key.getRow().equals(extent.getMetadataEntry())) {
 +        throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
 +      }
 +      
 +      if (expectedColumnFamilies.contains(key.getColumnFamily())) {
 +        continue;
 +      }
 +      
 +      if (expectedColumns.remove(new ColumnFQ(key))) {
 +        continue;
 +      }
 +      
 +      throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
 +    }
 +    if (expectedColumns.size() > 0) {
 +      throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
 +    }
 +    
 +    SortedMap<FileRef,DataFileValue> fixedMapFiles = MetadataTableUtil.getDataFileSizes(extent, SystemCredentials.get());
 +    verifySame(expectedMapFiles, fixedMapFiles);
 +  }
 +  
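 +  // Asserts both maps contain exactly the same files with equal size entries.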
 +  private void verifySame(SortedMap<FileRef,DataFileValue> datafileSizes, SortedMap<FileRef,DataFileValue> fixedDatafileSizes) throws Exception {
 +    
 +    if (!datafileSizes.keySet().containsAll(fixedDatafileSizes.keySet()) || !fixedDatafileSizes.keySet().containsAll(datafileSizes.keySet())) {
 +      throw new Exception("Key sets not the same " + datafileSizes.keySet() + " !=  " + fixedDatafileSizes.keySet());
 +    }
 +    
 +    for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
 +      DataFileValue dfv = entry.getValue();
 +      DataFileValue otherDfv = fixedDatafileSizes.get(entry.getKey());
 +      
 +      if (!dfv.equals(otherDfv)) {
 +        throw new Exception(entry.getKey() + " dfv not equal  " + dfv + "  " + otherDfv);
 +      }
 +    }
 +  }
 +
 +  public static void main(String[] args) throws Exception {
 +    new SplitRecoveryIT().run();
 +  }
 +  
 +  @Test
 +  public void test() throws Exception {
 +    assertEquals(0, exec(SplitRecoveryIT.class).waitFor());
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
index b74399d,0000000..fccc79f
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
@@@ -1,101 -1,0 +1,101 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.base.Charsets.UTF_8;
 +import static org.junit.Assert.assertEquals;
 +
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.TreeSet;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.minicluster.MemoryUnit;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class TabletIT extends ConfigurableMacIT {
 +
 +  private static final int N = 1000;
 +
 +  @Override
 +  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    Map<String,String> siteConfig = new HashMap<String,String>();
 +    siteConfig.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "200");
 +    siteConfig.put(Property.TSERV_MAXMEM.getKey(), "128M");
 +    cfg.setDefaultMemory(256, MemoryUnit.MEGABYTE);
 +    cfg.setSiteConfig(siteConfig);
 +  }
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 2 * 60;
 +  }
 +
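 +  // Runs two passes over the same table: the first creates, pre-splits, and
 +  // populates it under the tiny 200-byte split threshold; the second only re-scans
 +  // and re-verifies the data.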
 +  @Test
 +  public void createTableTest() throws Exception {
 +    String tableName = getUniqueNames(1)[0];
 +    createTableTest(tableName, false);
 +    createTableTest(tableName, true);
 +  }
 +
 +  public void createTableTest(String tableName, boolean readOnly) throws Exception {
 +    // create the test table within accumulo
 +    Connector connector = getConnector();
 +
 +    if (!readOnly) {
 +      TreeSet<Text> keys = new TreeSet<Text>();
 +      for (int i = N / 100; i < N; i += N / 100) {
 +        keys.add(new Text(String.format("%05d", i)));
 +      }
 +
 +      // presplit
 +      connector.tableOperations().create(tableName);
 +      connector.tableOperations().addSplits(tableName, keys);
 +      BatchWriter b = connector.createBatchWriter(tableName, new BatchWriterConfig());
 +
 +      // populate
 +      for (int i = 0; i < N; i++) {
 +        Mutation m = new Mutation(new Text(String.format("%05d", i)));
-         m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes(Constants.UTF8)));
++        m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes(UTF_8)));
 +        b.addMutation(m);
 +      }
 +      b.close();
 +    }
 +
 +    Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
 +    int count = 0;
 +    for (Entry<Key,Value> elt : scanner) {
 +      String expected = String.format("%05d", count);
 +      assertEquals(expected, elt.getKey().getRow().toString());
 +      count++;
 +    }
 +    assertEquals(N, count);
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
----------------------------------------------------------------------
diff --cc test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
index 1dd0bf1,0000000..f2460cf
mode 100644,000000..100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
@@@ -1,303 -1,0 +1,304 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.base.Charsets.UTF_8;
++
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.Set;
 +import java.util.SortedSet;
 +import java.util.TreeSet;
 +
- import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.BatchScanner;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.security.ColumnVisibility;
 +import org.apache.accumulo.core.util.ByteArraySet;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
 +public class VisibilityIT extends SimpleMacIT {
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 2 * 60;
 +  }
 +
 +  @Test
 +  public void run() throws Exception {
 +    Connector c = getConnector();
 +    String[] tableNames = getUniqueNames(2);
 +    String table = tableNames[0];
 +    c.tableOperations().create(table);
 +    String table2 = tableNames[1];
 +    c.tableOperations().create(table2);
 +    c.tableOperations().setProperty(table2, Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "DEFLABEL");
 +
 +    insertData(c, table);
 +    queryData(c, table);
 +    deleteData(c, table);
 +
 +    insertDefaultData(c, table2);
 +    queryDefaultData(c, table2);
 +
 +  }
 +
 +  private static SortedSet<String> nss(String... labels) {
 +    TreeSet<String> ts = new TreeSet<String>();
 +
 +    for (String s : labels) {
 +      ts.add(s);
 +    }
 +
 +    return ts;
 +  }
 +
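 +  // Writes a single cell protected by the given column visibility expression.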
 +  private void mput(Mutation m, String cf, String cq, String cv, String val) {
-     ColumnVisibility le = new ColumnVisibility(cv.getBytes(Constants.UTF8));
-     m.put(new Text(cf), new Text(cq), le, new Value(val.getBytes(Constants.UTF8)));
++    ColumnVisibility le = new ColumnVisibility(cv.getBytes(UTF_8));
++    m.put(new Text(cf), new Text(cq), le, new Value(val.getBytes(UTF_8)));
 +  }
 +
 +  private void mputDelete(Mutation m, String cf, String cq, String cv) {
-     ColumnVisibility le = new ColumnVisibility(cv.getBytes(Constants.UTF8));
++    ColumnVisibility le = new ColumnVisibility(cv.getBytes(UTF_8));
 +    m.putDelete(new Text(cf), new Text(cq), le);
 +  }
 +
 +  private void insertData(Connector c, String tableName) throws Exception {
 +
 +    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
 +    Mutation m1 = new Mutation(new Text("row1"));
 +
 +    mput(m1, "cf1", "cq1", "", "v1");
 +    mput(m1, "cf1", "cq1", "A", "v2");
 +    mput(m1, "cf1", "cq1", "B", "v3");
 +    mput(m1, "cf1", "cq1", "A&B", "v4");
 +    mput(m1, "cf1", "cq1", "A&(L|M)", "v5");
 +    mput(m1, "cf1", "cq1", "B&(L|M)", "v6");
 +    mput(m1, "cf1", "cq1", "A&B&(L|M)", "v7");
 +    mput(m1, "cf1", "cq1", "A&B&(L)", "v8");
 +    mput(m1, "cf1", "cq1", "A&FOO", "v9");
 +    mput(m1, "cf1", "cq1", "A&FOO&(L|M)", "v10");
 +    mput(m1, "cf1", "cq1", "FOO", "v11");
 +    mput(m1, "cf1", "cq1", "(A|B)&FOO&(L|M)", "v12");
 +    mput(m1, "cf1", "cq1", "A&B&(L|M|FOO)", "v13");
 +
 +    bw.addMutation(m1);
 +    bw.close();
 +  }
 +
 +  private void deleteData(Connector c, String tableName) throws Exception {
 +
 +    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
 +    Mutation m1 = new Mutation(new Text("row1"));
 +
 +    mputDelete(m1, "cf1", "cq1", "");
 +    mputDelete(m1, "cf1", "cq1", "A");
 +    mputDelete(m1, "cf1", "cq1", "A&B");
 +    mputDelete(m1, "cf1", "cq1", "B&(L|M)");
 +    mputDelete(m1, "cf1", "cq1", "A&B&(L)");
 +    mputDelete(m1, "cf1", "cq1", "A&FOO&(L|M)");
 +    mputDelete(m1, "cf1", "cq1", "(A|B)&FOO&(L|M)");
 +    mputDelete(m1, "cf1", "cq1", "FOO&A"); // should not delete anything
 +
 +    bw.addMutation(m1);
 +    bw.close();
 +
 +    Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>();
 +
 +    expected.put(nss("A", "L"), nss("v5"));
 +    expected.put(nss("A", "M"), nss("v5"));
 +    expected.put(nss("B"), nss("v3"));
 +    expected.put(nss("Z"), nss());
 +    expected.put(nss("A", "B", "L"), nss("v7", "v13"));
 +    expected.put(nss("A", "B", "M"), nss("v7", "v13"));
 +    expected.put(nss("A", "B", "FOO"), nss("v13"));
 +    expected.put(nss("FOO"), nss("v11"));
 +    expected.put(nss("A", "FOO"), nss("v9"));
 +
 +    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
 +  }
 +
 +  private void insertDefaultData(Connector c, String tableName) throws Exception {
 +    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
 +    Mutation m1 = new Mutation(new Text("row1"));
 +
 +    mput(m1, "cf1", "cq1", "BASE", "v1");
 +    mput(m1, "cf1", "cq2", "DEFLABEL", "v2");
 +    mput(m1, "cf1", "cq3", "", "v3");
 +
 +    bw.addMutation(m1);
 +    bw.close();
 +  }
 +
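 +  // Recursively collects prefix plus every subset of suffix; starting from an empty
 +  // prefix this enumerates the power set of the authorization labels.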
 +  private static void uniqueCombos(List<Set<String>> all, Set<String> prefix, Set<String> suffix) {
 +
 +    all.add(prefix);
 +
 +    TreeSet<String> ss = new TreeSet<String>(suffix);
 +
 +    for (String s : suffix) {
 +      TreeSet<String> ps = new TreeSet<String>(prefix);
 +      ps.add(s);
 +      ss.remove(s);
 +
 +      uniqueCombos(all, ps, ss);
 +    }
 +  }
 +
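 +  // Expected values for each authorization set, matching the visibility expressions
 +  // written by insertData.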
 +  private void queryData(Connector c, String tableName) throws Exception {
 +    Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>();
 +    expected.put(nss(), nss("v1"));
 +    expected.put(nss("A"), nss("v2"));
 +    expected.put(nss("A", "L"), nss("v5"));
 +    expected.put(nss("A", "M"), nss("v5"));
 +    expected.put(nss("B"), nss("v3"));
 +    expected.put(nss("B", "L"), nss("v6"));
 +    expected.put(nss("B", "M"), nss("v6"));
 +    expected.put(nss("Z"), nss());
 +    expected.put(nss("A", "B"), nss("v4"));
 +    expected.put(nss("A", "B", "L"), nss("v7", "v8", "v13"));
 +    expected.put(nss("A", "B", "M"), nss("v7", "v13"));
 +    expected.put(nss("A", "B", "FOO"), nss("v13"));
 +    expected.put(nss("FOO"), nss("v11"));
 +    expected.put(nss("A", "FOO"), nss("v9"));
 +    expected.put(nss("A", "FOO", "L"), nss("v10", "v12"));
 +    expected.put(nss("A", "FOO", "M"), nss("v10", "v12"));
 +    expected.put(nss("B", "FOO", "L"), nss("v12"));
 +    expected.put(nss("B", "FOO", "M"), nss("v12"));
 +
 +    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
 +    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "L", "M", "Z"), expected);
 +    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "Z"), expected);
 +    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("Z"), expected);
 +    queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss(), expected);
 +  }
 +
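 +  // For every combination of labels, the values a scan should see are the union of
 +  // the expected entries whose label set is covered by the scan auths intersected
 +  // with the user's auths; each combination is verified against the table.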
 +  private void queryData(Connector c, String tableName, Set<String> allAuths, Set<String> userAuths, Map<Set<String>,Set<String>> expected) throws Exception {
 +
 +    c.securityOperations().changeUserAuthorizations("root", new Authorizations(nbas(userAuths)));
 +
 +    ArrayList<Set<String>> combos = new ArrayList<Set<String>>();
 +    uniqueCombos(combos, nss(), allAuths);
 +
 +    for (Set<String> set1 : combos) {
 +      Set<String> e = new TreeSet<String>();
 +      for (Set<String> set2 : combos) {
 +
 +        set2 = new HashSet<String>(set2);
 +        set2.retainAll(userAuths);
 +
 +        if (set1.containsAll(set2) && expected.containsKey(set2)) {
 +          e.addAll(expected.get(set2));
 +        }
 +      }
 +
 +      set1.retainAll(userAuths);
 +      verify(c, tableName, set1, e);
 +    }
 +
 +  }
 +
 +  private void queryDefaultData(Connector c, String tableName) throws Exception {
 +    Scanner scanner;
 +
 +    // should return no records
 +    c.securityOperations().changeUserAuthorizations("root", new Authorizations("BASE", "DEFLABEL"));
 +    scanner = getConnector().createScanner(tableName, new Authorizations());
 +    verifyDefault(scanner, 0);
 +
 +    // should return one record
 +    scanner = getConnector().createScanner(tableName, new Authorizations("BASE"));
 +    verifyDefault(scanner, 1);
 +
 +    // should return all three records
 +    scanner = getConnector().createScanner(tableName, new Authorizations("BASE", "DEFLABEL"));
 +    verifyDefault(scanner, 3);
 +  }
 +
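 +  // Counts the entries the scanner returns and fails unless exactly expectedCount
 +  // are seen.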
 +  private void verifyDefault(Scanner scanner, int expectedCount) throws Exception {
 +    for (@SuppressWarnings("unused")
 +    Entry<Key,Value> entry : scanner)
 +      --expectedCount;
 +    if (expectedCount != 0)
 +      throw new Exception("Scan entry count off by " + expectedCount);
 +  }
 +
 +  private void verify(Connector c, String tableName, Set<String> auths, Set<String> expectedValues) throws Exception {
 +    ByteArraySet bas = nbas(auths);
 +
 +    try {
 +      verify(c, tableName, bas, expectedValues.toArray(new String[0]));
 +    } catch (Exception e) {
 +      throw new Exception("Verification failed auths=" + auths + " exp=" + expectedValues, e);
 +    }
 +  }
 +
 +  private ByteArraySet nbas(Set<String> auths) {
 +    ByteArraySet bas = new ByteArraySet();
 +    for (String auth : auths) {
-       bas.add(auth.getBytes(Constants.UTF8));
++      bas.add(auth.getBytes(UTF_8));
 +    }
 +    return bas;
 +  }
 +
 +  private void verify(Connector c, String tableName, ByteArraySet nss, String... expected) throws Exception {
 +    Scanner scanner = c.createScanner(tableName, new Authorizations(nss));
 +    verify(scanner.iterator(), expected);
 +
 +    BatchScanner bs = getConnector().createBatchScanner(tableName, new Authorizations(nss), 3);
 +    bs.setRanges(Collections.singleton(new Range()));
 +    verify(bs.iterator(), expected);
 +    bs.close();
 +  }
 +
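 +  // Consumes the iterator, requiring each value to appear at most once, then checks
 +  // that the set of values seen equals exactly the expected values.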
 +  private void verify(Iterator<Entry<Key,Value>> iter, String... expected) throws Exception {
 +    HashSet<String> valuesSeen = new HashSet<String>();
 +
 +    while (iter.hasNext()) {
 +      Entry<Key,Value> entry = iter.next();
 +      if (valuesSeen.contains(entry.getValue().toString())) {
 +        throw new Exception("Value seen twice");
 +      }
 +      valuesSeen.add(entry.getValue().toString());
 +    }
 +
 +    for (String ev : expected) {
 +      if (!valuesSeen.remove(ev)) {
 +        throw new Exception("Did not see expected value " + ev);
 +      }
 +    }
 +
 +    if (valuesSeen.size() != 0) {
 +      throw new Exception("Saw more values than expected " + valuesSeen);
 +    }
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9b20a9d4/trace/src/main/java/org/apache/accumulo/trace/instrument/receivers/ZooSpanClient.java
----------------------------------------------------------------------

