accumulo-commits mailing list archives

From ctubb...@apache.org
Subject [10/24] accumulo git commit: Merge branch '1.7' into 1.8
Date Tue, 25 Jul 2017 23:02:58 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
index 27d84de,0000000..39b0342
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
@@@ -1,337 -1,0 +1,336 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +
 +import java.util.HashMap;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.MutationsRejectedException;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.ConstraintViolationSummary;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint;
 +import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
 +import org.apache.accumulo.harness.AccumuloClusterHarness;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
- import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
- 
 +public class ConstraintIT extends AccumuloClusterHarness {
 +  private static final Logger log = LoggerFactory.getLogger(ConstraintIT.class);
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 30;
 +  }
 +
 +  @Test
 +  public void run() throws Exception {
 +    String[] tableNames = getUniqueNames(3);
 +    Connector c = getConnector();
 +    for (String table : tableNames) {
 +      c.tableOperations().create(table);
 +      c.tableOperations().addConstraint(table, NumericValueConstraint.class.getName());
 +      c.tableOperations().addConstraint(table, AlphaNumKeyConstraint.class.getName());
 +    }
 +
 +    // A static sleep to give ZooKeeper time to propagate the constraint updates
 +    Thread.sleep(10 * 1000);
 +
 +    // Then check that the client has at least gotten the updates
 +    for (String table : tableNames) {
 +      log.debug("Checking constraints on {}", table);
 +      Map<String,Integer> constraints = c.tableOperations().listConstraints(table);
 +      while (!constraints.containsKey(NumericValueConstraint.class.getName()) || !constraints.containsKey(AlphaNumKeyConstraint.class.getName())) {
 +        log.debug("Failed to verify constraints. Sleeping and retrying");
 +        Thread.sleep(2000);
 +        constraints = c.tableOperations().listConstraints(table);
 +      }
 +      log.debug("Verified all constraints on {}", table);
 +    }
 +
 +    log.debug("Verified constraints on all tables. Running tests");
 +
 +    test1(tableNames[0]);
 +
 +    test2(tableNames[1], false);
 +    test2(tableNames[2], true);
 +  }
 +
 +  private void test1(String tableName) throws Exception {
 +    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
 +
 +    Mutation mut1 = new Mutation(new Text("r1"));
 +    mut1.put(new Text("cf1"), new Text("cq1"), new Value("123".getBytes(UTF_8)));
 +
 +    bw.addMutation(mut1);
 +
 +    // should not throw any exceptions
 +    bw.close();
 +
 +    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
 +
 +    // create a mutation with a non numeric value
 +    Mutation mut2 = new Mutation(new Text("r1"));
 +    mut2.put(new Text("cf1"), new Text("cq1"), new Value("123a".getBytes(UTF_8)));
 +
 +    bw.addMutation(mut2);
 +
 +    boolean sawMRE = false;
 +
 +    try {
 +      bw.close();
 +      // should not get here
 +      throw new Exception("Test failed, constraint did not catch bad mutation");
 +    } catch (MutationsRejectedException mre) {
 +      sawMRE = true;
 +
 +      // verify constraint violation summary
 +      List<ConstraintViolationSummary> cvsl = mre.getConstraintViolationSummaries();
 +
 +      if (cvsl.size() != 1) {
 +        throw new Exception("Unexpected constraints");
 +      }
 +
 +      for (ConstraintViolationSummary cvs : cvsl) {
 +        if (!cvs.constrainClass.equals(NumericValueConstraint.class.getName())) {
 +          throw new Exception("Unexpected constraint class " + cvs.constrainClass);
 +        }
 +
 +        if (cvs.numberOfViolatingMutations != 1) {
 +          throw new Exception("Unexpected # violating mutations " + cvs.numberOfViolatingMutations);
 +        }
 +      }
 +    }
 +
 +    if (!sawMRE) {
 +      throw new Exception("Did not see MutationsRejectedException");
 +    }
 +
 +    // verify mutation did not go through
 +    Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
 +    scanner.setRange(new Range(new Text("r1")));
 +
 +    Iterator<Entry<Key,Value>> iter = scanner.iterator();
 +    Entry<Key,Value> entry = iter.next();
 +
 +    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
 +        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123".getBytes(UTF_8)))) {
 +      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    if (iter.hasNext()) {
 +      entry = iter.next();
 +      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    // remove the numeric value constraint
 +    getConnector().tableOperations().removeConstraint(tableName, 2);
 +    sleepUninterruptibly(1, TimeUnit.SECONDS);
 +
 +    // now should be able to add a non numeric value
 +    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
 +    bw.addMutation(mut2);
 +    bw.close();
 +
 +    // verify mutation went through
 +    iter = scanner.iterator();
 +    entry = iter.next();
 +
 +    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
 +        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123a".getBytes(UTF_8)))) {
 +      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    if (iter.hasNext()) {
 +      entry = iter.next();
 +      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    // add a constraint that references a non-existent class
 +    getConnector().tableOperations().setProperty(tableName, Property.TABLE_CONSTRAINT_PREFIX + "1", "com.foobar.nonExistantClass");
 +    sleepUninterruptibly(1, TimeUnit.SECONDS);
 +
 +    // add a mutation
 +    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
 +
 +    Mutation mut3 = new Mutation(new Text("r1"));
 +    mut3.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes(UTF_8)));
 +
 +    bw.addMutation(mut3);
 +
 +    sawMRE = false;
 +
 +    try {
 +      bw.close();
 +      // should not get here
 +      throw new Exception("Test failed, mutation went through when table had bad constraints");
 +    } catch (MutationsRejectedException mre) {
 +      sawMRE = true;
 +    }
 +
 +    if (!sawMRE) {
 +      throw new Exception("Did not see MutationsRejectedException");
 +    }
 +
 +    // verify the mutation did not go through
 +    iter = scanner.iterator();
 +    entry = iter.next();
 +
 +    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
 +        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123a".getBytes(UTF_8)))) {
 +      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    if (iter.hasNext()) {
 +      entry = iter.next();
 +      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    // remove the bad constraint
 +    getConnector().tableOperations().removeConstraint(tableName, 1);
 +    sleepUninterruptibly(1, TimeUnit.SECONDS);
 +
 +    // try the mutation again
 +    bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
 +    bw.addMutation(mut3);
 +    bw.close();
 +
 +    // verify it went through
 +    iter = scanner.iterator();
 +    entry = iter.next();
 +
 +    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
 +        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("foo".getBytes(UTF_8)))) {
 +      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    if (iter.hasNext()) {
 +      entry = iter.next();
 +      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +  }
 +
 +  private Mutation newMut(String row, String cf, String cq, String val) {
 +    Mutation mut1 = new Mutation(new Text(row));
 +    mut1.put(new Text(cf), new Text(cq), new Value(val.getBytes(UTF_8)));
 +    return mut1;
 +  }
 +
 +  private void test2(String table, boolean doFlush) throws Exception {
 +    // test sending multiple mutations with multiple constraint violations... all of the non-violating mutations
 +    // should go through
 +    int numericErrors = 2;
 +
 +    BatchWriter bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
 +    bw.addMutation(newMut("r1", "cf1", "cq1", "123"));
 +    bw.addMutation(newMut("r1", "cf1", "cq2", "I'm a bad value"));
 +    if (doFlush) {
 +      try {
 +        bw.flush();
 +        throw new Exception("Didn't find a bad mutation");
 +      } catch (MutationsRejectedException mre) {
 +        // ignored
 +        try {
 +          bw.close();
 +        } catch (MutationsRejectedException ex) {
 +          // ignored
 +        }
 +        bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
 +        numericErrors = 1;
 +      }
 +    }
 +    bw.addMutation(newMut("r1", "cf1", "cq3", "I'm a naughty value"));
 +    bw.addMutation(newMut("@bad row@", "cf1", "cq2", "456"));
 +    bw.addMutation(newMut("r1", "cf1", "cq4", "789"));
 +
 +    boolean sawMRE = false;
 +
 +    try {
 +      bw.close();
 +      // should not get here
 +      throw new Exception("Test failed, constraint did not catch bad mutation");
 +    } catch (MutationsRejectedException mre) {
 +      System.out.println(mre);
 +
 +      sawMRE = true;
 +
 +      // verify constraint violation summary
 +      List<ConstraintViolationSummary> cvsl = mre.getConstraintViolationSummaries();
 +
 +      if (cvsl.size() != 2) {
 +        throw new Exception("Unexpected constraints");
 +      }
 +
 +      HashMap<String,Integer> expected = new HashMap<>();
 +
 +      expected.put("org.apache.accumulo.examples.simple.constraints.NumericValueConstraint", numericErrors);
 +      expected.put("org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint", 1);
 +
 +      for (ConstraintViolationSummary cvs : cvsl) {
 +        if (expected.get(cvs.constrainClass) != cvs.numberOfViolatingMutations) {
 +          throw new Exception("Unexpected " + cvs.constrainClass + " " + cvs.numberOfViolatingMutations);
 +        }
 +      }
 +    }
 +
 +    if (!sawMRE) {
 +      throw new Exception("Did not see MutationsRejectedException");
 +    }
 +
 +    Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
 +
 +    Iterator<Entry<Key,Value>> iter = scanner.iterator();
 +
 +    Entry<Key,Value> entry = iter.next();
 +
 +    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
 +        || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123".getBytes(UTF_8)))) {
 +      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    entry = iter.next();
 +
 +    if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
 +        || !entry.getKey().getColumnQualifier().equals(new Text("cq4")) || !entry.getValue().equals(new Value("789".getBytes(UTF_8)))) {
 +      throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +    if (iter.hasNext()) {
 +      entry = iter.next();
 +      throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
 +    }
 +
 +  }
 +
 +}
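
The pattern ConstraintIT exercises is the standard client-side contract for table constraints: violations are reported through a MutationsRejectedException when the BatchWriter flushes or closes, never from addMutation itself, and the exception carries one ConstraintViolationSummary per violated constraint class. A minimal sketch of that pattern, assuming a Connector and a table already configured with NumericValueConstraint as in the test:

    import static java.nio.charset.StandardCharsets.UTF_8;

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.MutationsRejectedException;
    import org.apache.accumulo.core.data.ConstraintViolationSummary;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.hadoop.io.Text;

    public class ConstraintViolationExample {
      public static void write(Connector conn, String table) throws Exception {
        BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
        Mutation m = new Mutation(new Text("r1"));
        // "123a" violates NumericValueConstraint
        m.put(new Text("cf1"), new Text("cq1"), new Value("123a".getBytes(UTF_8)));
        bw.addMutation(m);
        try {
          bw.close(); // violations surface here, not at addMutation
        } catch (MutationsRejectedException mre) {
          // one summary per violated constraint class
          for (ConstraintViolationSummary cvs : mre.getConstraintViolationSummaries()) {
            System.out.println(cvs.constrainClass + ": " + cvs.numberOfViolatingMutations);
          }
        }
      }
    }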

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
index 4577813,0000000..c82f721
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
@@@ -1,118 -1,0 +1,118 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +import static org.junit.Assert.assertEquals;
 +
 +import java.util.Map;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.harness.AccumuloClusterHarness;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.io.Text;
 +import org.junit.After;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +import com.google.common.collect.Iterables;
 +import com.google.common.collect.Iterators;
- import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +
 +public class DeleteEverythingIT extends AccumuloClusterHarness {
 +
 +  @Override
 +  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    Map<String,String> siteConfig = cfg.getSiteConfig();
 +    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1s");
 +    cfg.setSiteConfig(siteConfig);
 +  }
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 60;
 +  }
 +
 +  private String majcDelay;
 +
 +  @Before
 +  public void updateMajcDelay() throws Exception {
 +    Connector c = getConnector();
 +    majcDelay = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
 +    c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1s");
 +    if (getClusterType() == ClusterType.STANDALONE) {
 +      // Wait for the cluster to work through the previous (default) compaction delay
 +      Thread.sleep(AccumuloConfiguration.getTimeInMillis(majcDelay));
 +    }
 +  }
 +
 +  @After
 +  public void resetMajcDelay() throws Exception {
 +    Connector c = getConnector();
 +    c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
 +  }
 +
 +  @Test
 +  public void run() throws Exception {
 +    Connector c = getConnector();
 +    String tableName = getUniqueNames(1)[0];
 +    c.tableOperations().create(tableName);
 +    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
 +    Mutation m = new Mutation(new Text("foo"));
 +    m.put(new Text("bar"), new Text("1910"), new Value("5".getBytes(UTF_8)));
 +    bw.addMutation(m);
 +    bw.flush();
 +
 +    getConnector().tableOperations().flush(tableName, null, null, true);
 +
 +    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);
 +
 +    m = new Mutation(new Text("foo"));
 +    m.putDelete(new Text("bar"), new Text("1910"));
 +    bw.addMutation(m);
 +    bw.flush();
 +
 +    Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
 +    scanner.setRange(new Range());
 +    int count = Iterators.size(scanner.iterator());
 +    assertEquals("count == " + count, 0, count);
 +    getConnector().tableOperations().flush(tableName, null, null, true);
 +
 +    getConnector().tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
 +    sleepUninterruptibly(4, TimeUnit.SECONDS);
 +
 +    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);
 +
 +    bw.close();
 +
 +    count = Iterables.size(scanner);
 +
 +    if (count != 0)
 +      throw new Exception("count == " + count);
 +  }
 +}
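
DeleteEverythingIT depends on deletes being ordinary entries: a putDelete masks earlier versions as soon as it is written, but the masked data is only reclaimed when compaction rewrites the underlying files, which is why the test forces flushes and lowers Property.TABLE_MAJC_RATIO. A minimal sketch of the write-then-delete sequence, assuming a Connector and an existing table:

    import static java.nio.charset.StandardCharsets.UTF_8;

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.hadoop.io.Text;

    public class DeleteEntryExample {
      public static void writeThenDelete(Connector conn, String table) throws Exception {
        BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());

        Mutation put = new Mutation(new Text("foo"));
        put.put(new Text("bar"), new Text("1910"), new Value("5".getBytes(UTF_8)));
        bw.addMutation(put);
        bw.flush();

        // the delete is its own entry: scans stop returning the value at once,
        // but both entries stay on disk until a compaction drops them
        Mutation del = new Mutation(new Text("foo"));
        del.putDelete(new Text("bar"), new Text("1910"));
        bw.addMutation(del);

        bw.close();
      }
    }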

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
index 2a6653d,0000000..ed48d10
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
@@@ -1,149 -1,0 +1,148 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.List;
 +import java.util.Map.Entry;
 +import java.util.SortedSet;
 +import java.util.TreeSet;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.harness.AccumuloClusterHarness;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
- import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
- 
 +// attempt to reproduce ACCUMULO-315
 +public class DeleteRowsSplitIT extends AccumuloClusterHarness {
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 4 * 60;
 +  }
 +
 +  private static final Logger log = LoggerFactory.getLogger(DeleteRowsSplitIT.class);
 +
 +  private static final String LETTERS = "abcdefghijklmnopqrstuvwxyz";
 +  static final SortedSet<Text> SPLITS = new TreeSet<>();
 +  static final List<String> ROWS = new ArrayList<>();
 +  static {
 +    for (byte b : LETTERS.getBytes(UTF_8)) {
 +      SPLITS.add(new Text(new byte[] {b}));
 +      ROWS.add(new String(new byte[] {b}, UTF_8));
 +    }
 +  }
 +
 +  @Test
 +  public void run() throws Exception {
 +    // Delete ranges of rows, and verify they are removed
 +    // Do this while adding many splits
 +    final String tableName = getUniqueNames(1)[0];
 +    final Connector conn = getConnector();
 +
 +    // Eliminate whole tablets
 +    for (int test = 0; test < 10; test++) {
 +      // create a table
 +      log.info("Test " + test);
 +      conn.tableOperations().create(tableName);
 +
 +      // put some data in it
 +      fillTable(conn, tableName);
 +
 +      // generate a random delete range
 +      final Text start = new Text();
 +      final Text end = new Text();
 +      generateRandomRange(start, end);
 +
 +      // initiate the delete range
 +      final boolean fail[] = {false};
 +      Thread t = new Thread() {
 +        @Override
 +        public void run() {
 +          try {
 +            // split the table
 +            final SortedSet<Text> afterEnd = SPLITS.tailSet(new Text(end.toString() + "\0"));
 +            conn.tableOperations().addSplits(tableName, afterEnd);
 +          } catch (Exception ex) {
 +            log.error("Exception", ex);
 +            synchronized (fail) {
 +              fail[0] = true;
 +            }
 +          }
 +        }
 +      };
 +      t.start();
 +
 +      sleepUninterruptibly(test * 2, TimeUnit.MILLISECONDS);
 +
 +      conn.tableOperations().deleteRows(tableName, start, end);
 +
 +      t.join();
 +      synchronized (fail) {
 +        assertTrue(!fail[0]);
 +      }
 +
 +      // scan the table
 +      Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
 +      for (Entry<Key,Value> entry : scanner) {
 +        Text row = entry.getKey().getRow();
 +        assertTrue(row.compareTo(start) <= 0 || row.compareTo(end) > 0);
 +      }
 +
 +      // delete the table
 +      conn.tableOperations().delete(tableName);
 +    }
 +  }
 +
 +  private void generateRandomRange(Text start, Text end) {
 +    List<String> bunch = new ArrayList<>(ROWS);
 +    Collections.shuffle(bunch);
 +    if (bunch.get(0).compareTo((bunch.get(1))) < 0) {
 +      start.set(bunch.get(0));
 +      end.set(bunch.get(1));
 +    } else {
 +      start.set(bunch.get(1));
 +      end.set(bunch.get(0));
 +    }
 +
 +  }
 +
 +  private void fillTable(Connector conn, String table) throws Exception {
 +    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
 +    for (String row : ROWS) {
 +      Mutation m = new Mutation(row);
 +      m.put("cf", "cq", "value");
 +      bw.addMutation(m);
 +    }
 +    bw.close();
 +  }
 +}
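
The scan assertion above encodes the deleteRows contract: the start row is exclusive and the end row is inclusive, so every surviving row compares <= start or > end. A minimal sketch, assuming a Connector and a populated table:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.hadoop.io.Text;

    public class DeleteRowsExample {
      public static void deleteRange(Connector conn, String table) throws Exception {
        // removes every row r with "d" < r <= "m" (start exclusive, end inclusive);
        // passing null for either bound extends the range to that end of the table
        conn.tableOperations().deleteRows(table, new Text("d"), new Text("m"));
      }
    }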

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
index 62bac85,0000000..b535534
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
@@@ -1,128 -1,0 +1,127 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +import static org.junit.Assert.fail;
 +
 +import java.util.Map;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.cli.BatchWriterOpts;
 +import org.apache.accumulo.core.client.ClientConfiguration;
 +import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.impl.ClientContext;
 +import org.apache.accumulo.core.client.impl.Credentials;
 +import org.apache.accumulo.core.client.impl.MasterClient;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.master.thrift.MasterClientService;
 +import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
 +import org.apache.accumulo.core.master.thrift.TableInfo;
 +import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 +import org.apache.accumulo.core.trace.Tracer;
 +import org.apache.accumulo.harness.AccumuloClusterHarness;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.accumulo.test.TestIngest;
 +import org.apache.hadoop.conf.Configuration;
 +import org.junit.After;
 +import org.junit.Before;
 +import org.junit.Test;
 +
- import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
- 
 +public class DynamicThreadPoolsIT extends AccumuloClusterHarness {
 +
 +  @Override
 +  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    cfg.setNumTservers(1);
 +    Map<String,String> siteConfig = cfg.getSiteConfig();
 +    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
 +    cfg.setSiteConfig(siteConfig);
 +  }
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 4 * 60;
 +  }
 +
 +  private String majcDelay;
 +
 +  @Before
 +  public void updateMajcDelay() throws Exception {
 +    Connector c = getConnector();
 +    majcDelay = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
 +    c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
 +    if (getClusterType() == ClusterType.STANDALONE) {
 +      Thread.sleep(AccumuloConfiguration.getTimeInMillis(majcDelay));
 +    }
 +  }
 +
 +  @After
 +  public void resetMajcDelay() throws Exception {
 +    Connector c = getConnector();
 +    c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
 +  }
 +
 +  @Test
 +  public void test() throws Exception {
 +    final String[] tables = getUniqueNames(15);
 +    String firstTable = tables[0];
 +    Connector c = getConnector();
 +    c.instanceOperations().setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "5");
 +    TestIngest.Opts opts = new TestIngest.Opts();
 +    opts.rows = 500 * 1000;
 +    opts.createTable = true;
 +    opts.setTableName(firstTable);
 +    ClientConfiguration clientConf = cluster.getClientConfig();
 +    if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
 +      opts.updateKerberosCredentials(clientConf);
 +    } else {
 +      opts.setPrincipal(getAdminPrincipal());
 +    }
 +    TestIngest.ingest(c, opts, new BatchWriterOpts());
 +    c.tableOperations().flush(firstTable, null, null, true);
 +    for (int i = 1; i < tables.length; i++)
 +      c.tableOperations().clone(firstTable, tables[i], true, null, null);
 +    sleepUninterruptibly(11, TimeUnit.SECONDS); // time between checks of the thread pool sizes
 +    Credentials creds = new Credentials(getAdminPrincipal(), getAdminToken());
 +    for (int i = 1; i < tables.length; i++)
 +      c.tableOperations().compact(tables[i], null, null, true, false);
 +    for (int i = 0; i < 30; i++) {
 +      int count = 0;
 +      MasterClientService.Iface client = null;
 +      MasterMonitorInfo stats = null;
 +      try {
 +        client = MasterClient.getConnectionWithRetry(new ClientContext(c.getInstance(), creds, clientConf));
 +        stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(c.getInstance()));
 +      } finally {
 +        if (client != null)
 +          MasterClient.close(client);
 +      }
 +      for (TabletServerStatus server : stats.tServerInfo) {
 +        for (TableInfo table : server.tableMap.values()) {
 +          count += table.majors.running;
 +        }
 +      }
 +      System.out.println("count " + count);
 +      if (count > 3)
 +        return;
 +      sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
 +    }
 +    fail("Could not observe higher number of threads after changing the config");
 +  }
 +}
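
DynamicThreadPoolsIT turns one knob at runtime, Property.TSERV_MAJC_MAXCONCURRENT, and then polls master stats until more concurrent major compactions are observed; the point being tested is that tablet servers resize the pool from a live config change, with no restart. A minimal sketch of the change itself, assuming a Connector with permission to alter system properties:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.conf.Property;

    public class MajcConcurrencyExample {
      public static void raiseCompactionConcurrency(Connector conn) throws Exception {
        // a system-wide property; tablet servers resize their major compaction
        // thread pool on the next configuration check, without a restart
        conn.instanceOperations().setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "5");
      }
    }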

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
index 12607a5,0000000..83f38ae
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
@@@ -1,309 -1,0 +1,309 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +import static org.junit.Assert.assertNull;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.io.IOException;
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map.Entry;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.cli.BatchWriterOpts;
 +import org.apache.accumulo.core.cli.ScannerOpts;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.ZooKeeperInstance;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.security.TablePermission;
 +import org.apache.accumulo.core.util.ServerServices;
 +import org.apache.accumulo.core.util.ServerServices.Service;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.zookeeper.ZooLock;
 +import org.apache.accumulo.gc.SimpleGarbageCollector;
 +import org.apache.accumulo.minicluster.MemoryUnit;
 +import org.apache.accumulo.minicluster.ServerType;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.accumulo.minicluster.impl.ProcessNotFoundException;
 +import org.apache.accumulo.minicluster.impl.ProcessReference;
 +import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 +import org.apache.accumulo.test.TestIngest;
 +import org.apache.accumulo.test.VerifyIngest;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.fs.RawLocalFileSystem;
 +import org.apache.hadoop.io.Text;
 +import org.apache.zookeeper.KeeperException;
 +import org.apache.zookeeper.KeeperException.NoNodeException;
 +import org.junit.Assert;
 +import org.junit.Test;
 +
 +import com.google.common.collect.Iterators;
- import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +
 +public class GarbageCollectorIT extends ConfigurableMacBase {
 +  private static final String OUR_SECRET = "itsreallysecret";
 +
 +  @Override
 +  public int defaultTimeoutSeconds() {
 +    return 5 * 60;
 +  }
 +
 +  @Override
 +  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s");
 +    cfg.setProperty(Property.INSTANCE_SECRET, OUR_SECRET);
 +    cfg.setProperty(Property.GC_CYCLE_START, "1");
 +    cfg.setProperty(Property.GC_CYCLE_DELAY, "1");
 +    cfg.setProperty(Property.GC_PORT, "0");
 +    cfg.setProperty(Property.TSERV_MAXMEM, "5K");
 +    cfg.setProperty(Property.TSERV_MAJC_DELAY, "1");
 +
 +    // use raw local file system so walogs sync and flush will work
 +    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
 +  }
 +
 +  private void killMacGc() throws ProcessNotFoundException, InterruptedException, KeeperException {
 +    // kill gc started by MAC
 +    getCluster().killProcess(ServerType.GARBAGE_COLLECTOR, getCluster().getProcesses().get(ServerType.GARBAGE_COLLECTOR).iterator().next());
 +    // delete the lock in ZooKeeper if present; this allows the next GC to start quickly
 +    String path = ZooUtil.getRoot(new ZooKeeperInstance(getCluster().getClientConfig())) + Constants.ZGC_LOCK;
 +    ZooReaderWriter zk = new ZooReaderWriter(cluster.getZooKeepers(), 30000, OUR_SECRET);
 +    try {
 +      ZooLock.deleteLock(zk, path);
 +    } catch (IllegalStateException e) {
 +
 +    }
 +
 +    assertNull(getCluster().getProcesses().get(ServerType.GARBAGE_COLLECTOR));
 +  }
 +
 +  @Test
 +  public void gcTest() throws Exception {
 +    killMacGc();
 +    Connector c = getConnector();
 +    c.tableOperations().create("test_ingest");
 +    c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K");
 +    TestIngest.Opts opts = new TestIngest.Opts();
 +    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
 +    vopts.rows = opts.rows = 10000;
 +    vopts.cols = opts.cols = 1;
 +    opts.setPrincipal("root");
 +    vopts.setPrincipal("root");
 +    TestIngest.ingest(c, cluster.getFileSystem(), opts, new BatchWriterOpts());
 +    c.tableOperations().compact("test_ingest", null, null, true, true);
 +    int before = countFiles();
 +    while (true) {
 +      sleepUninterruptibly(1, TimeUnit.SECONDS);
 +      int more = countFiles();
 +      if (more <= before)
 +        break;
 +      before = more;
 +    }
 +
 +    // restart GC
 +    getCluster().start();
 +    sleepUninterruptibly(15, TimeUnit.SECONDS);
 +    int after = countFiles();
 +    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
 +    assertTrue(after < before);
 +  }
 +
 +  @Test
 +  public void gcLotsOfCandidatesIT() throws Exception {
 +    killMacGc();
 +
 +    log.info("Filling metadata table with bogus delete flags");
 +    Connector c = getConnector();
 +    addEntries(c, new BatchWriterOpts());
 +    cluster.getConfig().setDefaultMemory(10, MemoryUnit.MEGABYTE);
 +    Process gc = cluster.exec(SimpleGarbageCollector.class);
 +    sleepUninterruptibly(20, TimeUnit.SECONDS);
 +    String output = "";
 +    while (!output.contains("delete candidates has exceeded")) {
 +      byte buffer[] = new byte[10 * 1024];
 +      try {
 +        int n = gc.getInputStream().read(buffer);
 +        output = new String(buffer, 0, n, UTF_8);
 +      } catch (IOException ex) {
 +        break;
 +      }
 +    }
 +    gc.destroy();
 +    assertTrue(output.contains("delete candidates has exceeded"));
 +  }
 +
 +  @Test
 +  public void dontGCRootLog() throws Exception {
 +    killMacGc();
 +    // dirty metadata
 +    Connector c = getConnector();
 +    String table = getUniqueNames(1)[0];
 +    c.tableOperations().create(table);
 +    // let gc run for a bit
 +    cluster.start();
 +    sleepUninterruptibly(20, TimeUnit.SECONDS);
 +    killMacGc();
 +    // kill tservers
 +    for (ProcessReference ref : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
 +      cluster.killProcess(ServerType.TABLET_SERVER, ref);
 +    }
 +    // run recovery
 +    cluster.start();
 +    // did it recover?
 +    Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
 +    Iterators.size(scanner.iterator());
 +  }
 +
 +  private Mutation createDelMutation(String path, String cf, String cq, String val) {
 +    Text row = new Text(MetadataSchema.DeletesSection.getRowPrefix() + path);
 +    Mutation delFlag = new Mutation(row);
 +    delFlag.put(cf, cq, val);
 +    return delFlag;
 +  }
 +
 +  @Test
 +  public void testInvalidDelete() throws Exception {
 +    killMacGc();
 +
 +    String table = getUniqueNames(1)[0];
 +    getConnector().tableOperations().create(table);
 +
 +    BatchWriter bw2 = getConnector().createBatchWriter(table, new BatchWriterConfig());
 +    Mutation m1 = new Mutation("r1");
 +    m1.put("cf1", "cq1", "v1");
 +    bw2.addMutation(m1);
 +    bw2.close();
 +
 +    getConnector().tableOperations().flush(table, null, null, true);
 +
 +    // ensure an invalid delete entry does not cause GC to go berserk (ACCUMULO-2520)
 +    getConnector().securityOperations().grantTablePermission(getConnector().whoami(), MetadataTable.NAME, TablePermission.WRITE);
 +    BatchWriter bw3 = getConnector().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
 +
 +    bw3.addMutation(createDelMutation("", "", "", ""));
 +    bw3.addMutation(createDelMutation("", "testDel", "test", "valueTest"));
 +    bw3.addMutation(createDelMutation("/", "", "", ""));
 +    bw3.close();
 +
 +    Process gc = cluster.exec(SimpleGarbageCollector.class);
 +    try {
 +      String output = "";
 +      while (!output.contains("Ignoring invalid deletion candidate")) {
 +        sleepUninterruptibly(250, TimeUnit.MILLISECONDS);
 +        try {
 +          output = FunctionalTestUtils.readAll(cluster, SimpleGarbageCollector.class, gc);
 +        } catch (IOException ioe) {
 +          log.error("Could not read all from cluster.", ioe);
 +        }
 +      }
 +    } finally {
 +      gc.destroy();
 +    }
 +
 +    Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
 +    Iterator<Entry<Key,Value>> iter = scanner.iterator();
 +    assertTrue(iter.hasNext());
 +    Entry<Key,Value> entry = iter.next();
 +    Assert.assertEquals("r1", entry.getKey().getRow().toString());
 +    Assert.assertEquals("cf1", entry.getKey().getColumnFamily().toString());
 +    Assert.assertEquals("cq1", entry.getKey().getColumnQualifier().toString());
 +    Assert.assertEquals("v1", entry.getValue().toString());
 +    Assert.assertFalse(iter.hasNext());
 +  }
 +
 +  @Test
 +  public void testProperPortAdvertisement() throws Exception {
 +
 +    Connector conn = getConnector();
 +    Instance instance = conn.getInstance();
 +
 +    ZooReaderWriter zk = new ZooReaderWriter(cluster.getZooKeepers(), 30000, OUR_SECRET);
 +    String path = ZooUtil.getRoot(instance) + Constants.ZGC_LOCK;
 +    for (int i = 0; i < 5; i++) {
 +      List<String> locks;
 +      try {
 +        locks = zk.getChildren(path, null);
 +      } catch (NoNodeException e) {
 +        Thread.sleep(5000);
 +        continue;
 +      }
 +
 +      if (locks != null && locks.size() > 0) {
 +        Collections.sort(locks);
 +
 +        String lockPath = path + "/" + locks.get(0);
 +
 +        String gcLoc = new String(zk.getData(lockPath, null));
 +
 +        Assert.assertTrue("Found unexpected data in zookeeper for GC location: " + gcLoc, gcLoc.startsWith(Service.GC_CLIENT.name()));
 +        int loc = gcLoc.indexOf(ServerServices.SEPARATOR_CHAR);
 +        Assert.assertNotEquals("Could not find split point of GC location for: " + gcLoc, -1, loc);
 +        String addr = gcLoc.substring(loc + 1);
 +
 +        int addrSplit = addr.indexOf(':');
 +        Assert.assertNotEquals("Could not find split of GC host:port for: " + addr, -1, addrSplit);
 +
 +        String host = addr.substring(0, addrSplit), port = addr.substring(addrSplit + 1);
 +        // We shouldn't have the "bindall" address in zk
 +        Assert.assertNotEquals("0.0.0.0", host);
 +        // Nor should we have the "random port" in zk
 +        Assert.assertNotEquals(0, Integer.parseInt(port));
 +        return;
 +      }
 +
 +      Thread.sleep(5000);
 +    }
 +
 +    Assert.fail("Could not find advertised GC address");
 +  }
 +
 +  private int countFiles() throws Exception {
 +    Path path = new Path(cluster.getConfig().getDir() + "/accumulo/tables/1/*/*.rf");
 +    return Iterators.size(Arrays.asList(cluster.getFileSystem().globStatus(path)).iterator());
 +  }
 +
 +  public static void addEntries(Connector conn, BatchWriterOpts bwOpts) throws Exception {
 +    conn.securityOperations().grantTablePermission(conn.whoami(), MetadataTable.NAME, TablePermission.WRITE);
 +    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, bwOpts.getBatchWriterConfig());
 +
 +    for (int i = 0; i < 100000; ++i) {
 +      final Text emptyText = new Text("");
 +      Text row = new Text(String.format("%s/%020d/%s", MetadataSchema.DeletesSection.getRowPrefix(), i,
 +          "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj"));
 +      Mutation delFlag = new Mutation(row);
 +      delFlag.put(emptyText, emptyText, new Value(new byte[] {}));
 +      bw.addMutation(delFlag);
 +    }
 +    bw.close();
 +  }
 +}
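
All of these tests wait with Guava's sleepUninterruptibly rather than raw Thread.sleep; it keeps sleeping through interrupts and re-asserts the thread's interrupt flag before returning, so a sleep cannot be cut short. A minimal self-contained sketch of that behavior:

    import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;

    import java.util.concurrent.TimeUnit;

    public class SleepExample {
      public static void main(String[] args) throws InterruptedException {
        Thread sleeper = new Thread() {
          @Override
          public void run() {
            // sleeps the full two seconds even though it is interrupted below,
            // then re-asserts the interrupt flag instead of throwing
            sleepUninterruptibly(2, TimeUnit.SECONDS);
            System.out.println("interrupted: " + Thread.currentThread().isInterrupted());
          }
        };
        sleeper.start();
        sleeper.interrupt(); // Thread.sleep would abort here with InterruptedException
        sleeper.join();
      }
    }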

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
index 76a8c5d,0000000..f1fbac3
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
@@@ -1,220 -1,0 +1,219 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertTrue;
 +import static org.junit.Assert.fail;
 +
 +import java.io.BufferedReader;
 +import java.io.File;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.io.InputStreamReader;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Map;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.cli.ScannerOpts;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.util.Daemon;
 +import org.apache.accumulo.minicluster.ServerType;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.accumulo.start.Main;
 +import org.apache.accumulo.test.TestIngest;
 +import org.apache.accumulo.test.VerifyIngest;
 +import org.apache.accumulo.tserver.TabletServer;
 +import org.apache.hadoop.conf.Configuration;
 +import org.junit.Test;
 +
- import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
- 
 +public class HalfDeadTServerIT extends ConfigurableMacBase {
 +
 +  @Override
 +  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    cfg.setNumTservers(1);
 +    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s");
 +    cfg.setProperty(Property.GENERAL_RPC_TIMEOUT, "5s");
 +    cfg.useMiniDFS(true);
 +  }
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 4 * 60;
 +  }
 +
 +  class DumpOutput extends Daemon {
 +
 +    private final BufferedReader rdr;
 +    private final StringBuilder output;
 +
 +    DumpOutput(InputStream is) {
 +      rdr = new BufferedReader(new InputStreamReader(is));
 +      output = new StringBuilder();
 +    }
 +
 +    @Override
 +    public void run() {
 +      try {
 +        while (true) {
 +          String line = rdr.readLine();
 +          if (line == null)
 +            break;
 +          System.out.println(line);
 +          output.append(line);
 +          output.append("\n");
 +        }
 +      } catch (IOException ex) {
 +        log.error("IOException", ex);
 +      }
 +    }
 +
 +    @Override
 +    public String toString() {
 +      return output.toString();
 +    }
 +  }
 +
 +  @Test
 +  public void testRecover() throws Exception {
 +    test(10);
 +  }
 +
 +  @Test
 +  public void testTimeout() throws Exception {
 +    String results = test(20, true);
 +    if (results != null) {
 +      if (!results.contains("Session expired")) {
 +        log.info("Failed to find 'Session expired' in output, but TServer did die which is expected");
 +      }
 +    }
 +  }
 +
 +  public String test(int seconds) throws Exception {
 +    return test(seconds, false);
 +  }
 +
 +  public String test(int seconds, boolean expectTserverDied) throws Exception {
 +    if (!makeDiskFailureLibrary())
 +      return null;
 +    Connector c = getConnector();
 +    assertEquals(1, c.instanceOperations().getTabletServers().size());
 +
 +    // create our own tablet server with the special test library
 +    String javaHome = System.getProperty("java.home");
 +    String javaBin = javaHome + File.separator + "bin" + File.separator + "java";
 +    String classpath = System.getProperty("java.class.path");
 +    classpath = new File(cluster.getConfig().getDir(), "conf") + File.pathSeparator + classpath;
 +    String className = TabletServer.class.getName();
 +    ArrayList<String> argList = new ArrayList<>();
 +    argList.addAll(Arrays.asList(javaBin, "-cp", classpath));
 +    argList.addAll(Arrays.asList(Main.class.getName(), className));
 +    ProcessBuilder builder = new ProcessBuilder(argList);
 +    Map<String,String> env = builder.environment();
 +    env.put("ACCUMULO_HOME", cluster.getConfig().getDir().getAbsolutePath());
 +    env.put("ACCUMULO_LOG_DIR", cluster.getConfig().getLogDir().getAbsolutePath());
 +    String trickFilename = cluster.getConfig().getLogDir().getAbsolutePath() + "/TRICK_FILE";
 +    env.put("TRICK_FILE", trickFilename);
 +    String libPath = System.getProperty("user.dir") + "/target/fake_disk_failure.so";
 +    env.put("LD_PRELOAD", libPath);
 +    env.put("DYLD_INSERT_LIBRARIES", libPath);
 +    env.put("DYLD_FORCE_FLAT_NAMESPACE", "true");
 +    Process ingest = null;
 +    Process tserver = builder.start();
 +    DumpOutput t = new DumpOutput(tserver.getInputStream());
 +    try {
 +      t.start();
 +      sleepUninterruptibly(1, TimeUnit.SECONDS);
 +      // don't need the regular tablet server
 +      cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
 +      sleepUninterruptibly(1, TimeUnit.SECONDS);
 +      c.tableOperations().create("test_ingest");
 +      assertEquals(1, c.instanceOperations().getTabletServers().size());
 +      int rows = 100 * 1000;
 +      ingest = cluster.exec(TestIngest.class, "-u", "root", "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", ROOT_PASSWORD, "--rows", rows
 +          + "");
 +      sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
 +
 +      // block I/O with some side-channel trickiness
 +      File trickFile = new File(trickFilename);
 +      try {
 +        assertTrue(trickFile.createNewFile());
 +        sleepUninterruptibly(seconds, TimeUnit.SECONDS);
 +      } finally {
 +        if (!trickFile.delete()) {
 +          log.error("Couldn't delete " + trickFile);
 +        }
 +      }
 +
 +      if (seconds <= 10) {
 +        assertEquals(0, ingest.waitFor());
 +        VerifyIngest.Opts vopts = new VerifyIngest.Opts();
 +        vopts.rows = rows;
 +        vopts.setPrincipal("root");
 +        VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
 +      } else {
 +        sleepUninterruptibly(5, TimeUnit.SECONDS);
 +        tserver.waitFor();
 +        t.join();
 +        tserver = null;
 +      }
 +      // verify the process was blocked
 +      String results = t.toString();
 +      assertTrue(results.contains("sleeping\nsleeping\nsleeping\n"));
 +      return results;
 +    } finally {
 +      if (ingest != null) {
 +        ingest.destroy();
 +        ingest.waitFor();
 +      }
 +      if (tserver != null) {
 +        try {
 +          if (expectTserverDied) {
 +            try {
 +              tserver.exitValue();
 +            } catch (IllegalThreadStateException e) {
 +              fail("Expected TServer to kill itself, but it is still running");
 +            }
 +          }
 +        } finally {
 +          tserver.destroy();
 +          tserver.waitFor();
 +          t.join();
 +        }
 +      }
 +    }
 +  }
 +
 +  private boolean makeDiskFailureLibrary() throws Exception {
 +    String root = System.getProperty("user.dir");
 +    String source = root + "/src/test/c/fake_disk_failure.c";
 +    String lib = root + "/target/fake_disk_failure.so";
 +    String platform = System.getProperty("os.name");
 +    String cmd[];
 +    if (platform.equals("Darwin")) {
 +      cmd = new String[] {"gcc", "-arch", "x86_64", "-arch", "i386", "-dynamiclib", "-O3", "-fPIC", source, "-o", lib};
 +    } else {
 +      cmd = new String[] {"gcc", "-D_GNU_SOURCE", "-Wall", "-fPIC", source, "-shared", "-o", lib, "-ldl"};
 +    }
 +    Process gcc = Runtime.getRuntime().exec(cmd);
 +    return gcc.waitFor() == 0;
 +  }
 +
 +}
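
HalfDeadTServerIT simulates a failing disk by preloading a small shared library into a separately launched tserver JVM; the library stalls I/O whenever the file named by TRICK_FILE exists. A sketch of the ProcessBuilder environment-injection pattern it uses, with placeholder paths and main class standing in for the values the test derives from the mini cluster config:

    import java.util.Arrays;
    import java.util.Map;

    public class PreloadLaunchExample {
      public static Process launch() throws Exception {
        // placeholder path; the test builds this from its target directory
        String libPath = "/path/to/fake_disk_failure.so";
        ProcessBuilder builder = new ProcessBuilder(
            Arrays.asList("java", "-cp", System.getProperty("java.class.path"), "SomeMainClass"));
        Map<String,String> env = builder.environment();
        env.put("LD_PRELOAD", libPath);            // Linux: interpose libc I/O calls
        env.put("DYLD_INSERT_LIBRARIES", libPath); // OS X equivalent
        env.put("DYLD_FORCE_FLAT_NAMESPACE", "true");
        env.put("TRICK_FILE", "/tmp/TRICK_FILE");  // library blocks I/O while this file exists
        return builder.start();
      }
    }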

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
index 160b164,0000000..3f76594
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
@@@ -1,221 -1,0 +1,220 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.Random;
 +import java.util.TreeSet;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.harness.AccumuloClusterHarness;
 +import org.apache.accumulo.minicluster.MemoryUnit;
 +import org.apache.accumulo.minicluster.ServerType;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.accumulo.test.TestIngest;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.io.Text;
 +import org.junit.After;
 +import org.junit.Assert;
 +import org.junit.Before;
 +import org.junit.Test;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
- import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
- 
 +public class LargeRowIT extends AccumuloClusterHarness {
 +  private static final Logger log = LoggerFactory.getLogger(LargeRowIT.class);
 +
 +  @Override
 +  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    cfg.setMemory(ServerType.TABLET_SERVER, cfg.getMemory(ServerType.TABLET_SERVER) * 2, MemoryUnit.BYTE);
 +    Map<String,String> siteConfig = cfg.getSiteConfig();
 +    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "10ms");
 +    cfg.setSiteConfig(siteConfig);
 +  }
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 4 * 60;
 +  }
 +
 +  private static final int SEED = 42;
 +  private static final int NUM_ROWS = 100;
 +  private static final int ROW_SIZE = 1 << 17;
 +  private static final int NUM_PRE_SPLITS = 9;
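 +  // ~1.4 MB: 100 rows of 128 KiB each, spread across NUM_PRE_SPLITS tablets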
 +  private static final int SPLIT_THRESH = ROW_SIZE * NUM_ROWS / NUM_PRE_SPLITS;
 +
 +  private String REG_TABLE_NAME;
 +  private String PRE_SPLIT_TABLE_NAME;
 +  private int timeoutFactor = 1;
 +  private String tservMajcDelay;
 +
 +  @Before
 +  public void getTimeoutFactor() throws Exception {
 +    try {
 +      timeoutFactor = Integer.parseInt(System.getProperty("timeout.factor"));
 +    } catch (NumberFormatException e) {
 +      log.warn("Could not parse property value for 'timeout.factor' as integer: " + System.getProperty("timeout.factor"));
 +    }
 +
 +    Assert.assertTrue("Timeout factor must be greater than or equal to 1", timeoutFactor >= 1);
 +
 +    String[] names = getUniqueNames(2);
 +    REG_TABLE_NAME = names[0];
 +    PRE_SPLIT_TABLE_NAME = names[1];
 +
 +    Connector c = getConnector();
 +    tservMajcDelay = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
 +    c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "10ms");
 +  }
 +
 +  @After
 +  public void resetMajcDelay() throws Exception {
 +    if (null != tservMajcDelay) {
 +      Connector conn = getConnector();
 +      conn.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), tservMajcDelay);
 +    }
 +  }
 +
 +  @Test
 +  public void run() throws Exception {
 +    Random r = new Random();
 +    byte[] rowData = new byte[ROW_SIZE];
 +    r.setSeed(SEED + 1);
 +    TreeSet<Text> splitPoints = new TreeSet<>();
 +    for (int i = 0; i < NUM_PRE_SPLITS; i++) {
 +      r.nextBytes(rowData);
 +      TestIngest.toPrintableChars(rowData);
 +      splitPoints.add(new Text(rowData));
 +    }
 +    Connector c = getConnector();
 +    c.tableOperations().create(REG_TABLE_NAME);
 +    c.tableOperations().create(PRE_SPLIT_TABLE_NAME);
 +    c.tableOperations().setProperty(PRE_SPLIT_TABLE_NAME, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "256K");
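 +    // allow the TABLE_MAX_END_ROW_SIZE change to propagate before adding the large split points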
 +    sleepUninterruptibly(3, TimeUnit.SECONDS);
 +    c.tableOperations().addSplits(PRE_SPLIT_TABLE_NAME, splitPoints);
 +    test1(c);
 +    test2(c);
 +  }
 +
 +  private void test1(Connector c) throws Exception {
 +
 +    basicTest(c, REG_TABLE_NAME, 0);
 +
 +    c.tableOperations().setProperty(REG_TABLE_NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "" + SPLIT_THRESH);
 +
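 +    // wait for tablet servers to notice the lowered threshold and split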
 +    sleepUninterruptibly(timeoutFactor * 12, TimeUnit.SECONDS);
 +    log.info("checking splits");
 +    FunctionalTestUtils.checkSplits(c, REG_TABLE_NAME, NUM_PRE_SPLITS / 2, NUM_PRE_SPLITS * 4);
 +
 +    verify(c, REG_TABLE_NAME);
 +  }
 +
 +  private void test2(Connector c) throws Exception {
 +    basicTest(c, PRE_SPLIT_TABLE_NAME, NUM_PRE_SPLITS);
 +  }
 +
 +  private void basicTest(Connector c, String table, int expectedSplits) throws Exception {
 +    BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
 +
 +    Random r = new Random();
 +    byte[] rowData = new byte[ROW_SIZE];
 +
 +    r.setSeed(SEED);
 +
 +    for (int i = 0; i < NUM_ROWS; i++) {
 +
 +      r.nextBytes(rowData);
 +      TestIngest.toPrintableChars(rowData);
 +
 +      Mutation mut = new Mutation(new Text(rowData));
 +      mut.put(new Text(""), new Text(""), new Value(Integer.toString(i).getBytes(UTF_8)));
 +      bw.addMutation(mut);
 +    }
 +
 +    bw.close();
 +
 +    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
 +
 +    verify(c, table);
 +
 +    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
 +
 +    c.tableOperations().flush(table, null, null, false);
 +
 +    // verify while table flush is running
 +    verify(c, table);
 +
 +    // wait for the flush, giving any pending splits time to complete
 +    c.tableOperations().flush(table, null, null, true);
 +
 +    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
 +
 +    verify(c, table);
 +
 +    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
 +  }
 +
 +  private void verify(Connector c, String table) throws Exception {
 +    Random r = new Random();
 +    byte[] rowData = new byte[ROW_SIZE];
 +
 +    r.setSeed(SEED);
 +
 +    Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
 +
 +    for (int i = 0; i < NUM_ROWS; i++) {
 +
 +      r.nextBytes(rowData);
 +      TestIngest.toPrintableChars(rowData);
 +
 +      scanner.setRange(new Range(new Text(rowData)));
 +
 +      int count = 0;
 +
 +      for (Entry<Key,Value> entry : scanner) {
 +        if (!entry.getKey().getRow().equals(new Text(rowData))) {
 +          throw new Exception("verification failed, unexpected row i =" + i);
 +        }
 +        if (!entry.getValue().equals(Integer.toString(i).getBytes(UTF_8))) {
 +          throw new Exception("verification failed, unexpected value i =" + i + " value = " + entry.getValue());
 +        }
 +        count++;
 +      }
 +
 +      if (count != 1) {
 +        throw new Exception("verification failed, unexpected count i =" + i + " count=" + count);
 +      }
 +
 +    }
 +
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/functional/MetadataIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/functional/MetadataIT.java
index 883b8dc,0000000..42ef7b4
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MetadataIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MetadataIT.java
@@@ -1,149 -1,0 +1,149 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.util.Collections;
 +import java.util.HashSet;
 +import java.util.Map.Entry;
 +import java.util.Set;
 +import java.util.SortedSet;
 +import java.util.TreeSet;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.client.BatchScanner;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.RootTable;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.harness.AccumuloClusterHarness;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Assert;
 +import org.junit.Test;
 +
 +import com.google.common.collect.Iterators;
- import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +
 +public class MetadataIT extends AccumuloClusterHarness {
 +
 +  @Override
 +  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    cfg.setNumTservers(1);
 +  }
 +
 +  @Override
 +  public int defaultTimeoutSeconds() {
 +    return 2 * 60;
 +  }
 +
 +  @Test
 +  public void testFlushAndCompact() throws Exception {
 +    Connector c = getConnector();
 +    String[] tableNames = getUniqueNames(2);
 +
 +    // create a table, which writes entries to the metadata table
 +    c.tableOperations().create(tableNames[0]);
 +
 +    Scanner rootScanner = c.createScanner(RootTable.NAME, Authorizations.EMPTY);
 +    rootScanner.setRange(MetadataSchema.TabletsSection.getRange());
 +    rootScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
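 +    // the root table tracks the metadata table's tablets, including their data files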
 +
 +    Set<String> files1 = new HashSet<>();
 +    for (Entry<Key,Value> entry : rootScanner)
 +      files1.add(entry.getKey().getColumnQualifier().toString());
 +
 +    c.tableOperations().create(tableNames[1]);
 +    c.tableOperations().flush(MetadataTable.NAME, null, null, true);
 +
 +    Set<String> files2 = new HashSet<>();
 +    for (Entry<Key,Value> entry : rootScanner)
 +      files2.add(entry.getKey().getColumnQualifier().toString());
 +
 +    // flushing the metadata table should change the file set recorded in the root table
 +    Assert.assertTrue(files2.size() > 0);
 +    Assert.assertNotEquals(files1, files2);
 +
 +    c.tableOperations().compact(MetadataTable.NAME, null, null, false, true);
 +
 +    Set<String> files3 = new HashSet<>();
 +    for (Entry<Key,Value> entry : rootScanner)
 +      files3.add(entry.getKey().getColumnQualifier().toString());
 +
 +    // compacting the metadata table should change the file set recorded in the root table
 +    Assert.assertNotEquals(files2, files3);
 +  }
 +
 +  @Test
 +  public void mergeMeta() throws Exception {
 +    Connector c = getConnector();
 +    String[] names = getUniqueNames(5);
 +    SortedSet<Text> splits = new TreeSet<>();
 +    for (String id : "1 2 3 4 5".split(" ")) {
 +      splits.add(new Text(id));
 +    }
 +    c.tableOperations().addSplits(MetadataTable.NAME, splits);
 +    for (String tableName : names) {
 +      c.tableOperations().create(tableName);
 +    }
 +    c.tableOperations().merge(MetadataTable.NAME, null, null);
 +    Scanner s = c.createScanner(RootTable.NAME, Authorizations.EMPTY);
 +    s.setRange(MetadataSchema.DeletesSection.getRange());
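 +    // merging metadata tablets obsoletes files, producing delete markers in the root table's delete section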
 +    while (Iterators.size(s.iterator()) == 0) {
 +      sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
 +    }
 +    assertEquals(0, c.tableOperations().listSplits(MetadataTable.NAME).size());
 +  }
 +
 +  @Test
 +  public void batchScanTest() throws Exception {
 +    Connector c = getConnector();
 +    String tableName = getUniqueNames(1)[0];
 +    c.tableOperations().create(tableName);
 +
 +    // batch scan regular metadata table
 +    BatchScanner s = c.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
 +    s.setRanges(Collections.singleton(new Range()));
 +    int count = 0;
 +    for (Entry<Key,Value> e : s) {
 +      if (e != null)
 +        count++;
 +    }
 +    s.close();
 +    assertTrue(count > 0);
 +
 +    // batch scan root metadata table
 +    s = c.createBatchScanner(RootTable.NAME, Authorizations.EMPTY, 1);
 +    s.setRanges(Collections.singleton(new Range()));
 +    count = 0;
 +    for (Entry<Key,Value> e : s) {
 +      if (e != null)
 +        count++;
 +    }
 +    s.close();
 +    assertTrue(count > 0);
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
index 6c4939f,0000000..6a20297
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
@@@ -1,117 -1,0 +1,116 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +import static org.junit.Assert.assertEquals;
 +
 +import java.util.Map.Entry;
 +import java.util.SortedSet;
 +import java.util.TreeSet;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.impl.ClientContext;
 +import org.apache.accumulo.core.client.impl.Credentials;
 +import org.apache.accumulo.core.client.impl.MasterClient;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.master.thrift.MasterClientService.Client;
 +import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
 +import org.apache.accumulo.core.master.thrift.TableInfo;
 +import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.RootTable;
 +import org.apache.accumulo.core.trace.Tracer;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.accumulo.server.util.Admin;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.RawLocalFileSystem;
 +import org.apache.hadoop.io.Text;
 +import org.junit.Test;
 +
- import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
- 
 +public class MetadataMaxFilesIT extends ConfigurableMacBase {
 +
 +  @Override
 +  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    cfg.setProperty(Property.TSERV_MAJC_DELAY, "1");
 +    cfg.setProperty(Property.TSERV_SCAN_MAX_OPENFILES, "10");
 +    cfg.setProperty(Property.TSERV_ASSIGNMENT_MAXCONCURRENT, "100");
 +    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
 +  }
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 4 * 60;
 +  }
 +
 +  @Test
 +  public void test() throws Exception {
 +    Connector c = getConnector();
 +    SortedSet<Text> splits = new TreeSet<>();
 +    for (int i = 0; i < 1000; i++) {
 +      splits.add(new Text(String.format("%03d", i)));
 +    }
 +    c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10000");
 +    // give the split threshold change time to propagate to the tablet servers
 +    sleepUninterruptibly(5, TimeUnit.SECONDS);
 +    for (int i = 0; i < 5; i++) {
 +      String tableName = "table" + i;
 +      log.info("Creating " + tableName);
 +      c.tableOperations().create(tableName);
 +      log.info("adding splits");
 +      c.tableOperations().addSplits(tableName, splits);
 +      log.info("flushing");
 +      c.tableOperations().flush(MetadataTable.NAME, null, null, true);
 +      c.tableOperations().flush(RootTable.NAME, null, null, true);
 +    }
 +    log.info("shutting down");
 +    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
 +    cluster.stop();
 +    log.info("starting up");
 +    cluster.start();
 +
 +    Credentials creds = new Credentials("root", new PasswordToken(ROOT_PASSWORD));
 +
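 +    // poll the master until every tablet is reported back online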
 +    while (true) {
 +      MasterMonitorInfo stats = null;
 +      Client client = null;
 +      try {
 +        ClientContext context = new ClientContext(c.getInstance(), creds, getClientConfig());
 +        client = MasterClient.getConnectionWithRetry(context);
 +        log.info("Fetching stats");
 +        stats = client.getMasterStats(Tracer.traceInfo(), context.rpcCreds());
 +      } finally {
 +        if (client != null)
 +          MasterClient.close(client);
 +      }
 +      int tablets = 0;
 +      for (TabletServerStatus tserver : stats.tServerInfo) {
 +        for (Entry<String,TableInfo> entry : tserver.tableMap.entrySet()) {
 +          if (entry.getKey().startsWith("!") || entry.getKey().startsWith("+"))
 +            continue;
 +          tablets += entry.getValue().onlineTablets;
 +        }
 +      }
 +      log.info("Online tablets " + tablets);
 +      if (tablets == 5005)
 +        break;
 +      sleepUninterruptibly(1, TimeUnit.SECONDS);
 +    }
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
index 58480bc,0000000..fc66c12
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
@@@ -1,58 -1,0 +1,57 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test.functional;
 +
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.util.Collections;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.hadoop.conf.Configuration;
 +import org.junit.Test;
 +
- import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
- 
 +public class MetadataSplitIT extends ConfigurableMacBase {
 +
 +  @Override
 +  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "100ms"));
 +  }
 +
 +  @Override
 +  protected int defaultTimeoutSeconds() {
 +    return 2 * 60;
 +  }
 +
 +  @Test
 +  public void test() throws Exception {
 +    Connector c = getConnector();
 +    assertEquals(1, c.tableOperations().listSplits(MetadataTable.NAME).size());
 +    c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "500");
 +    for (int i = 0; i < 10; i++) {
 +      c.tableOperations().create("table" + i);
 +      c.tableOperations().flush(MetadataTable.NAME, null, null, true);
 +    }
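 +    // give the tablet servers time to split the metadata table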
 +    sleepUninterruptibly(10, TimeUnit.SECONDS);
 +    assertTrue(c.tableOperations().listSplits(MetadataTable.NAME).size() > 2);
 +  }
 +}

