accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ctubb...@apache.org
Subject [13/24] accumulo git commit: Merge branch '1.7' into 1.8
Date Tue, 25 Jul 2017 23:03:01 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/core/src/test/java/org/apache/accumulo/core/iterators/system/SourceSwitchingIteratorTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/iterators/system/SourceSwitchingIteratorTest.java
index 037ee7e,9751b7d..9328490
--- a/core/src/test/java/org/apache/accumulo/core/iterators/system/SourceSwitchingIteratorTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/iterators/system/SourceSwitchingIteratorTest.java
@@@ -23,12 -21,8 +23,9 @@@ import java.util.List
  import java.util.TreeMap;
  import java.util.concurrent.atomic.AtomicBoolean;
  
- import com.google.common.base.Optional;
- import junit.framework.TestCase;
- 
  import org.apache.accumulo.core.data.ByteSequence;
  import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.PartialKey;
  import org.apache.accumulo.core.data.Range;
  import org.apache.accumulo.core.data.Value;
  import org.apache.accumulo.core.iterators.IterationInterruptedException;
@@@ -41,6 -32,8 +38,10 @@@ import org.apache.accumulo.core.iterato
  import org.apache.accumulo.core.iterators.system.SourceSwitchingIterator.DataSource;
  import org.apache.hadoop.io.Text;
  
++import com.google.common.base.Optional;
++
+ import junit.framework.TestCase;
+ 
  public class SourceSwitchingIteratorTest extends TestCase {
  
    Key newKey(String row, String cf, String cq, long time) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/core/src/test/java/org/apache/accumulo/core/util/format/DateFormatSupplierTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/util/format/DateFormatSupplierTest.java
index b095b04,0000000..b5fc10d
mode 100644,000000..100644
--- a/core/src/test/java/org/apache/accumulo/core/util/format/DateFormatSupplierTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/util/format/DateFormatSupplierTest.java
@@@ -1,74 -1,0 +1,75 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.util.format;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertNotSame;
 +import static org.junit.Assert.assertSame;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.text.DateFormat;
 +import java.util.Date;
 +import java.util.TimeZone;
++
 +import org.junit.Test;
 +
 +public class DateFormatSupplierTest {
 +
 +  /** Asserts two supplier instances create independent objects */
 +  private void assertSuppliersIndependent(ThreadLocal<DateFormat> supplierA, ThreadLocal<DateFormat> supplierB) {
 +    DateFormat getA1 = supplierA.get();
 +    DateFormat getA2 = supplierA.get();
 +    assertSame(getA1, getA2);
 +
 +    DateFormat getB1 = supplierB.get();
 +    DateFormat getB2 = supplierB.get();
 +
 +    assertSame(getB1, getB2);
 +    assertNotSame(getA1, getB1);
 +  }
 +
 +  @Test
 +  public void testCreateDefaultFormatSupplier() throws Exception {
 +    ThreadLocal<DateFormat> supplierA = DateFormatSupplier.createDefaultFormatSupplier();
 +    ThreadLocal<DateFormat> supplierB = DateFormatSupplier.createDefaultFormatSupplier();
 +    assertSuppliersIndependent(supplierA, supplierB);
 +  }
 +
 +  @Test
 +  public void testCreateSimpleFormatSupplier() throws Exception {
 +    final String format = DateFormatSupplier.HUMAN_READABLE_FORMAT;
 +    DateFormatSupplier supplierA = DateFormatSupplier.createSimpleFormatSupplier(format);
 +    DateFormatSupplier supplierB = DateFormatSupplier.createSimpleFormatSupplier(format);
 +    assertSuppliersIndependent(supplierA, supplierB);
 +
 +    // since dfA and dfB come from different suppliers, altering the TimeZone on one does not affect the other
 +    supplierA.setTimeZone(TimeZone.getTimeZone("UTC"));
 +    final DateFormat dfA = supplierA.get();
 +
 +    supplierB.setTimeZone(TimeZone.getTimeZone("EST"));
 +    final DateFormat dfB = supplierB.get();
 +
 +    final String resultA = dfA.format(new Date(0));
 +    assertEquals("1970/01/01 00:00:00.000", resultA);
 +
 +    final String resultB = dfB.format(new Date(0));
 +    assertEquals("1969/12/31 19:00:00.000", resultB);
 +
 +    assertTrue(!resultA.equals(resultB));
 +
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/core/src/test/java/org/apache/accumulo/core/util/format/DateStringFormatterTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/core/src/test/java/org/apache/accumulo/core/util/format/DefaultFormatterTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/util/format/DefaultFormatterTest.java
index 9c688db,7b654d0..53d595f
--- a/core/src/test/java/org/apache/accumulo/core/util/format/DefaultFormatterTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/util/format/DefaultFormatterTest.java
@@@ -19,10 -19,8 +19,11 @@@ package org.apache.accumulo.core.util.f
  import static org.junit.Assert.assertEquals;
  
  import java.util.Collections;
 +import java.util.Map;
  import java.util.Map.Entry;
 +import java.util.TimeZone;
 +import java.util.TreeMap;
+ 
  import org.apache.accumulo.core.data.Key;
  import org.apache.accumulo.core.data.Value;
  import org.apache.hadoop.io.Text;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/core/src/test/java/org/apache/accumulo/core/util/format/FormatterConfigTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/util/format/FormatterConfigTest.java
index aa88e03,0000000..7975dc1
mode 100644,000000..100644
--- a/core/src/test/java/org/apache/accumulo/core/util/format/FormatterConfigTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/util/format/FormatterConfigTest.java
@@@ -1,81 -1,0 +1,82 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.util.format;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertNotSame;
 +import static org.junit.Assert.assertSame;
 +import static org.junit.Assert.fail;
 +
 +import java.text.DateFormat;
++
 +import org.junit.Test;
 +
 +public class FormatterConfigTest {
 +
 +  @Test
 +  public void testConstructor() {
 +    FormatterConfig config = new FormatterConfig();
 +    assertEquals(false, config.willLimitShowLength());
 +    assertEquals(false, config.willPrintTimestamps());
 +  }
 +
 +  @Test
 +  public void testSetShownLength() throws Exception {
 +    FormatterConfig config = new FormatterConfig();
 +    try {
 +      config.setShownLength(-1);
 +      fail("Should throw on negative length.");
 +    } catch (IllegalArgumentException e) {}
 +
 +    config.setShownLength(0);
 +    assertEquals(0, config.getShownLength());
 +    assertEquals(true, config.willLimitShowLength());
 +
 +    config.setShownLength(1);
 +    assertEquals(1, config.getShownLength());
 +    assertEquals(true, config.willLimitShowLength());
 +  }
 +
 +  @Test
 +  public void testDoNotLimitShowLength() {
 +    FormatterConfig config = new FormatterConfig();
 +    assertEquals(false, config.willLimitShowLength());
 +
 +    config.setShownLength(1);
 +    assertEquals(true, config.willLimitShowLength());
 +
 +    config.doNotLimitShowLength();
 +    assertEquals(false, config.willLimitShowLength());
 +  }
 +
 +  @Test
 +  public void testGetDateFormat() {
 +    FormatterConfig config1 = new FormatterConfig();
 +    DateFormat df1 = config1.getDateFormatSupplier().get();
 +
 +    FormatterConfig config2 = new FormatterConfig();
 +    assertNotSame(df1, config2.getDateFormatSupplier().get());
 +
 +    config2.setDateFormatSupplier(config1.getDateFormatSupplier());
 +    assertSame(df1, config2.getDateFormatSupplier().get());
 +
 +    // even though copying, it can't copy the Generator, so will pull out the same DateFormat
 +    FormatterConfig configCopy = new FormatterConfig(config1);
 +    assertSame(df1, configCopy.getDateFormatSupplier().get());
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/iterator-test-harness/src/main/java/org/apache/accumulo/iteratortest/testcases/YieldingTestCase.java
----------------------------------------------------------------------
diff --cc iterator-test-harness/src/main/java/org/apache/accumulo/iteratortest/testcases/YieldingTestCase.java
index f9de207,0000000..bfa0ce3
mode 100644,000000..100644
--- a/iterator-test-harness/src/main/java/org/apache/accumulo/iteratortest/testcases/YieldingTestCase.java
+++ b/iterator-test-harness/src/main/java/org/apache/accumulo/iteratortest/testcases/YieldingTestCase.java
@@@ -1,87 -1,0 +1,87 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to you under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.iteratortest.testcases;
 +
++import java.io.IOException;
++import java.util.TreeMap;
++
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 +import org.apache.accumulo.core.iterators.YieldCallback;
 +import org.apache.accumulo.core.iterators.YieldingKeyValueIterator;
 +import org.apache.accumulo.iteratortest.IteratorTestInput;
 +import org.apache.accumulo.iteratortest.IteratorTestOutput;
 +import org.apache.accumulo.iteratortest.IteratorTestUtil;
 +import org.apache.accumulo.iteratortest.environments.SimpleIteratorEnvironment;
 +
- import java.io.IOException;
- import java.util.TreeMap;
- 
 +/**
 + * Test case that verifies that an iterator works correctly with the yielding api. Note that most iterators do nothing in terms of yielding in which case this
 + * merely tests that the iterator produces the correct output. If however the iterator does override the yielding api, then this ensures that it works correctly
 + * iff the iterator actually decides to yield. Nothing can force an iterator to yield without knowing something about the internals of the iterator being
 + * tested.
 + */
 +public class YieldingTestCase extends OutputVerifyingTestCase {
 +
 +  @Override
 +  public IteratorTestOutput test(IteratorTestInput testInput) {
 +    final SortedKeyValueIterator<Key,Value> skvi = IteratorTestUtil.instantiateIterator(testInput);
 +    final SortedKeyValueIterator<Key,Value> source = IteratorTestUtil.createSource(testInput);
 +
 +    try {
 +      skvi.init(source, testInput.getIteratorOptions(), new SimpleIteratorEnvironment());
 +
 +      YieldCallback<Key> yield = new YieldCallback<>();
 +      if (skvi instanceof YieldingKeyValueIterator) {
 +        ((YieldingKeyValueIterator<Key,Value>) skvi).enableYielding(yield);
 +      }
 +
 +      skvi.seek(testInput.getRange(), testInput.getFamilies(), testInput.isInclusive());
 +      return new IteratorTestOutput(consume(testInput, skvi, yield));
 +    } catch (IOException e) {
 +      return new IteratorTestOutput(e);
 +    }
 +  }
 +
 +  TreeMap<Key,Value> consume(IteratorTestInput testInput, SortedKeyValueIterator<Key,Value> skvi, YieldCallback<Key> yield) throws IOException {
 +    TreeMap<Key,Value> data = new TreeMap<>();
 +    Key lastKey = null;
 +    while (yield.hasYielded() || skvi.hasTop()) {
 +      if (yield.hasYielded()) {
 +        Range r = testInput.getRange();
 +        Key yieldPosition = yield.getPositionAndReset();
 +        if (!r.contains(yieldPosition)) {
 +          throw new IOException("Underlying iterator yielded to a position outside of its range: " + yieldPosition + " not in " + r);
 +        }
 +        if (skvi.hasTop()) {
 +          throw new IOException("Underlying iterator reports having a top, but has yielded: " + yieldPosition);
 +        }
 +        if (lastKey != null && yieldPosition.compareTo(lastKey) <= 0) {
 +          throw new IOException("Underlying iterator yielded at a position that is not past the last key returned");
 +        }
 +        skvi.seek(new Range(yieldPosition, false, r.getEndKey(), r.isEndKeyInclusive()), testInput.getFamilies(), testInput.isInclusive());
 +      } else {
 +        // Make sure to copy the K-V
 +        data.put(new Key(skvi.getTopKey()), new Value(skvi.getTopValue()));
 +        skvi.next();
 +      }
 +    }
 +    return data;
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterControl.java
----------------------------------------------------------------------
diff --cc minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterControl.java
index 9a433cf,8cc7950..2fd403f
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterControl.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterControl.java
@@@ -22,7 -22,7 +22,9 @@@ import java.io.FileInputStream
  import java.io.IOException;
  import java.io.InputStream;
  import java.util.ArrayList;
++import java.util.Collections;
  import java.util.List;
++import java.util.Map;
  import java.util.Map.Entry;
  import java.util.concurrent.ExecutionException;
  import java.util.concurrent.TimeUnit;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/pom.xml
----------------------------------------------------------------------
diff --cc pom.xml
index 957792a,836c3f8..7e4e591
--- a/pom.xml
+++ b/pom.xml
@@@ -1370,7 -1358,68 +1394,32 @@@
          <failsafe.groups>org.apache.accumulo.test.categories.SunnyDayTests</failsafe.groups>
        </properties>
      </profile>
 -    <!-- profile for our default Hadoop build
 -         unfortunately, has to duplicate one of our
 -         specified profiles. see MNG-3328 -->
 -    <profile>
 -      <id>hadoop-default</id>
 -      <activation>
 -        <property>
 -          <name>!hadoop.profile</name>
 -        </property>
 -      </activation>
 -      <properties>
 -        <!-- Denotes intention and allows the enforcer plugin to pass when
 -             the user is relying on default behavior; won't work to activate profile -->
 -        <hadoop.profile>2</hadoop.profile>
 -        <hadoop.version>2.2.0</hadoop.version>
 -        <httpclient.version>3.1</httpclient.version>
 -        <slf4j.version>1.7.5</slf4j.version>
 -      </properties>
 -    </profile>
 -    <!-- profile for building against Hadoop 2.x
 -     XXX Since this is the default, make sure to sync hadoop-default when changing.
 -    Activate using: mvn -Dhadoop.profile=2 -->
 -    <profile>
 -      <id>hadoop-2</id>
 -      <activation>
 -        <property>
 -          <name>hadoop.profile</name>
 -          <value>2</value>
 -        </property>
 -      </activation>
 -      <properties>
 -        <hadoop.version>2.2.0</hadoop.version>
 -        <httpclient.version>3.1</httpclient.version>
 -        <slf4j.version>1.7.5</slf4j.version>
 -      </properties>
 -    </profile>
      <profile>
+       <id>autoformat-with-jdk8</id>
+       <activation>
+         <jdk>[1.8,1.9)</jdk>
+         <property>
+           <name>!skipFormat</name>
+         </property>
+       </activation>
+       <build>
+         <plugins>
+           <plugin>
+             <groupId>net.revelc.code</groupId>
+             <artifactId>impsort-maven-plugin</artifactId>
+             <executions>
+               <execution>
+                 <id>sort-imports</id>
+                 <goals>
+                   <goal>sort</goal>
+                 </goals>
+               </execution>
+             </executions>
+           </plugin>
+         </plugins>
+       </build>
+     </profile>
+     <profile>
        <id>jdk8</id>
        <activation>
          <jdk>[1.8,1.9)</jdk>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
index 3cfd759,1589e9d..43b9209
--- a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
@@@ -16,6 -16,6 +16,7 @@@
   */
  package org.apache.accumulo.server;
  
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
  import static java.nio.charset.StandardCharsets.UTF_8;
  
  import java.io.File;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
index 49bf1f4,2dacf61..c245a5e
--- a/server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
@@@ -54,10 -53,11 +54,11 @@@ import org.apache.accumulo.server.fs.Vo
  import org.apache.accumulo.server.zookeeper.ZooLock;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.Text;
- import com.google.common.base.Joiner;
 -import org.apache.log4j.Level;
 -import org.apache.log4j.Logger;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
  
+ import com.google.common.base.Joiner;
+ 
  /**
   * An implementation of Instance that looks in HDFS and ZooKeeper to find the master and root tablet location.
   *

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
index 69f883f,e761e4f..947057c
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
@@@ -27,9 -27,8 +27,9 @@@ import org.apache.hadoop.fs.FSDataInput
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.Path;
++import org.apache.hadoop.fs.permission.FsPermission;
  
  import com.google.common.base.Optional;
- import org.apache.hadoop.fs.permission.FsPermission;
  
  /**
   * A wrapper around multiple hadoop FileSystem objects, which are assumed to be different volumes. This also concentrates a bunch of meta-operations like

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/base/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
index df135d6,76ae39e..d2a8297
--- a/server/base/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
@@@ -33,6 -33,6 +33,7 @@@ import org.apache.accumulo.core.data.im
  import org.apache.accumulo.core.master.thrift.TabletServerStatus;
  import org.apache.accumulo.core.rpc.ThriftUtil;
  import org.apache.accumulo.core.tabletserver.thrift.NotServingTabletException;
++import org.apache.accumulo.core.tabletserver.thrift.TUnloadTabletGoal;
  import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
  import org.apache.accumulo.core.trace.Tracer;
  import org.apache.accumulo.core.util.AddressUtil;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
index 72a3a23,d7702f2..030598a
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
@@@ -53,6 -48,6 +50,10 @@@ import org.apache.thrift.TException
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
++import com.google.common.collect.HashMultimap;
++import com.google.common.collect.Iterables;
++import com.google.common.collect.Multimap;
++
  /**
   * This balancer creates groups of tablet servers using user-provided regular expressions over the tablet server hostnames. Then it delegates to the table
   * balancer to balance the tablets within the resulting group of tablet servers. All tablet servers that do not match a regex are grouped into a default group.<br>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/base/src/main/java/org/apache/accumulo/server/master/state/SuspendingTServer.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/master/state/SuspendingTServer.java
index 3f4e49e,0000000..9d9cc55
mode 100644,000000..100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/SuspendingTServer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/SuspendingTServer.java
@@@ -1,71 -1,0 +1,74 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.server.master.state;
 +
- import com.google.common.net.HostAndPort;
++import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.SuspendLocationColumn.SUSPEND_COLUMN;
++
 +import java.util.Objects;
++
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
- import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.SuspendLocationColumn.SUSPEND_COLUMN;
++
++import com.google.common.net.HostAndPort;
 +
 +/** For a suspended tablet, the time of suspension and the server it was suspended from. */
 +public class SuspendingTServer {
 +  public final HostAndPort server;
 +  public final long suspensionTime;
 +
 +  SuspendingTServer(HostAndPort server, long suspensionTime) {
 +    this.server = Objects.requireNonNull(server);
 +    this.suspensionTime = suspensionTime;
 +  }
 +
 +  public static SuspendingTServer fromValue(Value value) {
 +    String valStr = value.toString();
 +    String[] parts = valStr.split("[|]", 2);
 +    return new SuspendingTServer(HostAndPort.fromString(parts[0]), Long.parseLong(parts[1]));
 +  }
 +
 +  public Value toValue() {
 +    return new Value(server.toString() + "|" + suspensionTime);
 +  }
 +
 +  @Override
 +  public boolean equals(Object rhsObject) {
 +    if (!(rhsObject instanceof SuspendingTServer)) {
 +      return false;
 +    }
 +    SuspendingTServer rhs = (SuspendingTServer) rhsObject;
 +    return server.equals(rhs.server) && suspensionTime == rhs.suspensionTime;
 +  }
 +
 +  public void setSuspension(Mutation m) {
 +    m.put(SUSPEND_COLUMN.getColumnFamily(), SUSPEND_COLUMN.getColumnQualifier(), toValue());
 +  }
 +
 +  public static void clearSuspension(Mutation m) {
 +    m.putDelete(SUSPEND_COLUMN.getColumnFamily(), SUSPEND_COLUMN.getColumnQualifier());
 +  }
 +
 +  @Override
 +  public int hashCode() {
 +    return Objects.hash(server, suspensionTime);
 +  }
 +
 +  @Override
 +  public String toString() {
 +    return server.toString() + "[" + suspensionTime + "]";
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java
index 6872466,5413e31..9315754
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java
@@@ -18,12 -18,8 +18,12 @@@ package org.apache.accumulo.server.mast
  
  import java.util.Collection;
  import java.util.Collections;
 +import java.util.List;
 +import java.util.Map;
- import org.apache.accumulo.core.data.impl.KeyExtent;
  
++import org.apache.accumulo.core.data.impl.KeyExtent;
  import org.apache.accumulo.server.AccumuloServerContext;
 +import org.apache.hadoop.fs.Path;
  
  /**
   * Interface for storing information about tablet assignments. There are three implementations:

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/base/src/main/java/org/apache/accumulo/server/replication/StatusFormatter.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/base/src/main/java/org/apache/accumulo/server/util/Halt.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/util/Halt.java
index cbd8510,7f57687..e0dadee
--- a/server/base/src/main/java/org/apache/accumulo/server/util/Halt.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/Halt.java
@@@ -16,9 -16,8 +16,11 @@@
   */
  package org.apache.accumulo.server.util;
  
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
++
 +import java.util.concurrent.TimeUnit;
 +
  import org.apache.accumulo.core.util.Daemon;
 -import org.apache.accumulo.core.util.UtilWaitThread;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/master/src/main/java/org/apache/accumulo/master/MasterTime.java
----------------------------------------------------------------------
diff --cc server/master/src/main/java/org/apache/accumulo/master/MasterTime.java
index 93e81e7,0000000..19683a3
mode 100644,000000..100644
--- a/server/master/src/main/java/org/apache/accumulo/master/MasterTime.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/MasterTime.java
@@@ -1,108 -1,0 +1,110 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.master;
 +
++import static java.util.concurrent.TimeUnit.MILLISECONDS;
++import static java.util.concurrent.TimeUnit.NANOSECONDS;
++import static java.util.concurrent.TimeUnit.SECONDS;
++
 +import java.io.IOException;
 +import java.nio.charset.StandardCharsets;
 +import java.util.Timer;
 +import java.util.TimerTask;
- import static java.util.concurrent.TimeUnit.MILLISECONDS;
- import static java.util.concurrent.TimeUnit.NANOSECONDS;
- import static java.util.concurrent.TimeUnit.SECONDS;
++
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 +import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +/** Keep a persistent roughly monotone view of how long a master has been overseeing this cluster. */
 +public class MasterTime extends TimerTask {
 +  private static final Logger log = LoggerFactory.getLogger(MasterTime.class);
 +
 +  private final String zPath;
 +  private final ZooReaderWriter zk;
 +  private final Master master;
 +  private final Timer timer;
 +
 +  /** Difference between time stored in ZooKeeper and System.nanoTime() when we last read from ZooKeeper. */
 +  private long skewAmount;
 +
 +  public MasterTime(Master master) throws IOException {
 +    this.zPath = ZooUtil.getRoot(master.getInstance()) + Constants.ZMASTER_TICK;
 +    this.zk = ZooReaderWriter.getInstance();
 +    this.master = master;
 +
 +    try {
 +      zk.putPersistentData(zPath, "0".getBytes(StandardCharsets.UTF_8), NodeExistsPolicy.SKIP);
 +      skewAmount = Long.parseLong(new String(zk.getData(zPath, null), StandardCharsets.UTF_8)) - System.nanoTime();
 +    } catch (Exception ex) {
 +      throw new IOException("Error updating master time", ex);
 +    }
 +
 +    this.timer = new Timer();
 +    timer.schedule(this, 0, MILLISECONDS.convert(10, SECONDS));
 +  }
 +
 +  /**
 +   * How long has this cluster had a Master?
 +   *
 +   * @return Approximate total duration this cluster has had a Master, in milliseconds.
 +   */
 +  public synchronized long getTime() {
 +    return MILLISECONDS.convert(System.nanoTime() + skewAmount, NANOSECONDS);
 +  }
 +
 +  /** Shut down the time keeping. */
 +  public void shutdown() {
 +    timer.cancel();
 +  }
 +
 +  @Override
 +  public void run() {
 +    switch (master.getMasterState()) {
 +    // If we don't have the lock, periodically re-read the value in ZooKeeper, in case there's another master we're
 +    // shadowing for.
 +      case INITIAL:
 +      case STOP:
 +        try {
 +          long zkTime = Long.parseLong(new String(zk.getData(zPath, null), StandardCharsets.UTF_8));
 +          synchronized (this) {
 +            skewAmount = zkTime - System.nanoTime();
 +          }
 +        } catch (Exception ex) {
 +          if (log.isDebugEnabled()) {
 +            log.debug("Failed to retrieve master tick time", ex);
 +          }
 +        }
 +        break;
 +      // If we do have the lock, periodically write our clock to ZooKeeper.
 +      case HAVE_LOCK:
 +      case SAFE_MODE:
 +      case NORMAL:
 +      case UNLOAD_METADATA_TABLETS:
 +      case UNLOAD_ROOT_TABLET:
 +        try {
 +          zk.putPersistentData(zPath, Long.toString(System.nanoTime() + skewAmount).getBytes(StandardCharsets.UTF_8), NodeExistsPolicy.OVERWRITE);
 +        } catch (Exception ex) {
 +          if (log.isDebugEnabled()) {
 +            log.debug("Failed to update master tick time", ex);
 +          }
 +        }
 +    }
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/master/src/main/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssigner.java
----------------------------------------------------------------------
diff --cc server/master/src/main/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssigner.java
index a10c90d,4e3a079..3d22d9f
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssigner.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssigner.java
@@@ -16,6 -16,6 +16,8 @@@
   */
  package org.apache.accumulo.master.replication;
  
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
++
  import java.util.Collection;
  import java.util.Map.Entry;
  import java.util.Set;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/master/src/main/java/org/apache/accumulo/master/replication/UnorderedWorkAssigner.java
----------------------------------------------------------------------
diff --cc server/master/src/main/java/org/apache/accumulo/master/replication/UnorderedWorkAssigner.java
index ab5b041,9a28dd4..93d4cc2
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/UnorderedWorkAssigner.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/UnorderedWorkAssigner.java
@@@ -16,6 -16,6 +16,8 @@@
   */
  package org.apache.accumulo.master.replication;
  
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
++
  import java.util.HashSet;
  import java.util.Iterator;
  import java.util.Set;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/master/src/main/java/org/apache/accumulo/master/replication/WorkDriver.java
----------------------------------------------------------------------
diff --cc server/master/src/main/java/org/apache/accumulo/master/replication/WorkDriver.java
index f0c368a,3558d2d..cc9d936
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkDriver.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkDriver.java
@@@ -16,8 -16,6 +16,10 @@@
   */
  package org.apache.accumulo.master.replication;
  
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
++
 +import java.util.concurrent.TimeUnit;
 +
  import org.apache.accumulo.core.client.AccumuloException;
  import org.apache.accumulo.core.client.AccumuloSecurityException;
  import org.apache.accumulo.core.client.Connector;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/monitor/src/main/java/org/apache/accumulo/monitor/ZooKeeperStatus.java
----------------------------------------------------------------------
diff --cc server/monitor/src/main/java/org/apache/accumulo/monitor/ZooKeeperStatus.java
index f2a295d,1125fc6..62f872f
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/ZooKeeperStatus.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/ZooKeeperStatus.java
@@@ -16,6 -16,6 +16,7 @@@
   */
  package org.apache.accumulo.monitor;
  
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
  import static java.nio.charset.StandardCharsets.UTF_8;
  
  import java.util.Collection;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ShellServlet.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/NullScanner.java
----------------------------------------------------------------------
diff --cc server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/NullScanner.java
index b91d454,0ba13c7..61ec171
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/NullScanner.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/NullScanner.java
@@@ -22,8 -22,7 +22,8 @@@ import java.util.concurrent.TimeUnit
  
  import org.apache.accumulo.core.client.IteratorSetting;
  import org.apache.accumulo.core.client.IteratorSetting.Column;
- import org.apache.accumulo.core.client.sample.SamplerConfiguration;
  import org.apache.accumulo.core.client.Scanner;
++import org.apache.accumulo.core.client.sample.SamplerConfiguration;
  import org.apache.accumulo.core.data.Key;
  import org.apache.accumulo.core.data.Range;
  import org.apache.accumulo.core.data.Value;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceFormatter.java
----------------------------------------------------------------------
diff --cc server/tracer/src/main/java/org/apache/accumulo/tracer/TraceFormatter.java
index 775e6aa,48ec8cf..5f98c4e
--- a/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceFormatter.java
+++ b/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceFormatter.java
@@@ -20,12 -20,11 +20,13 @@@ import java.text.SimpleDateFormat
  import java.util.Date;
  import java.util.Iterator;
  import java.util.Map.Entry;
+ 
  import org.apache.accumulo.core.data.Key;
  import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.util.format.DateFormatSupplier;
  import org.apache.accumulo.core.util.format.DefaultFormatter;
  import org.apache.accumulo.core.util.format.Formatter;
 +import org.apache.accumulo.core.util.format.FormatterConfig;
  import org.apache.accumulo.tracer.thrift.Annotation;
  import org.apache.accumulo.tracer.thrift.RemoteSpan;
  import org.apache.commons.lang.NotImplementedException;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java
----------------------------------------------------------------------
diff --cc server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java
index 7c0d9b2,67bd9d5..2fe9a27
--- a/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java
+++ b/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java
@@@ -16,6 -16,6 +16,7 @@@
   */
  package org.apache.accumulo.tracer;
  
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
  import static java.nio.charset.StandardCharsets.UTF_8;
  
  import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
----------------------------------------------------------------------
diff --cc server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
index 9c2d3e3,6c585d0..1deb1cc
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
@@@ -16,10 -16,10 +16,12 @@@
   */
  package org.apache.accumulo.tserver;
  
 +import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
  import static java.nio.charset.StandardCharsets.UTF_8;
++import static java.util.concurrent.TimeUnit.MILLISECONDS;
++import static java.util.concurrent.TimeUnit.NANOSECONDS;
  import static org.apache.accumulo.server.problems.ProblemType.TABLET_LOAD;
  
 -import java.io.FileNotFoundException;
  import java.io.IOException;
  import java.lang.management.ManagementFactory;
  import java.net.UnknownHostException;
@@@ -124,8 -119,6 +126,9 @@@ import org.apache.accumulo.core.tablets
  import org.apache.accumulo.core.tabletserver.thrift.NoSuchScanIDException;
  import org.apache.accumulo.core.tabletserver.thrift.NotServingTabletException;
  import org.apache.accumulo.core.tabletserver.thrift.TDurability;
 +import org.apache.accumulo.core.tabletserver.thrift.TSampleNotPresentException;
 +import org.apache.accumulo.core.tabletserver.thrift.TSamplerConfiguration;
++import org.apache.accumulo.core.tabletserver.thrift.TUnloadTabletGoal;
  import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
  import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Iface;
  import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Processor;
@@@ -259,12 -247,8 +262,9 @@@ import org.slf4j.Logger
  import org.slf4j.LoggerFactory;
  
  import com.google.common.net.HostAndPort;
- import static java.util.concurrent.TimeUnit.MILLISECONDS;
- import static java.util.concurrent.TimeUnit.NANOSECONDS;
- import org.apache.accumulo.core.tabletserver.thrift.TUnloadTabletGoal;
  
  public class TabletServer extends AccumuloServerContext implements Runnable {
 +
    private static final Logger log = LoggerFactory.getLogger(TabletServer.class);
    private static final long MAX_TIME_TO_WAIT_FOR_SCAN_RESULT_MILLIS = 1000;
    private static final long RECENTLY_SPLIT_MILLIES = 60 * 1000;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
----------------------------------------------------------------------
diff --cc server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
index 97606ea,1944569..d8ae94c
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
@@@ -16,6 -16,6 +16,7 @@@
   */
  package org.apache.accumulo.tserver;
  
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
  import static java.util.Objects.requireNonNull;
  
  import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/shell/src/main/java/org/apache/accumulo/shell/commands/GrepCommand.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/shell/src/main/java/org/apache/accumulo/shell/commands/ScanCommand.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/shell/src/main/java/org/apache/accumulo/shell/commands/TraceCommand.java
----------------------------------------------------------------------
diff --cc shell/src/main/java/org/apache/accumulo/shell/commands/TraceCommand.java
index edc6550,f5e2a0d..7c298f8
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/TraceCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/TraceCommand.java
@@@ -16,9 -16,8 +16,11 @@@
   */
  package org.apache.accumulo.shell.commands;
  
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
++
  import java.io.IOException;
  import java.util.Map;
 +import java.util.concurrent.TimeUnit;
  
  import org.apache.accumulo.core.client.Scanner;
  import org.apache.accumulo.core.conf.Property;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/shell/src/main/java/org/apache/accumulo/shell/format/DeleterFormatter.java
----------------------------------------------------------------------
diff --cc shell/src/main/java/org/apache/accumulo/shell/format/DeleterFormatter.java
index 275592e,1dd2234..670bd55
--- a/shell/src/main/java/org/apache/accumulo/shell/format/DeleterFormatter.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/format/DeleterFormatter.java
@@@ -26,9 -27,7 +27,8 @@@ import org.apache.accumulo.core.data.Mu
  import org.apache.accumulo.core.data.Value;
  import org.apache.accumulo.core.security.ColumnVisibility;
  import org.apache.accumulo.core.util.format.DefaultFormatter;
 +import org.apache.accumulo.core.util.format.FormatterConfig;
  import org.apache.accumulo.shell.Shell;
- 
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/shell/src/main/java/org/apache/accumulo/shell/mock/MockShell.java
----------------------------------------------------------------------
diff --cc shell/src/main/java/org/apache/accumulo/shell/mock/MockShell.java
index ebc92f7,729a61c..9f44d15
--- a/shell/src/main/java/org/apache/accumulo/shell/mock/MockShell.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/mock/MockShell.java
@@@ -33,12 -32,11 +31,14 @@@ import org.apache.accumulo.shell.ShellO
  import org.apache.commons.cli.CommandLine;
  import org.apache.commons.vfs2.FileSystemException;
  
+ import jline.console.ConsoleReader;
+ 
  /**
   * An Accumulo Shell implementation that allows a developer to attach an InputStream and Writer to the Shell for testing purposes.
 + *
 + * @deprecated since 1.8.0; use MiniAccumuloCluster or a standard mock framework instead.
   */
 +@Deprecated
  public class MockShell extends Shell {
    private static final String NEWLINE = "\n";
  

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/shell/src/test/java/org/apache/accumulo/shell/ShellConfigTest.java
----------------------------------------------------------------------
diff --cc shell/src/test/java/org/apache/accumulo/shell/ShellConfigTest.java
index 1e72293,2edee43..d948768
--- a/shell/src/test/java/org/apache/accumulo/shell/ShellConfigTest.java
+++ b/shell/src/test/java/org/apache/accumulo/shell/ShellConfigTest.java
@@@ -25,10 -25,9 +25,8 @@@ import java.io.FileDescriptor
  import java.io.FileInputStream;
  import java.io.IOException;
  import java.io.PrintStream;
 -import java.io.PrintWriter;
  import java.nio.file.Files;
  
- import jline.console.ConsoleReader;
- 
  import org.apache.accumulo.core.client.ClientConfiguration;
  import org.apache.accumulo.core.client.security.tokens.PasswordToken;
  import org.apache.accumulo.shell.ShellTest.TestOutputStream;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/shell/src/test/java/org/apache/accumulo/shell/ShellSetInstanceTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/shell/src/test/java/org/apache/accumulo/shell/commands/DeleteAuthsCommandTest.java
----------------------------------------------------------------------
diff --cc shell/src/test/java/org/apache/accumulo/shell/commands/DeleteAuthsCommandTest.java
index d19e4d0,0000000..3d6ee55
mode 100644,000000..100644
--- a/shell/src/test/java/org/apache/accumulo/shell/commands/DeleteAuthsCommandTest.java
+++ b/shell/src/test/java/org/apache/accumulo/shell/commands/DeleteAuthsCommandTest.java
@@@ -1,129 -1,0 +1,129 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.shell.commands;
 +
- import jline.console.ConsoleReader;
- 
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.admin.SecurityOperations;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.shell.Shell;
 +import org.apache.commons.cli.CommandLine;
 +import org.easymock.EasyMock;
 +import org.junit.Before;
 +import org.junit.Test;
 +
++import jline.console.ConsoleReader;
++
 +/**
 + *
 + */
 +public class DeleteAuthsCommandTest {
 +
 +  private DeleteAuthsCommand cmd;
 +
 +  @Before
 +  public void setup() {
 +    cmd = new DeleteAuthsCommand();
 +
 +    // Initialize that internal state
 +    cmd.getOptions();
 +  }
 +
 +  @Test
 +  public void deleteExistingAuth() throws Exception {
 +    Connector conn = EasyMock.createMock(Connector.class);
 +    CommandLine cli = EasyMock.createMock(CommandLine.class);
 +    Shell shellState = EasyMock.createMock(Shell.class);
 +    ConsoleReader reader = EasyMock.createMock(ConsoleReader.class);
 +    SecurityOperations secOps = EasyMock.createMock(SecurityOperations.class);
 +
 +    EasyMock.expect(shellState.getConnector()).andReturn(conn);
 +
 +    // We're the root user
 +    EasyMock.expect(conn.whoami()).andReturn("root");
 +    EasyMock.expect(cli.getOptionValue("u", "root")).andReturn("foo");
 +    EasyMock.expect(cli.getOptionValue("s")).andReturn("abc");
 +
 +    EasyMock.expect(conn.securityOperations()).andReturn(secOps);
 +    EasyMock.expect(conn.securityOperations()).andReturn(secOps);
 +    EasyMock.expect(secOps.getUserAuthorizations("foo")).andReturn(new Authorizations("abc", "123"));
 +    secOps.changeUserAuthorizations("foo", new Authorizations("123"));
 +    EasyMock.expectLastCall();
 +
 +    EasyMock.replay(conn, cli, shellState, reader, secOps);
 +
 +    cmd.execute("deleteauths -u foo -s abc", cli, shellState);
 +
 +    EasyMock.verify(conn, cli, shellState, reader, secOps);
 +  }
 +
 +  @Test
 +  public void deleteNonExistingAuth() throws Exception {
 +    Connector conn = EasyMock.createMock(Connector.class);
 +    CommandLine cli = EasyMock.createMock(CommandLine.class);
 +    Shell shellState = EasyMock.createMock(Shell.class);
 +    ConsoleReader reader = EasyMock.createMock(ConsoleReader.class);
 +    SecurityOperations secOps = EasyMock.createMock(SecurityOperations.class);
 +
 +    EasyMock.expect(shellState.getConnector()).andReturn(conn);
 +
 +    // We're the root user
 +    EasyMock.expect(conn.whoami()).andReturn("root");
 +    EasyMock.expect(cli.getOptionValue("u", "root")).andReturn("foo");
 +    EasyMock.expect(cli.getOptionValue("s")).andReturn("def");
 +
 +    EasyMock.expect(conn.securityOperations()).andReturn(secOps);
 +    EasyMock.expect(conn.securityOperations()).andReturn(secOps);
 +    EasyMock.expect(secOps.getUserAuthorizations("foo")).andReturn(new Authorizations("abc", "123"));
 +    secOps.changeUserAuthorizations("foo", new Authorizations("abc", "123"));
 +    EasyMock.expectLastCall();
 +
 +    EasyMock.replay(conn, cli, shellState, reader, secOps);
 +
 +    cmd.execute("deleteauths -u foo -s def", cli, shellState);
 +
 +    EasyMock.verify(conn, cli, shellState, reader, secOps);
 +  }
 +
 +  @Test
 +  public void deleteAllAuth() throws Exception {
 +    Connector conn = EasyMock.createMock(Connector.class);
 +    CommandLine cli = EasyMock.createMock(CommandLine.class);
 +    Shell shellState = EasyMock.createMock(Shell.class);
 +    ConsoleReader reader = EasyMock.createMock(ConsoleReader.class);
 +    SecurityOperations secOps = EasyMock.createMock(SecurityOperations.class);
 +
 +    EasyMock.expect(shellState.getConnector()).andReturn(conn);
 +
 +    // We're the root user
 +    EasyMock.expect(conn.whoami()).andReturn("root");
 +    EasyMock.expect(cli.getOptionValue("u", "root")).andReturn("foo");
 +    EasyMock.expect(cli.getOptionValue("s")).andReturn("abc,123");
 +
 +    EasyMock.expect(conn.securityOperations()).andReturn(secOps);
 +    EasyMock.expect(conn.securityOperations()).andReturn(secOps);
 +    EasyMock.expect(secOps.getUserAuthorizations("foo")).andReturn(new Authorizations("abc", "123"));
 +    secOps.changeUserAuthorizations("foo", new Authorizations());
 +    EasyMock.expectLastCall();
 +
 +    EasyMock.replay(conn, cli, shellState, reader, secOps);
 +
 +    cmd.execute("deleteauths -u foo -s abc,123", cli, shellState);
 +
 +    EasyMock.verify(conn, cli, shellState, reader, secOps);
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/shell/src/test/java/org/apache/accumulo/shell/commands/HistoryCommandTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
index aa1ad54,0000000..cde84c1
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
@@@ -1,178 -1,0 +1,178 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test;
 +
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
++
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.SortedSet;
 +import java.util.TreeSet;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.Constants;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.admin.InstanceOperations;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.fate.zookeeper.ZooCache;
 +import org.apache.accumulo.fate.zookeeper.ZooLock;
 +import org.apache.accumulo.harness.AccumuloClusterHarness;
 +import org.apache.accumulo.minicluster.ServerType;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.io.Text;
 +import org.junit.After;
 +import org.junit.Assert;
 +import org.junit.Before;
 +import org.junit.Test;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
- import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
- 
 +// Accumulo3047
 +public class BadDeleteMarkersCreatedIT extends AccumuloClusterHarness {
 +  private static final Logger log = LoggerFactory.getLogger(BadDeleteMarkersCreatedIT.class);
 +
 +  @Override
 +  public int defaultTimeoutSeconds() {
 +    return 120;
 +  }
 +
 +  @Override
 +  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    cfg.setNumTservers(1);
 +    cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
 +    cfg.setProperty(Property.GC_CYCLE_START, "0s");
 +  }
 +
 +  private int timeoutFactor = 1;
 +
 +  @Before
 +  public void getTimeoutFactor() {
 +    try {
 +      timeoutFactor = Integer.parseInt(System.getProperty("timeout.factor"));
 +    } catch (NumberFormatException e) {
 +      log.warn("Could not parse integer from timeout.factor");
 +    }
 +
 +    Assert.assertTrue("timeout.factor must be greater than or equal to 1", timeoutFactor >= 1);
 +  }
 +
 +  private String gcCycleDelay, gcCycleStart;
 +
 +  @Before
 +  public void alterConfig() throws Exception {
 +    InstanceOperations iops = getConnector().instanceOperations();
 +    Map<String,String> config = iops.getSystemConfiguration();
 +    gcCycleDelay = config.get(Property.GC_CYCLE_DELAY.getKey());
 +    gcCycleStart = config.get(Property.GC_CYCLE_START.getKey());
 +    iops.setProperty(Property.GC_CYCLE_DELAY.getKey(), "1s");
 +    iops.setProperty(Property.GC_CYCLE_START.getKey(), "0s");
 +    log.info("Restarting garbage collector");
 +
 +    getCluster().getClusterControl().stopAllServers(ServerType.GARBAGE_COLLECTOR);
 +
 +    Instance instance = getConnector().getInstance();
 +    ZooCache zcache = new ZooCache(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut());
 +    zcache.clear();
 +    String path = ZooUtil.getRoot(instance) + Constants.ZGC_LOCK;
 +    byte[] gcLockData;
 +    do {
 +      gcLockData = ZooLock.getLockData(zcache, path, null);
 +      if (null != gcLockData) {
 +        log.info("Waiting for GC ZooKeeper lock to expire");
 +        Thread.sleep(2000);
 +      }
 +    } while (null != gcLockData);
 +
 +    log.info("GC lock was lost");
 +
 +    getCluster().getClusterControl().startAllServers(ServerType.GARBAGE_COLLECTOR);
 +    log.info("Garbage collector was restarted");
 +
 +    gcLockData = null;
 +    do {
 +      gcLockData = ZooLock.getLockData(zcache, path, null);
 +      if (null == gcLockData) {
 +        log.info("Waiting for GC ZooKeeper lock to be acquired");
 +        Thread.sleep(2000);
 +      }
 +    } while (null == gcLockData);
 +
 +    log.info("GC lock was acquired");
 +  }
 +
 +  @After
 +  public void restoreConfig() throws Exception {
 +    InstanceOperations iops = getConnector().instanceOperations();
 +    if (null != gcCycleDelay) {
 +      iops.setProperty(Property.GC_CYCLE_DELAY.getKey(), gcCycleDelay);
 +    }
 +    if (null != gcCycleStart) {
 +      iops.setProperty(Property.GC_CYCLE_START.getKey(), gcCycleStart);
 +    }
 +    log.info("Restarting garbage collector");
 +    getCluster().getClusterControl().stopAllServers(ServerType.GARBAGE_COLLECTOR);
 +    getCluster().getClusterControl().startAllServers(ServerType.GARBAGE_COLLECTOR);
 +    log.info("Garbage collector was restarted");
 +  }
 +
 +  @Test
 +  public void test() throws Exception {
 +    // make a table
 +    String tableName = getUniqueNames(1)[0];
 +    Connector c = getConnector();
 +    log.info("Creating table to be deleted");
 +    c.tableOperations().create(tableName);
 +    final String tableId = c.tableOperations().tableIdMap().get(tableName);
 +    Assert.assertNotNull("Expected to find a tableId", tableId);
 +
 +    // add some splits
 +    SortedSet<Text> splits = new TreeSet<>();
 +    for (int i = 0; i < 10; i++) {
 +      splits.add(new Text("" + i));
 +    }
 +    c.tableOperations().addSplits(tableName, splits);
 +    // get rid of all the splits
 +    c.tableOperations().deleteRows(tableName, null, null);
 +    // get rid of the table
 +    c.tableOperations().delete(tableName);
 +    log.info("Sleeping to let garbage collector run");
 +    // let gc run
 +    sleepUninterruptibly(timeoutFactor * 15, TimeUnit.SECONDS);
 +    log.info("Verifying that delete markers were deleted");
 +    // look for delete markers
 +    Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
 +    scanner.setRange(MetadataSchema.DeletesSection.getRange());
 +    for (Entry<Key,Value> entry : scanner) {
 +      String row = entry.getKey().getRow().toString();
 +      if (!row.contains("/" + tableId + "/")) {
 +        log.info("Ignoring delete entry for a table other than the one we deleted");
 +        continue;
 +      }
 +      Assert.fail("Delete entry should have been deleted by the garbage collector: " + entry.getKey().getRow().toString());
 +    }
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/BatchWriterInTabletServerIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/BatchWriterInTabletServerIT.java
index 6bd5da4,0000000..b9560b6
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/BatchWriterInTabletServerIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/BatchWriterInTabletServerIT.java
@@@ -1,126 -1,0 +1,127 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test;
 +
- import com.google.common.collect.Iterators;
++import java.util.Map;
++
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.PartialKey;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.LongCombiner;
 +import org.apache.accumulo.core.iterators.user.SummingCombiner;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.harness.AccumuloClusterHarness;
 +import org.apache.hadoop.io.Text;
 +import org.apache.log4j.Logger;
 +import org.junit.Assert;
 +import org.junit.Test;
 +
- import java.util.Map;
++import com.google.common.collect.Iterators;
 +
 +/**
 + * Test writing to another table from inside an iterator.
 + *
 + * @see BatchWriterIterator
 + */
 +public class BatchWriterInTabletServerIT extends AccumuloClusterHarness {
 +  private static final Logger log = Logger.getLogger(BatchWriterInTabletServerIT.class);
 +
 +  // The scan iterator under test writes from inside the tablet server process,
 +  // so this IT is only meaningful (and only safe) on a MiniAccumuloCluster.
 +  @Override
 +  public boolean canRunTest(ClusterType type) {
 +    return ClusterType.MINI == type;
 +  }
 +
 +  /**
 +   * This test should succeed.
 +   */
 +  @Test
 +  public void testNormalWrite() throws Exception {
 +    String[] uniqueNames = getUniqueNames(2);
 +    String t1 = uniqueNames[0], t2 = uniqueNames[1];
 +    Connector c = getConnector();
 +    int numEntriesToWritePerEntry = 50;
 +    // last two flags: clearCacheAfterFirstWrite=false, splitAfterFirstWrite=false
 +    IteratorSetting itset = BatchWriterIterator.iteratorSetting(6, 0, 15, 1000, numEntriesToWritePerEntry, t2, c, getAdminToken(), false, false);
 +    test(t1, t2, c, itset, numEntriesToWritePerEntry);
 +  }
 +
 +  /**
 +   * Fixed by ACCUMULO-4229.
 +   * <p>
 +   * This tests a situation that a client which shares a LocatorCache with the tablet server may fall into. Before the problem was fixed, adding a split after
 +   * the Locator cache falls out of sync caused the BatchWriter to continuously attempt to write to an old, closed tablet. It would do so for 15 seconds until a
 +   * timeout on the BatchWriter.
 +   */
 +  @Test
 +  public void testClearLocatorAndSplitWrite() throws Exception {
 +    String[] uniqueNames = getUniqueNames(2);
 +    String t1 = uniqueNames[0], t2 = uniqueNames[1];
 +    Connector c = getConnector();
 +    int numEntriesToWritePerEntry = 50;
 +    // last two flags true: clear the locator cache and split t2 mid-write to provoke ACCUMULO-4229
 +    IteratorSetting itset = BatchWriterIterator.iteratorSetting(6, 0, 15, 1000, numEntriesToWritePerEntry, t2, c, getAdminToken(), true, true);
 +    test(t1, t2, c, itset, numEntriesToWritePerEntry);
 +  }
 +
 +  /**
 +   * Shared test body: writes one entry to t1, scans t1 with the given iterator (which writes
 +   * numEntriesToWritePerEntry entries to t2 from inside the tablet server), then verifies the
 +   * iterator reported success and that t2's SummingCombiner saw the expected count.
 +   */
 +  private void test(String t1, String t2, Connector c, IteratorSetting itset, int numEntriesToWritePerEntry) throws Exception {
 +    // Write an entry to t1
 +    c.tableOperations().create(t1);
 +    Key k = new Key(new Text("row"), new Text("cf"), new Text("cq"));
 +    Value v = new Value("1".getBytes());
 +    {
 +      BatchWriterConfig config = new BatchWriterConfig();
 +      config.setMaxMemory(0);
 +      BatchWriter writer = c.createBatchWriter(t1, config);
 +      Mutation m = new Mutation(k.getRow());
 +      m.put(k.getColumnFamily(), k.getColumnQualifier(), v);
 +      writer.addMutation(m);
 +      writer.close();
 +    }
 +
 +    // Create t2 with a combiner to count entries written to it
 +    c.tableOperations().create(t2);
 +    IteratorSetting summer = new IteratorSetting(2, "summer", SummingCombiner.class);
 +    LongCombiner.setEncodingType(summer, LongCombiner.Type.STRING);
 +    LongCombiner.setCombineAllColumns(summer, true);
 +    c.tableOperations().attachIterator(t2, summer);
 +
 +    Map.Entry<Key,Value> actual;
 +    // Scan t1 with an iterator that writes to table t2
 +    Scanner scanner = c.createScanner(t1, Authorizations.EMPTY);
 +    scanner.addScanIterator(itset);
 +    actual = Iterators.getOnlyElement(scanner.iterator());
 +    // key round-trips unchanged (ignoring timestamp); value signals the iterator's write succeeded
 +    Assert.assertTrue(actual.getKey().equals(k, PartialKey.ROW_COLFAM_COLQUAL));
 +    Assert.assertEquals(BatchWriterIterator.SUCCESS_VALUE, actual.getValue());
 +    scanner.close();
 +
 +    // ensure entries correctly wrote to table t2
 +    scanner = c.createScanner(t2, Authorizations.EMPTY);
 +    actual = Iterators.getOnlyElement(scanner.iterator());
 +    log.debug("t2 entry is " + actual.getKey().toStringNoTime() + " -> " + actual.getValue());
 +    Assert.assertTrue(actual.getKey().equals(k, PartialKey.ROW_COLFAM_COLQUAL));
 +    // combiner summed one "1" per entry written, so the total equals the write count
 +    Assert.assertEquals(numEntriesToWritePerEntry, Integer.parseInt(actual.getValue().toString()));
 +    scanner.close();
 +
 +    c.tableOperations().delete(t1);
 +    c.tableOperations().delete(t2);
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/CleanWalIT.java
index 7146a9f,0000000..937ccb8
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java
@@@ -1,147 -1,0 +1,147 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test;
 +
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +import static org.junit.Assert.assertEquals;
 +
 +import java.util.Map.Entry;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.RootTable;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.harness.AccumuloClusterHarness;
 +import org.apache.accumulo.minicluster.ServerType;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.RawLocalFileSystem;
 +import org.junit.After;
 +import org.junit.Before;
 +import org.junit.Test;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import com.google.common.collect.Iterators;
- import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +
 +/**
 + * Verifies (ACCUMULO-1830) that write-ahead-log references are removed from the metadata and
 + * root tables after tablet servers are restarted and recovery runs.
 + */
 +public class CleanWalIT extends AccumuloClusterHarness {
 +  private static final Logger log = LoggerFactory.getLogger(CleanWalIT.class);
 +
 +  @Override
 +  public int defaultTimeoutSeconds() {
 +    return 4 * 60;
 +  }
 +
 +  @Override
 +  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s");
 +    cfg.setNumTservers(1);
 +    // use raw local file system so walogs sync and flush will work
 +    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
 +  }
 +
 +  // Take the trace table offline before the test; presumably so trace writes don't create
 +  // extra WAL references that would skew countLogs() below — TODO confirm.
 +  @Before
 +  public void offlineTraceTable() throws Exception {
 +    Connector conn = getConnector();
 +    String traceTable = conn.instanceOperations().getSystemConfiguration().get(Property.TRACE_TABLE.getKey());
 +    if (conn.tableOperations().exists(traceTable)) {
 +      conn.tableOperations().offline(traceTable, true);
 +    }
 +  }
 +
 +  // Restore the trace table after the test so later ITs sharing the cluster are unaffected.
 +  @After
 +  public void onlineTraceTable() throws Exception {
 +    if (null != cluster) {
 +      Connector conn = getConnector();
 +      String traceTable = conn.instanceOperations().getSystemConfiguration().get(Property.TRACE_TABLE.getKey());
 +      if (conn.tableOperations().exists(traceTable)) {
 +        conn.tableOperations().online(traceTable, true);
 +      }
 +    }
 +  }
 +
 +  // test for ACCUMULO-1830
 +  @Test
 +  public void test() throws Exception {
 +    Connector conn = getConnector();
 +    String tableName = getUniqueNames(1)[0];
 +    conn.tableOperations().create(tableName);
 +    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
 +    Mutation m = new Mutation("row");
 +    m.put("cf", "cq", "value");
 +    bw.addMutation(m);
 +    bw.close();
 +    // kill the (single) tserver with unflushed data so restart forces WAL recovery
 +    getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
 +    // all 3 tables should do recovery, but the bug doesn't really remove the log file references
 +
 +    getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
 +
 +    for (String table : new String[] {MetadataTable.NAME, RootTable.NAME})
 +      conn.tableOperations().flush(table, null, null, true);
 +    log.debug("Checking entries for " + tableName);
 +    assertEquals(1, count(tableName, conn));
 +    for (String table : new String[] {MetadataTable.NAME, RootTable.NAME}) {
 +      log.debug("Checking logs for " + table);
 +      // after recovery and flush there should be no WAL references left in metadata
 +      assertEquals("Found logs for " + table, 0, countLogs(table, conn));
 +    }
 +
 +    bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
 +    m = new Mutation("row");
 +    m.putDelete("cf", "cq");
 +    bw.addMutation(m);
 +    bw.close();
 +    assertEquals(0, count(tableName, conn));
 +    conn.tableOperations().flush(tableName, null, null, true);
 +    conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
 +    conn.tableOperations().flush(RootTable.NAME, null, null, true);
 +    try {
 +      getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
 +      sleepUninterruptibly(3, TimeUnit.SECONDS);
 +    } finally {
 +      getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
 +    }
 +    // the delete survived the second restart; the row stays gone
 +    assertEquals(0, count(tableName, conn));
 +  }
 +
 +  // Counts WAL (LogColumnFamily) entries in the metadata table over the whole tablets section.
 +  // NOTE(review): the tableName parameter is not used to restrict the scan — this counts log
 +  // references for ALL tablets, not just the named table; confirm whether that is intended.
 +  private int countLogs(String tableName, Connector conn) throws TableNotFoundException {
 +    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
 +    scanner.fetchColumnFamily(MetadataSchema.TabletsSection.LogColumnFamily.NAME);
 +    scanner.setRange(MetadataSchema.TabletsSection.getRange());
 +    int count = 0;
 +    for (Entry<Key,Value> entry : scanner) {
 +      log.debug("Saw " + entry.getKey() + "=" + entry.getValue());
 +      count++;
 +    }
 +    return count;
 +  }
 +
 +  // Returns the number of entries currently visible in tableName.
 +  int count(String tableName, Connector conn) throws Exception {
 +    Scanner s = conn.createScanner(tableName, Authorizations.EMPTY);
 +    return Iterators.size(s.iterator());
 +  }
 +
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/CompactionRateLimitingIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/CompactionRateLimitingIT.java
index 91ed2de,0000000..075dd30
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/CompactionRateLimitingIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CompactionRateLimitingIT.java
@@@ -1,82 -1,0 +1,83 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.test;
 +
 +import java.util.Random;
++
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.accumulo.test.functional.ConfigurableMacBase;
 +import org.apache.hadoop.conf.Configuration;
 +import org.junit.Assert;
 +import org.junit.Test;
 +
 +/**
 + * Verifies that major compactions respect the configured throughput limit
 + * (Property.TSERV_MAJC_THROUGHPUT): a compaction of BYTES_TO_WRITE bytes at RATE
 + * bytes/sec must take at least ~80% of the theoretical minimum duration.
 + */
 +public class CompactionRateLimitingIT extends ConfigurableMacBase {
 +  public static final long BYTES_TO_WRITE = 10 * 1024 * 1024;
 +  public static final long RATE = 1 * 1024 * 1024;
 +
 +  @Override
 +  public void configure(MiniAccumuloConfigImpl cfg, Configuration fsConf) {
 +    // Disable compression so bytes on disk match bytes pushed through the rate limiter,
 +    // and raise the compaction ratio so only the forced compaction runs.
 +    cfg.setProperty(Property.TSERV_MAJC_THROUGHPUT, RATE + "B");
 +    cfg.setProperty(Property.TABLE_MAJC_RATIO, "20");
 +    cfg.setProperty(Property.TABLE_FILE_COMPRESSION_TYPE, "none");
 +  }
 +
 +  @Test
 +  public void majorCompactionsAreRateLimited() throws Exception {
 +    long bytesWritten = 0;
 +    String tableName = getUniqueNames(1)[0];
 +    Connector conn = getCluster().getConnector("root", new PasswordToken(ROOT_PASSWORD));
 +    conn.tableOperations().create(tableName);
 +    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
 +    try {
 +      // Write random (incompressible-looking) data until we reach BYTES_TO_WRITE.
 +      Random r = new Random();
 +      while (bytesWritten < BYTES_TO_WRITE) {
 +        byte[] rowKey = new byte[32];
 +        r.nextBytes(rowKey);
 +
 +        byte[] qual = new byte[32];
 +        r.nextBytes(qual);
 +
 +        byte[] value = new byte[1024];
 +        r.nextBytes(value);
 +
 +        Mutation m = new Mutation(rowKey);
 +        m.put(new byte[0], qual, value);
 +        bw.addMutation(m);
 +
 +        bytesWritten += rowKey.length + qual.length + value.length;
 +      }
 +    } finally {
 +      bw.close();
 +    }
 +
 +    conn.tableOperations().flush(tableName, null, null, true);
 +
 +    long compactionStart = System.currentTimeMillis();
 +    conn.tableOperations().compact(tableName, null, null, false, true);
 +    long duration = System.currentTimeMillis() - compactionStart;
 +    // The rate will be "bursty", try to account for that by taking 80% of the expected rate (allow for 20% under the maximum expected duration)
 +    // BUGFIX: "(int) 0.8d * RATE" parsed as "((int) 0.8d) * RATE" == 0, so the failure
 +    // message always reported a limit of 0 bytes/sec; the cast must cover the product.
 +    Assert.assertTrue(
 +        String.format("Expected a compaction rate of no more than %,d bytes/sec, but saw a rate of %,f bytes/sec", (long) (0.8d * RATE), 1000.0 * bytesWritten
 +            / duration), duration > 1000L * 0.8 * BYTES_TO_WRITE / RATE);
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/IMMLGBenchmark.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/IMMLGBenchmark.java
index 59c46ec,c06a5d7..6bae346
--- a/test/src/main/java/org/apache/accumulo/test/IMMLGBenchmark.java
+++ b/test/src/main/java/org/apache/accumulo/test/IMMLGBenchmark.java
@@@ -16,6 -16,6 +16,8 @@@
   */
  package org.apache.accumulo.test;
  
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
++
  import java.util.ArrayList;
  import java.util.HashMap;
  import java.util.HashSet;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/018c7fe5/test/src/main/java/org/apache/accumulo/test/MetaGetsReadersIT.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/MetaGetsReadersIT.java
index 73714c5,0000000..9785f8a
mode 100644,000000..100644
--- a/test/src/main/java/org/apache/accumulo/test/MetaGetsReadersIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/MetaGetsReadersIT.java
@@@ -1,117 -1,0 +1,117 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.accumulo.test;
 +
++import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.util.Iterator;
 +import java.util.Map.Entry;
 +import java.util.Random;
 +import java.util.concurrent.TimeUnit;
 +import java.util.concurrent.atomic.AtomicBoolean;
 +
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 +import org.apache.accumulo.test.functional.ConfigurableMacBase;
 +import org.apache.accumulo.test.functional.SlowIterator;
 +import org.apache.hadoop.conf.Configuration;
 +import org.junit.Test;
 +
 +import com.google.common.collect.Iterators;
- import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
 +
 +/**
 + * Verifies that metadata-table scans stay fast even when the tserver's scan file handles
 + * are saturated by slow user scans (TSERV_SCAN_MAX_OPENFILES is limited to 2 below).
 + */
 +public class MetaGetsReadersIT extends ConfigurableMacBase {
 +
 +  @Override
 +  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
 +    cfg.setNumTservers(1);
 +    // only two concurrent open files for scans, so the two slow scans exhaust the pool
 +    cfg.setProperty(Property.TSERV_SCAN_MAX_OPENFILES, "2");
 +    cfg.setProperty(Property.TABLE_BLOCKCACHE_ENABLED, "false");
 +  }
 +
 +  /**
 +   * Returns an unstarted thread that repeatedly scans tableName through a SlowIterator
 +   * (10ms per entry) until stop is set. On any exception it sets stop itself so the
 +   * main test fails fast via assertFalse(stop.get()).
 +   * NOTE(review): "stop.get() == false" would read better as "!stop.get()".
 +   */
 +  private static Thread slowScan(final Connector c, final String tableName, final AtomicBoolean stop) {
 +    Thread thread = new Thread() {
 +      @Override
 +      public void run() {
 +        try {
 +          while (stop.get() == false) {
 +            Scanner s = c.createScanner(tableName, Authorizations.EMPTY);
 +            IteratorSetting is = new IteratorSetting(50, SlowIterator.class);
 +            SlowIterator.setSleepTime(is, 10);
 +            s.addScanIterator(is);
 +            Iterator<Entry<Key,Value>> iterator = s.iterator();
 +            while (iterator.hasNext() && stop.get() == false) {
 +              iterator.next();
 +            }
 +          }
 +        } catch (Exception ex) {
 +          log.trace("{}", ex.getMessage(), ex);
 +          stop.set(true);
 +        }
 +      }
 +    };
 +    return thread;
 +  }
 +
 +  @Test(timeout = 2 * 60 * 1000)
 +  public void test() throws Exception {
 +    final String tableName = getUniqueNames(1)[0];
 +    final Connector c = getConnector();
 +    c.tableOperations().create(tableName);
 +    // load 50k random rows and flush so the slow scans have files to hold open
 +    Random random = new Random();
 +    BatchWriter bw = c.createBatchWriter(tableName, null);
 +    for (int i = 0; i < 50000; i++) {
 +      byte[] row = new byte[100];
 +      random.nextBytes(row);
 +      Mutation m = new Mutation(row);
 +      m.put("", "", "");
 +      bw.addMutation(m);
 +    }
 +    bw.close();
 +    c.tableOperations().flush(tableName, null, null, true);
 +    final AtomicBoolean stop = new AtomicBoolean(false);
 +    Thread t1 = slowScan(c, tableName, stop);
 +    t1.start();
 +    Thread t2 = slowScan(c, tableName, stop);
 +    t2.start();
 +    // give the slow scans time to occupy the open-file slots before timing the metadata scan
 +    sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
 +    long now = System.currentTimeMillis();
 +    Scanner m = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
 +    Iterators.size(m.iterator());
 +    long delay = System.currentTimeMillis() - now;
 +    System.out.println("Delay = " + delay);
 +    // the metadata scan must not have been starved by the slow user scans
 +    assertTrue("metadata table scan was slow", delay < 1000);
 +    // stop would be true only if a slowScan thread hit an exception
 +    assertFalse(stop.get());
 +    stop.set(true);
 +    t1.interrupt();
 +    t2.interrupt();
 +    t1.join();
 +    t2.join();
 +  }
 +
 +}


Mime
View raw message