accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ctubb...@apache.org
Subject [accumulo] 04/04: Additional wrapping fixes for checkstyle (#416)
Date Fri, 06 Apr 2018 09:53:10 GMT
This is an automated email from the ASF dual-hosted git repository.

ctubbsii pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git

commit fbaee3e76438202752c03d18dfcc31fb664c5ce6
Author: Christopher Tubbs <ctubbsii@apache.org>
AuthorDate: Fri Apr 6 04:16:04 2018 -0400

    Additional wrapping fixes for checkstyle (#416)
    
    Additional line-wrapping fixes for checkstyle for the master branch
---
 .../core/client/admin/NewTableConfiguration.java   |   5 +-
 .../accumulo/core/client/impl/MasterClient.java    |   4 +-
 .../impl/TabletServerBatchReaderIterator.java      |  11 +-
 .../core/client/summary/CountingSummarizer.java    |  36 +--
 .../accumulo/core/client/summary/Summarizer.java   |  32 +--
 .../accumulo/core/conf/ClientConfigGenerate.java   |  11 +-
 .../apache/accumulo/core/conf/ClientProperty.java  |   9 +-
 .../accumulo/core/conf/ConfigSanityCheck.java      |   8 +-
 .../accumulo/core/conf/ConfigurationDocGen.java    |   9 +-
 .../org/apache/accumulo/core/conf/Property.java    | 258 +++++++++++++--------
 .../apache/accumulo/core/conf/PropertyType.java    |  35 ++-
 .../file/blockfile/cache/lru/LruBlockCache.java    |   6 +-
 .../accumulo/core/iterators/IteratorUtil.java      |  37 +--
 .../CachingHDFSSecretKeyEncryptionStrategy.java    |  13 +-
 .../core/security/crypto/DefaultCryptoModule.java  |  11 +-
 .../org/apache/accumulo/core/summary/Gatherer.java |   5 +-
 .../accumulo/core/summary/SummaryReader.java       |  12 +-
 .../accumulo/core/conf/ConfigSanityCheckTest.java  |   6 +-
 .../main/java/org/apache/accumulo/proxy/Proxy.java |   4 +-
 .../java/org/apache/accumulo/server/Accumulo.java  |   8 +-
 .../org/apache/accumulo/server/ServerOpts.java     |   3 +-
 .../accumulo/server/client/BulkImporter.java       |   6 +-
 .../accumulo/server/fs/VolumeManagerImpl.java      |   5 +-
 .../apache/accumulo/server/init/Initialize.java    |  36 +--
 .../balancer/HostRegexTableLoadBalancer.java       |   8 +-
 .../server/metrics/MetricsConfiguration.java       |   4 +-
 .../rpc/TCredentialsUpdatingInvocationHandler.java |   5 +-
 .../apache/accumulo/server/rpc/TServerUtils.java   |   6 +-
 .../server/security/SecurityOperation.java         |  15 +-
 .../org/apache/accumulo/server/util/FileUtil.java  |   3 +-
 .../java/org/apache/accumulo/master/Master.java    |   6 +-
 .../monitor/util/AccumuloMonitorAppenderTest.java  |  28 ++-
 .../apache/accumulo/tracer/AsyncSpanReceiver.java  |   8 +-
 .../org/apache/accumulo/tracer/TraceServer.java    |   5 +-
 .../org/apache/accumulo/tserver/NativeMap.java     |   6 +-
 .../org/apache/accumulo/tserver/TabletServer.java  |   9 +-
 .../tserver/TabletServerResourceManager.java       |  17 +-
 .../tserver/compaction/MajorCompactionRequest.java |   6 +-
 .../tserver/LargestFirstMemoryManagerTest.java     |   6 +-
 .../main/java/org/apache/accumulo/shell/Shell.java |   8 +-
 .../org/apache/accumulo/shell/ShellOptionsJC.java  |  14 +-
 .../shell/commands/CreateTableCommand.java         |  11 +-
 .../accumulo/shell/commands/HiddenCommand.java     |  13 +-
 .../accumulo/shell/commands/SummariesCommand.java  |   3 +-
 .../main/java/org/apache/accumulo/start/Main.java  |   4 +-
 .../accumulo/harness/AccumuloClusterHarness.java   |   5 +-
 .../apache/accumulo/test/ConditionalWriterIT.java  |   5 +-
 .../org/apache/accumulo/test/ShellConfigIT.java    |   5 +-
 .../org/apache/accumulo/test/VerifyIngest.java     |   3 +-
 .../org/apache/accumulo/test/VolumeChooserIT.java  |   4 +-
 .../test/functional/RegexGroupBalanceIT.java       |   4 +-
 .../test/functional/SessionBlockVerifyIT.java      |   5 +-
 52 files changed, 463 insertions(+), 323 deletions(-)

diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/NewTableConfiguration.java b/core/src/main/java/org/apache/accumulo/core/client/admin/NewTableConfiguration.java
index aa34f2e..9c97f54 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/NewTableConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/NewTableConfiguration.java
@@ -235,9 +235,8 @@ public class NewTableConfiguration {
     try {
       TableOperationsHelper.checkIteratorConflicts(iteratorProps, setting, scopes);
     } catch (AccumuloException e) {
-      throw new IllegalArgumentException(
-          "The specified IteratorSetting conflicts with an iterator already defined on this NewTableConfiguration",
-          e);
+      throw new IllegalArgumentException("The specified IteratorSetting"
+          + " conflicts with an iterator already defined on this NewTableConfiguration", e);
     }
     for (IteratorScope scope : scopes) {
       String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/MasterClient.java b/core/src/main/java/org/apache/accumulo/core/client/impl/MasterClient.java
index ca05f84..dfe29e5 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/MasterClient.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/MasterClient.java
@@ -155,8 +155,8 @@ public class MasterClient {
         }
       } catch (ThriftNotActiveServiceException e) {
         // Let it loop, fetching a new location
-        log.debug(
-            "Contacted a Master which is no longer active, re-creating the connection to the active Master");
+        log.debug("Contacted a Master which is no longer active, re-creating"
+            + " the connection to the active Master");
       } catch (Exception e) {
         throw new AccumuloException(e);
       } finally {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
index 4609b8f..4c87dca 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
@@ -185,11 +185,12 @@ public class TabletServerBatchReaderIterator implements Iterator<Entry<Key,Value
             throw new RuntimeException(fatalException);
 
         if (queryThreadPool.isShutdown()) {
-          String shortMsg = "The BatchScanner was unexpectedly closed while this Iterator was still in use.";
-          log.error(
-              "{} Ensure that a reference to the BatchScanner is retained so that it can be closed when this Iterator is exhausted."
-                  + " Not retaining a reference to the BatchScanner guarantees that you are leaking threads in your client JVM.",
-              shortMsg);
+          String shortMsg = "The BatchScanner was unexpectedly closed while"
+              + " this Iterator was still in use.";
+          log.error("{} Ensure that a reference to the BatchScanner is retained"
+              + " so that it can be closed when this Iterator is exhausted. Not"
+              + " retaining a reference to the BatchScanner guarantees that you are"
+              + " leaking threads in your client JVM.", shortMsg);
           throw new RuntimeException(shortMsg + " Ensure proper handling of the BatchScanner.");
         }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/client/summary/CountingSummarizer.java b/core/src/main/java/org/apache/accumulo/core/client/summary/CountingSummarizer.java
index 58bbc3b..ffb26ff 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/summary/CountingSummarizer.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/summary/CountingSummarizer.java
@@ -30,32 +30,36 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.commons.lang.mutable.MutableLong;
 
-//checkstyle and formatter are in conflict
-//@formatter:off
 /**
- * This class counts arbitrary keys while defending against too many keys and keys that are too long.
+ * This class counts arbitrary keys while defending against too many keys and keys that are too
+ * long.
  *
  * <p>
- * During collection and summarization this class will use the functions from {@link #converter()} and {@link #encoder()}. For each key/value the function from
- * {@link #converter()} will be called to create zero or more counter objects. A counter associated with each counter object will be incremented, as long as
- * there are not too many counters and the counter object is not too long.
+ * During collection and summarization this class will use the functions from {@link #converter()}
+ * and {@link #encoder()}. For each key/value the function from {@link #converter()} will be called
+ * to create zero or more counter objects. A counter associated with each counter object will be
+ * incremented, as long as there are not too many counters and the counter object is not too long.
  *
  * <p>
- * When {@link Summarizer.Collector#summarize(Summarizer.StatisticConsumer)} is called, the function from {@link #encoder()} will be used to convert counter
- * objects to strings. These strings will be used to emit statistics. Overriding {@link #encoder()} is optional. One reason to override is if the counter object
- * contains binary or special data. For example, a function that base64 encodes counter objects could be created.
+ * When {@link Summarizer.Collector#summarize(Summarizer.StatisticConsumer)} is called, the function
+ * from {@link #encoder()} will be used to convert counter objects to strings. These strings will be
+ * used to emit statistics. Overriding {@link #encoder()} is optional. One reason to override is if
+ * the counter object contains binary or special data. For example, a function that base64 encodes
+ * counter objects could be created.
  *
  * <p>
  * If the counter key type is mutable, then consider overriding {@link #copier()}.
  *
  * <p>
- * The function returned by {@link #converter()} will be called frequently and should be very efficient. The function returned by {@link #encoder()} will be
- * called less frequently and can be more expensive. The reason these two functions exists is to avoid the conversion to string for each key value, if that
- * conversion is unnecessary.
+ * The function returned by {@link #converter()} will be called frequently and should be very
+ * efficient. The function returned by {@link #encoder()} will be called less frequently and can be
+ * more expensive. The reason these two functions exists is to avoid the conversion to string for
+ * each key value, if that conversion is unnecessary.
  *
  * <p>
- * Below is an example implementation that counts column visibilities. This example avoids converting column visibility to string for each key/value. This
- * example shows the source code for {@link VisibilitySummarizer}.
+ * Below is an example implementation that counts column visibilities. This example avoids
+ * converting column visibility to string for each key/value. This example shows the source code for
+ * {@link VisibilitySummarizer}.
  *
  * <pre>
  * <code>
@@ -75,11 +79,11 @@ import org.apache.commons.lang.mutable.MutableLong;
  * </pre>
  *
  * @param <K>
- *          The counter key type. This type must have good implementations of {@link Object#hashCode()} and {@link Object#equals(Object)}.
+ *          The counter key type. This type must have good implementations of
+ *          {@link Object#hashCode()} and {@link Object#equals(Object)}.
  * @see CounterSummary
  * @since 2.0.0
  */
-//@formatter:on
 public abstract class CountingSummarizer<K> implements Summarizer {
 
   /**
diff --git a/core/src/main/java/org/apache/accumulo/core/client/summary/Summarizer.java b/core/src/main/java/org/apache/accumulo/core/client/summary/Summarizer.java
index 02cf2ab..5d42d48 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/summary/Summarizer.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/summary/Summarizer.java
@@ -24,18 +24,18 @@ import org.apache.accumulo.core.client.rfile.RFile;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 
-//checkstyle and the formatter are in conflict, so turn off the formatter
-//@formatter:off
 /**
- * <p>
- * Instances of this interface can be configured for Accumulo tables. When Accumulo compacts files, it will use this Factory to create {@link Collector} and
- * {@link Combiner} objects to generate summary information about the data in the file.
+ * Instances of this interface can be configured for Accumulo tables. When Accumulo compacts files,
+ * it will use this Factory to create {@link Collector} and {@link Combiner} objects to generate
+ * summary information about the data in the file.
  *
  * <p>
- * In order to merge summary information from multiple files, Accumulo will use this factory to create a {@link Combiner} object.
+ * In order to merge summary information from multiple files, Accumulo will use this factory to
+ * create a {@link Combiner} object.
  *
  * <p>
- * Below is an example of a very simple summarizer that will compute the number of deletes, total number of keys, min timestamp and max timestamp.
+ * Below is an example of a very simple summarizer that will compute the number of deletes, total
+ * number of keys, min timestamp and max timestamp.
  *
  * <pre>
  * <code>
@@ -140,18 +140,21 @@ import org.apache.accumulo.core.data.Value;
  * </pre>
  *
  * <p>
- * The reason a Summarizer is a factory for a Collector and Combiner is to make it very clear in the API that Accumulo uses them independently at different
- * times. Therefore its not advisable to share internal state between the Collector and Combiner. The example implementation shows that the Collectors design
- * allows for very efficient collection of specialized summary information. Creating {@link String} + {@link Long} pairs is deferred until the summarize method
- * is called.
+ * The reason a Summarizer is a factory for a Collector and Combiner is to make it very clear in the
+ * API that Accumulo uses them independently at different times. Therefore its not advisable to
+ * share internal state between the Collector and Combiner. The example implementation shows that
+ * the Collectors design allows for very efficient collection of specialized summary information.
+ * Creating {@link String} + {@link Long} pairs is deferred until the summarize method is called.
  *
  * <p>
  * Summary data can be used by Compaction Strategies to decide which files to compact.
  *
  * <p>
- * Summary data is persisted, so ideally the same summarizer class with the same options should always produce the same results.  If you need to change the behavior
- * of a summarizer, then consider doing this by adding a new option.  If the same summarizer is configured twice with different options, then Accumulo will store and
- * merge each one separately.  This can allow old and new behavior to coexists simultaneously.
+ * Summary data is persisted, so ideally the same summarizer class with the same options should
+ * always produce the same results. If you need to change the behavior of a summarizer, then
+ * consider doing this by adding a new option. If the same summarizer is configured twice with
+ * different options, then Accumulo will store and merge each one separately. This can allow old and
+ * new behavior to coexists simultaneously.
  *
  * @since 2.0.0
  *
@@ -162,7 +165,6 @@ import org.apache.accumulo.core.data.Value;
  * @see RFile#summaries()
  * @see SummarizerConfiguration
  */
- //@formatter:on
 public interface Summarizer {
 
   public static interface StatisticConsumer {
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/ClientConfigGenerate.java b/core/src/main/java/org/apache/accumulo/core/conf/ClientConfigGenerate.java
index f71b7e0..3f1bfaa 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/ClientConfigGenerate.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/ClientConfigGenerate.java
@@ -87,12 +87,13 @@ class ClientConfigGenerate {
       doc.println("category: development");
       doc.println("order: 9");
       doc.println("---\n");
-      doc.println(
-          "<!-- WARNING: Do not edit this file. It is a generated file that is copied from Accumulo build (from core/target/generated-docs) -->");
+      doc.println("<!-- WARNING: Do not edit this file. It is a generated file"
+          + " that is copied from Accumulo build (from core/target/generated-docs)" + " -->");
       doc.println("<!-- Generated by : " + getClass().getName() + " -->\n");
-      doc.println(
-          "Below are properties set in `accumulo-client.properties` that configure [Accumulo clients]({{ page.docs_baseurl }}/getting-started/clients#connecting). "
-              + "All properties have been part of the API since 2.0.0 (unless otherwise specified):\n");
+      doc.println("Below are properties set in `accumulo-client.properties`"
+          + " that configure [Accumulo clients]({{ page.docs_baseurl"
+          + " }}/getting-started/clients#connecting). All properties have been part"
+          + " of the API since 2.0.0 (unless otherwise specified):\n");
       doc.println("| Property | Default value | Since | Description |");
       doc.println("|----------|---------------|-------|-------------|");
     }
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/ClientProperty.java b/core/src/main/java/org/apache/accumulo/core/conf/ClientProperty.java
index 5a2fc44..e481f6a 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/ClientProperty.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/ClientProperty.java
@@ -34,7 +34,8 @@ public enum ClientProperty {
 
   // Authentication
   AUTH_METHOD("auth.method", "password",
-      "Authentication method (i.e password, kerberos, provider). Set more properties for chosen method below.",
+      "Authentication method (i.e password,"
+          + " kerberos, provider). Set more properties for chosen method below.",
       "", true),
   AUTH_USERNAME("auth.username", "", "Accumulo username/principal for chosen authentication method",
       "", true),
@@ -51,11 +52,13 @@ public enum ClientProperty {
   BATCH_WRITER_MAX_LATENCY_SEC("batch.writer.max.latency.sec", "120",
       "Max amount of time (in seconds) to hold data in memory before flushing it"),
   BATCH_WRITER_MAX_TIMEOUT_SEC("batch.writer.max.timeout.sec", "0",
-      "Max amount of time (in seconds) an unresponsive server will be re-tried. An exception is thrown when this timeout is exceeded. Set to zero for no timeout."),
+      "Max amount" + " of time (in seconds) an unresponsive server will be re-tried. An"
+          + " exception is thrown when this timeout is exceeded. Set to zero for no timeout."),
   BATCH_WRITER_MAX_WRITE_THREADS("batch.writer.max.write.threads", "3",
       "Maximum number of threads to use for writing data to tablet servers."),
   BATCH_WRITER_DURABILITY("batch.writer.durability", "default",
-      "Change the durability for the BatchWriter session. To use the table's durability setting. use \"default\" which is the table's durability setting."),
+      "Change the" + " durability for the BatchWriter session. To use the table's durability"
+          + " setting. use \"default\" which is the table's durability setting."),
 
   // SSL
   SSL_ENABLED("ssl.enabled", "false", "Enable SSL for client RPC"),
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/ConfigSanityCheck.java b/core/src/main/java/org/apache/accumulo/core/conf/ConfigSanityCheck.java
index 34ec3df..1b778cc 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/ConfigSanityCheck.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/ConfigSanityCheck.java
@@ -36,7 +36,7 @@ public class ConfigSanityCheck {
   private static final String PREFIX = "BAD CONFIG ";
   private static final String NULL_CIPHER = "NullCipher";
   private static final String NULL_CRYPTO_MODULE = "NullCryptoModule";
-  private static final String NULL_SECRET_KEY_ENCRYPTION_STRATEGY = "NullSecretKeyEncryptionStrategy";
+  private static final String NULL_SECRET_KEY_CRYPT_STRATEGY = "NullSecretKeyEncryptionStrategy";
   @SuppressWarnings("deprecation")
   private static final Property INSTANCE_DFS_URI = Property.INSTANCE_DFS_URI;
   @SuppressWarnings("deprecation")
@@ -58,7 +58,7 @@ public class ConfigSanityCheck {
     boolean usingVolumes = false;
     String cipherSuite = NULL_CIPHER;
     String keyAlgorithm = NULL_CIPHER;
-    String secretKeyEncryptionStrategy = NULL_SECRET_KEY_ENCRYPTION_STRATEGY;
+    String secretKeyEncryptionStrategy = NULL_SECRET_KEY_CRYPT_STRATEGY;
     String cryptoModule = NULL_CRYPTO_MODULE;
     for (Entry<String,String> entry : entries) {
       String key = entry.getKey();
@@ -112,7 +112,7 @@ public class ConfigSanityCheck {
       }
       if (key.equals(Property.CRYPTO_SECRET_KEY_ENCRYPTION_STRATEGY_CLASS.getKey())) {
         secretKeyEncryptionStrategy = Objects.requireNonNull(value);
-        if (!secretKeyEncryptionStrategy.equals(NULL_SECRET_KEY_ENCRYPTION_STRATEGY)) {
+        if (!secretKeyEncryptionStrategy.equals(NULL_SECRET_KEY_CRYPT_STRATEGY)) {
           verifyValidClassName(key, secretKeyEncryptionStrategy, SecretKeyEncryptionStrategy.class);
         }
       }
@@ -135,7 +135,7 @@ public class ConfigSanityCheck {
     }
 
     if (cryptoModule.equals(NULL_CRYPTO_MODULE)
-        ^ secretKeyEncryptionStrategy.equals(NULL_SECRET_KEY_ENCRYPTION_STRATEGY)) {
+        ^ secretKeyEncryptionStrategy.equals(NULL_SECRET_KEY_CRYPT_STRATEGY)) {
       fatal(Property.CRYPTO_MODULE_CLASS.getKey() + " and "
           + Property.CRYPTO_SECRET_KEY_ENCRYPTION_STRATEGY_CLASS.getKey()
           + " must both be configured.");
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java b/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java
index 90b8b55..5b23807 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java
@@ -85,10 +85,11 @@ class ConfigurationDocGen {
       doc.println("category: administration");
       doc.println("order: 3");
       doc.println("---\n");
-      doc.println(
-          "<!-- WARNING: Do not edit this file. It is a generated file that is copied from Accumulo build (from core/target/generated-docs) -->\n");
-      doc.println(
-          "Below are properties set in `accumulo-site.xml` or the Accumulo shell that configure Accumulo servers (i.e tablet server, master, etc):\n");
+      doc.println("<!-- WARNING: Do not edit this file. It is a generated file"
+          + " that is copied from Accumulo build (from core/target/generated-docs)" + " -->\n");
+      doc.println("Below are properties set in `accumulo-site.xml` or the"
+          + " Accumulo shell that configure Accumulo servers (i.e tablet server,"
+          + " master, etc):\n");
     }
 
     @Override
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index 8298abd..c8cbc65 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -54,20 +54,25 @@ public enum Property {
           + " (future) other parts of the code."),
   @Experimental
   CRYPTO_CIPHER_SUITE("crypto.cipher.suite", "NullCipher", PropertyType.STRING,
-      "Describes the cipher suite to use for rfile encryption. The value must be either NullCipher or in the form of algorithm/mode/padding, "
-          + "e.g. AES/CBC/NoPadding"),
+      "Describes the cipher suite to use for rfile encryption. The value must"
+          + " be either NullCipher or in the form of algorithm/mode/padding, e.g."
+          + " AES/CBC/NoPadding"),
   @Experimental
   CRYPTO_WAL_CIPHER_SUITE("crypto.wal.cipher.suite", "", PropertyType.STRING,
-      "Describes the cipher suite to use for the write-ahead log. Defaults to 'cyrpto.cipher.suite' "
-          + "and will use that value for WAL encryption unless otherwise specified. Valid suite values include: an empty string, NullCipher, or a string the "
-          + "form of algorithm/mode/padding, e.g. AES/CBC/NOPadding"),
+      "Describes the cipher suite to use for the write-ahead log. Defaults to"
+          + " 'cyrpto.cipher.suite' and will use that value for WAL encryption unless"
+          + " otherwise specified. Valid suite values include: an empty string,"
+          + " NullCipher, or a string the form of algorithm/mode/padding, e.g."
+          + " AES/CBC/NOPadding"),
   @Experimental
   CRYPTO_CIPHER_KEY_ALGORITHM_NAME("crypto.cipher.key.algorithm.name", "NullCipher",
       PropertyType.STRING,
-      "States the name of the algorithm used for the key for the corresponding cipher suite. The key type must be compatible with the cipher suite."),
+      "States the name of the algorithm used for the key for the corresponding"
+          + " cipher suite. The key type must be compatible with the cipher suite."),
   @Experimental
   CRYPTO_BLOCK_STREAM_SIZE("crypto.block.stream.size", "1K", PropertyType.BYTES,
-      "The size of the buffer above the cipher stream. Used for reading files and padding walog entries."),
+      "The size of the buffer above the cipher stream. Used for reading files"
+          + " and padding walog entries."),
   @Experimental
   CRYPTO_CIPHER_KEY_LENGTH("crypto.cipher.key.length", "128", PropertyType.STRING,
       "Specifies the key length *in bits* to use for the symmetric key, "
@@ -115,7 +120,8 @@ public enum Property {
   // SSL properties local to each node (see also instance.ssl.enabled which must be consistent
   // across all nodes in an instance)
   RPC_PREFIX("rpc.", null, PropertyType.PREFIX,
-      "Properties in this category related to the configuration of SSL keys for RPC. See also instance.ssl.enabled"),
+      "Properties in this category related to the configuration of SSL keys for"
+          + " RPC. See also instance.ssl.enabled"),
   RPC_SSL_KEYSTORE_PATH("rpc.javax.net.ssl.keyStore", "", PropertyType.PATH,
       "Path of the keystore file for the server's private SSL key"),
   @Sensitive
@@ -175,11 +181,14 @@ public enum Property {
           + "Do not change after accumulo is initialized."),
   @Sensitive
   INSTANCE_SECRET("instance.secret", "DEFAULT", PropertyType.STRING,
-      "A secret unique to a given instance that all servers must know in order to communicate with one another."
-          + "It should be changed prior to the initialization of Accumulo. To change it after Accumulo has been initialized, use the ChangeSecret tool "
-          + "and then update accumulo-site.xml everywhere. Before using the ChangeSecret tool, make sure Accumulo is not running and you are logged "
-          + "in as the user that controls Accumulo files in HDFS.  To use the ChangeSecret tool, run the command: "
-          + "./bin/accumulo org.apache.accumulo.server.util.ChangeSecret"),
+      "A secret unique to a given instance that all servers must know in order"
+          + " to communicate with one another. It should be changed prior to the"
+          + " initialization of Accumulo. To change it after Accumulo has been"
+          + " initialized, use the ChangeSecret tool and then update accumulo-site.xml"
+          + " everywhere. Before using the ChangeSecret tool, make sure Accumulo is not"
+          + " running and you are logged in as the user that controls Accumulo files in"
+          + " HDFS. To use the ChangeSecret tool, run the command: ./bin/accumulo"
+          + " org.apache.accumulo.server.util.ChangeSecret"),
   INSTANCE_VOLUMES("instance.volumes", "", PropertyType.STRING,
       "A comma seperated list of dfs uris to use. Files will be stored across"
           + " these filesystems. If this is empty, then instance.dfs.uri will be used."
@@ -239,12 +248,15 @@ public enum Property {
 
   // general properties
   GENERAL_PREFIX("general.", null, PropertyType.PREFIX,
-      "Properties in this category affect the behavior of accumulo overall, but do not have to be consistent throughout a cloud."),
+      "Properties in this category affect the behavior of accumulo overall, but"
+          + " do not have to be consistent throughout a cloud."),
   @Deprecated
   GENERAL_CLASSPATHS(AccumuloClassLoader.GENERAL_CLASSPATHS, "", PropertyType.STRING,
-      "This property is deprecated. The class path should instead be configured by the launch environment (for example, accumulo-env.sh). "
-          + "A list of all of the places to look for a class. Order does matter, as it will look for the jar "
-          + "starting in the first location to the last. Supports full regex on filename alone."),
+      "This property is deprecated. The class path should instead be configured"
+          + " by the launch environment (for example, accumulo-env.sh). A list of all"
+          + " of the places to look for a class. Order does matter, as it will look for"
+          + " the jar starting in the first location to the last. Supports full regex"
+          + " on filename alone."),
   GENERAL_DYNAMIC_CLASSPATHS(AccumuloVFSClassLoader.DYNAMIC_CLASSPATH_PROPERTY_NAME,
       AccumuloVFSClassLoader.DEFAULT_DYNAMIC_CLASSPATH_VALUE, PropertyType.STRING,
       "A list of all of the places where changes in jars or classes will force "
@@ -264,8 +276,9 @@ public enum Property {
           + "principal. Leave blank if not using kerberoized hdfs"),
   GENERAL_KERBEROS_RENEWAL_PERIOD("general.kerberos.renewal.period", "30s",
       PropertyType.TIMEDURATION,
-      "The amount of time between attempts to perform "
-          + "Kerberos ticket renewals. This does not equate to how often tickets are actually renewed (which is performed at 80% of the ticket lifetime)."),
+      "The amount of time between attempts to perform Kerberos ticket renewals."
+          + " This does not equate to how often tickets are actually renewed (which is"
+          + " performed at 80% of the ticket lifetime)."),
   GENERAL_MAX_MESSAGE_SIZE("general.server.message.size.max", "1G", PropertyType.BYTES,
       "The maximum size of a message that can be sent to a server."),
   GENERAL_SIMPLETIMER_THREADPOOL_SIZE("general.server.simpletimer.threadpool.size", "1",
@@ -279,11 +292,13 @@ public enum Property {
   GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS("general.security.credential.provider.paths", "",
       PropertyType.STRING, "Comma-separated list of paths to CredentialProviders"),
   GENERAL_LEGACY_METRICS("general.legacy.metrics", "false", PropertyType.BOOLEAN,
-      "Use the old metric infrastructure configured by accumulo-metrics.xml, instead of Hadoop Metrics2"),
+      "Use the old metric infrastructure configured by accumulo-metrics.xml,"
+          + " instead of Hadoop Metrics2"),
   GENERAL_ARBITRARY_PROP_PREFIX("general.custom.", null, PropertyType.PREFIX,
-      "Prefix to be used for user defined system-wide properties. "
-          + "This may be particularly useful for system-wide configuration for various user-implementations of "
-          + "pluggable Accumulo features, such as the balancer or volume chooser."),
+      "Prefix to be used for user defined system-wide properties. This may be"
+          + " particularly useful for system-wide configuration for various"
+          + " user-implementations of pluggable Accumulo features, such as the balancer"
+          + " or volume chooser."),
   GENERAL_DELEGATION_TOKEN_LIFETIME("general.delegation.token.lifetime", "7d",
       PropertyType.TIMEDURATION,
       "The length of time that delegation tokens and secret keys are valid"),
@@ -331,7 +346,8 @@ public enum Property {
       "org.apache.accumulo.server.master.recovery.HadoopLogCloser", PropertyType.CLASSNAME,
       "A class that implements a mechanism to steal write access to a write-ahead log"),
   MASTER_FATE_THREADPOOL_SIZE("master.fate.threadpool.size", "4", PropertyType.COUNT,
-      "The number of threads used to run fault-tolerant executions (FATE). These are primarily table operations like merge."),
+      "The number of threads used to run fault-tolerant executions (FATE)."
+          + " These are primarily table operations like merge."),
   MASTER_REPLICATION_SCAN_INTERVAL("master.replication.status.scan.interval", "30s",
       PropertyType.TIMEDURATION,
       "Amount of time to sleep before scanning the status section of the "
@@ -358,8 +374,9 @@ public enum Property {
       "Specifies a default blocksize for the tserver caches"),
   TSERV_CACHE_MANAGER_IMPL("tserver.cache.manager.class",
       "org.apache.accumulo.core.file.blockfile.cache.lru.LruBlockCacheManager", PropertyType.STRING,
-      "Specifies the class name of the block cache factory implementation. Alternative implementation is "
-          + "org.apache.accumulo.core.file.blockfile.cache.tinylfu.TinyLfuBlockCacheManager"),
+      "Specifies the class name of the block cache factory implementation."
+          + " Alternative implementation is"
+          + " org.apache.accumulo.core.file.blockfile.cache.tinylfu.TinyLfuBlockCacheManager"),
   TSERV_DATACACHE_SIZE("tserver.cache.data.size", "10%", PropertyType.MEMORY,
       "Specifies the size of the cache for RFile data blocks."),
   TSERV_INDEXCACHE_SIZE("tserver.cache.index.size", "25%", PropertyType.MEMORY,
@@ -372,26 +389,34 @@ public enum Property {
       "The port used for handling client connections on the tablet servers"),
   @Deprecated
   TSERV_MUTATION_QUEUE_MAX("tserver.mutation.queue.max", "1M", PropertyType.BYTES,
-      "This setting is deprecated. See tserver.total.mutation.queue.max. "
-          + "The amount of memory to use to store write-ahead-log mutations-per-session before flushing them. Since the buffer is per write session, consider the"
-          + " max number of concurrent writer when configuring. When using Hadoop 2, Accumulo will call hsync() on the WAL . For a small number of "
-          + "concurrent writers, increasing this buffer size decreases the frequncy of hsync calls. For a large number of concurrent writers a small buffers "
-          + "size is ok because of group commit."),
+      "This setting is deprecated. See tserver.total.mutation.queue.max. The"
+          + " amount of memory to use to store write-ahead-log mutations-per-session"
+          + " before flushing them. Since the buffer is per write session, consider the"
+          + " max number of concurrent writers when configuring. When using Hadoop 2,"
+          + " Accumulo will call hsync() on the WAL. For a small number of concurrent"
+          + " writers, increasing this buffer size decreases the frequency of hsync"
+          + " calls. For a large number of concurrent writers a small buffers size is"
+          + " ok because of group commit."),
   TSERV_TOTAL_MUTATION_QUEUE_MAX("tserver.total.mutation.queue.max", "5%", PropertyType.MEMORY,
       "The amount of memory used to store write-ahead-log mutations before flushing them."),
   TSERV_TABLET_SPLIT_FINDMIDPOINT_MAXOPEN("tserver.tablet.split.midpoint.files.max", "300",
       PropertyType.COUNT,
-      "To find a tablets split points, all RFiles are opened and their indexes are read. This setting determines how many "
-          + "RFiles can be opened at once. When there are more RFiles than this setting multiple passes "
-          + "must be made, which is slower. However opening too many RFiles at once can cause problems."),
+      "To find a tablet's split points, all RFiles are opened and their indexes"
+          + " are read. This setting determines how many RFiles can be opened at once."
+          + " When there are more RFiles than this setting multiple passes must be"
+          + " made, which is slower. However opening too many RFiles at once can cause"
+          + " problems."),
   TSERV_WALOG_MAX_SIZE("tserver.walog.max.size", "1g", PropertyType.BYTES,
-      "The maximum size for each write-ahead log. See comment for property tserver.memory.maps.max"),
+      "The maximum size for each write-ahead log. See comment for property"
+          + " tserver.memory.maps.max"),
   TSERV_WALOG_MAX_AGE("tserver.walog.max.age", "24h", PropertyType.TIMEDURATION,
       "The maximum age for each write-ahead log."),
   TSERV_WALOG_TOLERATED_CREATION_FAILURES("tserver.walog.tolerated.creation.failures", "50",
       PropertyType.COUNT,
-      "The maximum number of failures tolerated when creating a new write-ahead log.  Values < 0 will allow unlimited creation failures."
-          + " Exceeding this number of failures consecutively trying to create a new write-ahead log causes the TabletServer to exit."),
+      "The maximum number of failures tolerated when creating a new write-ahead"
+          + " log. Negative values will allow unlimited creation failures. Exceeding this"
+          + " number of failures consecutively trying to create a new write-ahead log"
+          + " causes the TabletServer to exit."),
   TSERV_WALOG_TOLERATED_WAIT_INCREMENT("tserver.walog.tolerated.wait.increment", "1000ms",
       PropertyType.TIMEDURATION,
       "The amount of time to wait between failures to create or write a write-ahead log."),
@@ -406,14 +431,18 @@ public enum Property {
   TSERV_SCAN_MAX_OPENFILES("tserver.scan.files.open.max", "100", PropertyType.COUNT,
       "Maximum total RFiles that all tablets in a tablet server can open for scans. "),
   TSERV_MAX_IDLE("tserver.files.open.idle", "1m", PropertyType.TIMEDURATION,
-      "Tablet servers leave previously used RFiles open for future queries. "
-          + "This setting determines how much time an unused RFile should be kept open until it is closed."),
+      "Tablet servers leave previously used RFiles open for future queries."
+          + " This setting determines how much time an unused RFile should be kept open"
+          + " until it is closed."),
   TSERV_NATIVEMAP_ENABLED("tserver.memory.maps.native.enabled", "true", PropertyType.BOOLEAN,
-      "An in-memory data store for accumulo implemented in c++ that increases the amount of data accumulo can hold in memory and avoids Java GC pauses."),
+      "An in-memory data store for accumulo implemented in c++ that increases"
+          + " the amount of data accumulo can hold in memory and avoids Java GC" + " pauses."),
   TSERV_MAXMEM("tserver.memory.maps.max", "33%", PropertyType.MEMORY,
-      "Maximum amount of memory that can be used to buffer data written to a tablet server. There are two other properties that can effectively limit memory"
-          + " usage table.compaction.minor.logs.threshold and tserver.walog.max.size. Ensure that table.compaction.minor.logs.threshold *"
-          + " tserver.walog.max.size >= this property."),
+      "Maximum amount of memory that can be used to buffer data written to a"
+          + " tablet server. There are two other properties that can effectively limit"
+          + " memory usage table.compaction.minor.logs.threshold and"
+          + " tserver.walog.max.size. Ensure that table.compaction.minor.logs.threshold"
+          + " * tserver.walog.max.size >= this property."),
   TSERV_MEM_MGMT("tserver.memory.manager",
       "org.apache.accumulo.server.tabletserver.LargestFirstMemoryManager", PropertyType.CLASSNAME,
       "An implementation of MemoryManger that accumulo will use."),
@@ -436,7 +465,8 @@ public enum Property {
   TSERV_MAJC_MAXCONCURRENT("tserver.compaction.major.concurrent.max", "3", PropertyType.COUNT,
       "The maximum number of concurrent major compactions for a tablet server"),
   TSERV_MAJC_THROUGHPUT("tserver.compaction.major.throughput", "0B", PropertyType.BYTES,
-      "Maximum number of bytes to read or write per second over all major compactions on a TabletServer, or 0B for unlimited."),
+      "Maximum number of bytes to read or write per second over all major"
+          + " compactions on a TabletServer, or 0B for unlimited."),
   TSERV_MINC_MAXCONCURRENT("tserver.compaction.minor.concurrent.max", "4", PropertyType.COUNT,
       "The maximum number of concurrent minor compactions for a tablet server"),
   TSERV_MAJC_TRACE_PERCENT("tserver.compaction.major.trace.percent", "0.1", PropertyType.FRACTION,
@@ -459,13 +489,17 @@ public enum Property {
           + " minor compacted file because it may have been modified by iterators. The"
           + " file dumped to the local dir is an exact copy of what was in memory."),
   TSERV_BULK_PROCESS_THREADS("tserver.bulk.process.threads", "1", PropertyType.COUNT,
-      "The master will task a tablet server with pre-processing a bulk import RFile prior to assigning it to the appropriate tablet servers. This configuration"
-          + " value controls the number of threads used to process the files."),
+      "The master will task a tablet server with pre-processing a bulk import"
+          + " RFile prior to assigning it to the appropriate tablet servers. This"
+          + " configuration value controls the number of threads used to process the" + " files."),
   TSERV_BULK_ASSIGNMENT_THREADS("tserver.bulk.assign.threads", "1", PropertyType.COUNT,
-      "The master delegates bulk import RFile processing and assignment to tablet servers. After file has been processed, the tablet server will assign"
-          + " the file to the appropriate tablets on all servers. This property controls the number of threads used to communicate to the other servers."),
+      "The master delegates bulk import RFile processing and assignment to"
+          + " tablet servers. After file has been processed, the tablet server will"
+          + " assign the file to the appropriate tablets on all servers. This property"
+          + " controls the number of threads used to communicate to the other" + " servers."),
   TSERV_BULK_RETRY("tserver.bulk.retry.max", "5", PropertyType.COUNT,
-      "The number of times the tablet server will attempt to assign a RFile to a tablet as it migrates and splits."),
+      "The number of times the tablet server will attempt to assign a RFile to"
+          + " a tablet as it migrates and splits."),
   TSERV_BULK_TIMEOUT("tserver.bulk.timeout", "5m", PropertyType.TIMEDURATION,
       "The time to wait for a tablet server to process a bulk import request."),
   TSERV_MINTHREADS("tserver.server.threads.minimum", "20", PropertyType.COUNT,
@@ -475,11 +509,14 @@ public enum Property {
   TSERV_MAX_MESSAGE_SIZE("tserver.server.message.size.max", "1G", PropertyType.BYTES,
       "The maximum size of a message that can be sent to a tablet server."),
   TSERV_HOLD_TIME_SUICIDE("tserver.hold.time.max", "5m", PropertyType.TIMEDURATION,
-      "The maximum time for a tablet server to be in the \"memory full\" state. If the tablet server cannot write out memory"
-          + " in this much time, it will assume there is some failure local to its node, and quit. A value of zero is equivalent to forever."),
+      "The maximum time for a tablet server to be in the \"memory full\" state."
+          + " If the tablet server cannot write out memory in this much time, it will"
+          + " assume there is some failure local to its node, and quit. A value of zero"
+          + " is equivalent to forever."),
   TSERV_WAL_BLOCKSIZE("tserver.wal.blocksize", "0", PropertyType.BYTES,
-      "The size of the HDFS blocks used to write to the Write-Ahead log. If zero, it will be 110% of tserver.walog.max.size (that is, try to use just one"
-          + " block)"),
+      "The size of the HDFS blocks used to write to the Write-Ahead log. If"
+          + " zero, it will be 110% of tserver.walog.max.size (that is, try to use just"
+          + " one block)"),
   TSERV_WAL_REPLICATION("tserver.wal.replication", "0", PropertyType.COUNT,
       "The replication to use when writing the Write-Ahead log to HDFS. If"
           + " zero, it will use the HDFS default replication setting."),
@@ -490,7 +527,8 @@ public enum Property {
   TSERV_ARCHIVE_WALOGS("tserver.archive.walogs", "false", PropertyType.BOOLEAN,
       "Keep copies of the WALOGs for debugging purposes"),
   TSERV_WORKQ_THREADS("tserver.workq.threads", "2", PropertyType.COUNT,
-      "The number of threads for the distributed work queue. These threads are used for copying failed bulk import RFiles."),
+      "The number of threads for the distributed work queue. These threads are"
+          + " used for copying failed bulk import RFiles."),
   TSERV_WAL_SYNC("tserver.wal.sync", "true", PropertyType.BOOLEAN,
       "Use the SYNC_BLOCK create flag to sync WAL writes to disk. Prevents"
           + " problems recovering from sudden system resets."),
@@ -512,15 +550,21 @@ public enum Property {
   TSERV_ASSIGNMENT_MAXCONCURRENT("tserver.assignment.concurrent.max", "2", PropertyType.COUNT,
       "The number of threads available to load tablets. Recoveries are still performed serially."),
   TSERV_SLOW_FLUSH_MILLIS("tserver.slow.flush.time", "100ms", PropertyType.TIMEDURATION,
-      "If a flush to the write-ahead log takes longer than this period of time, debugging information will written, and may result in a log rollover."),
+      "If a flush to the write-ahead log takes longer than this period of time,"
+          + " debugging information will be written, and may result in a log rollover."
   TSERV_SUMMARY_PARTITION_THREADS("tserver.summary.partition.threads", "10", PropertyType.COUNT,
-      "Summary data must be retrieved from RFiles.  For a large number of RFiles, the files are broken into partitions of 100K files.  This setting determines "
-          + "how many of these groups of 100K RFiles will be processed concurrently."),
+      "Summary data must be retrieved from RFiles. For a large number of"
+          + " RFiles, the files are broken into partitions of 100K files. This setting"
+          + " determines how many of these groups of 100K RFiles will be processed"
+          + " concurrently."),
   TSERV_SUMMARY_REMOTE_THREADS("tserver.summary.remote.threads", "128", PropertyType.COUNT,
-      "For a partitioned group of 100K RFiles, those files are grouped by tablet server.  Then a remote tablet server is asked to gather summary data.  This "
-          + "setting determines how many concurrent request are made per partition."),
+      "For a partitioned group of 100K RFiles, those files are grouped by"
+          + " tablet server. Then a remote tablet server is asked to gather summary"
+          + " data. This setting determines how many concurrent requests are made per"
+          + " partition."),
   TSERV_SUMMARY_RETRIEVAL_THREADS("tserver.summary.retrieval.threads", "10", PropertyType.COUNT,
-      "The number of threads on each tablet server available to retrieve summary data, that is not currently in cache, from RFiles."),
+      "The number of threads on each tablet server available to retrieve"
+          + " summary data, that is not currently in cache, from RFiles."),
 
   // accumulo garbage collector properties
   GC_PREFIX("gc.", null, PropertyType.PREFIX,
@@ -587,10 +631,13 @@ public enum Property {
       "The SimpleDateFormat string used to configure "
           + "the date shown on the 'Recent Logs' monitor page"),
   MONITOR_RESOURCES_EXTERNAL("monitor.resources.external", "", PropertyType.STRING,
-      "A JSON Map of Strings. Each String should be an HTML tag of an external resource (JS or CSS) to be imported by the Monitor. \n"
-          + "Be sure to wrap with CDATA tags. If this value is set, all of the external resources in the <head> tag of the Monitor will be replaced with \n"
-          + "the tags set here. Be sure the jquery tag is first since other scripts will depend on it. The resources that are used by default "
-          + "can be seen in accumulo/server/monitor/src/main/resources/templates/default.ftl"),
+      "A JSON Map of Strings. Each String should be an HTML tag of an external"
+          + " resource (JS or CSS) to be imported by the Monitor. Be sure to wrap"
+          + " with CDATA tags. If this value is set, all of the external resources"
+          + " in the <head> tag of the Monitor will be replaced with the tags set here."
+          + " Be sure the jquery tag is first since other scripts will depend on it."
+          + " The resources that are used by default can be seen in"
+          + " accumulo/server/monitor/src/main/resources/templates/default.ftl"),
 
   TRACE_PREFIX("trace.", null, PropertyType.PREFIX,
       "Properties in this category affect the behavior of distributed tracing."),
@@ -611,7 +658,8 @@ public enum Property {
       "The password for the user used to store distributed traces"),
   @Sensitive
   TRACE_TOKEN_PROPERTY_PREFIX("trace.token.property.", null, PropertyType.PREFIX,
-      "The prefix used to create a token for storing distributed traces. For each property required by trace.token.type, place this prefix in front of it."),
+      "The prefix used to create a token for storing distributed traces. For"
+          + " each property required by trace.token.type, place this prefix in front of it."),
   TRACE_TOKEN_TYPE("trace.token.type", PasswordToken.class.getName(), PropertyType.CLASSNAME,
       "An AuthenticationToken type supported by the authorizer"),
 
@@ -629,14 +677,17 @@ public enum Property {
   TABLE_ARBITRARY_PROP_PREFIX("table.custom.", null, PropertyType.PREFIX,
       "Prefix to be used for user defined arbitrary properties."),
   TABLE_MAJC_RATIO("table.compaction.major.ratio", "3", PropertyType.FRACTION,
-      "Minimum ratio of total input size to maximum input RFile size for running a major compaction. When adjusting this property you may want to also "
-          + "adjust table.file.max. Want to avoid the situation where only merging minor compactions occur."),
+      "Minimum ratio of total input size to maximum input RFile size for"
+          + " running a major compaction. When adjusting this property you may want to"
+          + " also adjust table.file.max. Want to avoid the situation where only"
+          + " merging minor compactions occur."),
   TABLE_MAJC_COMPACTALL_IDLETIME("table.compaction.major.everything.idle", "1h",
       PropertyType.TIMEDURATION,
-      "After a tablet has been idle (no mutations) for this time period it may have all "
-          + "of its RFiles compacted into one. There is no guarantee an idle tablet will be compacted. "
-          + "Compactions of idle tablets are only started when regular compactions are not running. Idle "
-          + "compactions only take place for tablets that have one or more RFiles."),
+      "After a tablet has been idle (no mutations) for this time period it may"
+          + " have all of its RFiles compacted into one. There is no guarantee an idle"
+          + " tablet will be compacted. Compactions of idle tablets are only started"
+          + " when regular compactions are not running. Idle compactions only take"
+          + " place for tablets that have one or more RFiles."),
   TABLE_SPLIT_THRESHOLD("table.split.threshold", "1G", PropertyType.BYTES,
       "A tablet is split when the combined size of RFiles exceeds this amount."),
   TABLE_MAX_END_ROW_SIZE("table.split.endrow.size.max", "10K", PropertyType.BYTES,
@@ -650,7 +701,8 @@ public enum Property {
           + "tablet will be compacted."),
   TABLE_MINC_MAX_MERGE_FILE_SIZE("table.compaction.minor.merge.file.size.max", "0",
       PropertyType.BYTES,
-      "The max RFile size used for a merging minor compaction. The default value of 0 disables a max file size."),
+      "The max RFile size used for a merging minor compaction. The default"
+          + " value of 0 disables a max file size."),
   TABLE_SCAN_MAXMEM("table.scan.max.memory", "512K", PropertyType.BYTES,
       "The maximum amount of memory that will be used to cache results of a client query/scan. "
           + "Once this limit is reached, the buffered data is sent to the client."),
@@ -661,32 +713,41 @@ public enum Property {
       "This property can be set to allow the LoadBalanceByTable load balancer"
           + " to change the called Load Balancer for this table"),
   TABLE_FILE_COMPRESSION_TYPE("table.file.compress.type", "gz", PropertyType.STRING,
-      "Compression algorithm used on index and data blocks before they are written. Possible values: gz, snappy, lzo, none"),
+      "Compression algorithm used on index and data blocks before they are"
+          + " written. Possible values: gz, snappy, lzo, none"),
   TABLE_FILE_COMPRESSED_BLOCK_SIZE("table.file.compress.blocksize", "100K", PropertyType.BYTES,
       "The maximum size of data blocks in RFiles before they are compressed and written."),
   TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX("table.file.compress.blocksize.index", "128K",
       PropertyType.BYTES,
       "The maximum size of index blocks in RFiles before they are compressed and written."),
   TABLE_FILE_BLOCK_SIZE("table.file.blocksize", "0B", PropertyType.BYTES,
-      "The HDFS block size used when writing RFiles. When set to 0B, the value/defaults of HDFS property 'dfs.block.size' will be used."),
+      "The HDFS block size used when writing RFiles. When set to 0B, the"
+          + " value/defaults of HDFS property 'dfs.block.size' will be used."),
   TABLE_FILE_REPLICATION("table.file.replication", "0", PropertyType.COUNT,
-      "The number of replicas for a table's RFiles in HDFS. When set to 0, HDFS defaults are used."),
+      "The number of replicas for a table's RFiles in HDFS. When set to 0, HDFS"
+          + " defaults are used."),
   TABLE_FILE_MAX("table.file.max", "15", PropertyType.COUNT,
-      "The maximum number of RFiles each tablet in a table can have. When adjusting this property you may want to consider adjusting"
-          + " table.compaction.major.ratio also. Setting this property to 0 will make it default to tserver.scan.files.open.max-1, this will prevent a"
-          + " tablet from having more RFiles than can be opened. Setting this property low may throttle ingest and increase query performance."),
+      "The maximum number of RFiles each tablet in a table can have. When"
+          + " adjusting this property you may want to consider adjusting"
+          + " table.compaction.major.ratio also. Setting this property to 0 will make"
+          + " it default to tserver.scan.files.open.max-1, this will prevent a tablet"
+          + " from having more RFiles than can be opened. Setting this property low may"
+          + " throttle ingest and increase query performance."),
   TABLE_FILE_SUMMARY_MAX_SIZE("table.file.summary.maxSize", "256K", PropertyType.BYTES,
-      "The maximum size summary that will be stored. The number of"
-          + " RFiles that had summary data exceeding this threshold is reported by Summary.getFileStatistics().getLarge().  When adjusting this"
-          + " consider the expected number RFiles with summaries on each tablet server and the summary cache size."),
+      "The maximum size summary that will be stored. The number of RFiles that"
+          + " had summary data exceeding this threshold is reported by"
+          + " Summary.getFileStatistics().getLarge(). When adjusting this consider the"
+          + " expected number of RFiles with summaries on each tablet server and the"
+          + " summary cache size."),
   @Deprecated
   TABLE_WALOG_ENABLED("table.walog.enabled", "true", PropertyType.BOOLEAN,
       "This setting is deprecated.  Use table.durability=none instead."),
   TABLE_BLOOM_ENABLED("table.bloom.enabled", "false", PropertyType.BOOLEAN,
       "Use bloom filters on this table."),
   TABLE_BLOOM_LOAD_THRESHOLD("table.bloom.load.threshold", "1", PropertyType.COUNT,
-      "This number of seeks that would actually use a bloom filter must occur before a RFile's bloom filter is loaded."
-          + " Set this to zero to initiate loading of bloom filters when a RFile is opened."),
+      "This number of seeks that would actually use a bloom filter must occur"
+          + " before a RFile's bloom filter is loaded. Set this to zero to initiate"
+          + " loading of bloom filters when a RFile is opened."),
   TABLE_BLOOM_SIZE("table.bloom.size", "1048576", PropertyType.COUNT,
       "Bloom filter size, as number of keys."),
   TABLE_BLOOM_ERRORRATE("table.bloom.error.rate", "0.5%", PropertyType.FRACTION,
@@ -780,25 +841,34 @@ public enum Property {
   TABLE_REPLICATION("table.replication", "false", PropertyType.BOOLEAN,
       "Is replication enabled for the given table"),
   TABLE_REPLICATION_TARGET("table.replication.target.", null, PropertyType.PREFIX,
-      "Enumerate a mapping of other systems which this table should "
-          + "replicate their data to. The key suffix is the identifying cluster name and the value is an identifier for a location on the target system, "
-          + "e.g. the ID of the table on the target to replicate to"),
+      "Enumerate a mapping of other systems which this table should replicate"
+          + " their data to. The key suffix is the identifying cluster name and the"
+          + " value is an identifier for a location on the target system, e.g. the ID"
+          + " of the table on the target to replicate to"),
   TABLE_SAMPLER("table.sampler", "", PropertyType.CLASSNAME,
-      "The name of a class that implements org.apache.accumulo.core.Sampler.  Setting this option enables storing a sample of data which can be scanned."
-          + "  Always having a current sample can useful for query optimization and data comprehension.   After enabling sampling for an existing table, a compaction "
-          + "is needed to compute the sample for existing data.  The compact command in the shell has an option to only compact RFiles without sample data."),
+      "The name of a class that implements org.apache.accumulo.core.Sampler."
+          + " Setting this option enables storing a sample of data which can be"
+          + " scanned. Always having a current sample can be useful for query optimization"
+          + " and data comprehension. After enabling sampling for an existing table,"
+          + " a compaction is needed to compute the sample for existing data. The"
+          + " compact command in the shell has an option to only compact RFiles without"
+          + " sample data."),
   TABLE_SAMPLER_OPTS("table.sampler.opt.", null, PropertyType.PREFIX,
       "The property is used to set options for a sampler. If a sample had two"
           + " options like hasher and modulous, then the two properties"
           + " table.sampler.opt.hasher=${hash algorithm} and"
           + " table.sampler.opt.modulous=${mod} would be set."),
   TABLE_SUSPEND_DURATION("table.suspend.duration", "0s", PropertyType.TIMEDURATION,
-      "For tablets belonging to this table: When a tablet server dies, allow the tablet server this duration to revive before reassigning its tablets"
-          + "to other tablet servers."),
+      "For tablets belonging to this table: When a tablet server dies, allow"
+          + " the tablet server this duration to revive before reassigning its tablets"
+          + " to other tablet servers."),
   TABLE_SUMMARIZER_PREFIX("table.summarizer.", null, PropertyType.PREFIX,
-      "Prefix for configuring summarizers for a table.  Using this prefix multiple summarizers can be configured with options for each one. Each summarizer configured "
-          + "should have a unique id, this id can be anything. To add a summarizer set table.summarizer.<unique id>=<summarizer class name>.  If the summarizer has options, "
-          + "then for each option set table.summarizer.<unique id>.opt.<key>=<value>."),
+      "Prefix for configuring summarizers for a table. Using this prefix"
+          + " multiple summarizers can be configured with options for each one. Each"
+          + " summarizer configured should have a unique id, this id can be anything."
+          + " To add a summarizer set table.summarizer.<unique id>=<summarizer class"
+          + " name>. If the summarizer has options, then for each option set"
+          + " table.summarizer.<unique id>.opt.<key>=<value>."),
 
   // VFS ClassLoader properties
   VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY(
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java b/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java
index e448ef8..ff6d5e8 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java
@@ -51,18 +51,24 @@ public enum PropertyType {
 
   BYTES("bytes", boundedUnits(0, Long.MAX_VALUE, false, "", "B", "K", "M", "G"),
       "A positive integer optionally followed by a unit of memory (whitespace disallowed).\n"
-          + "If no unit is specified, bytes are assumed. Valid units are 'B', 'K', 'M' or 'G' for bytes, kilobytes, megabytes, gigabytes.\n"
+          + "If no unit is specified, bytes are assumed. Valid units are 'B',"
+          + " 'K', 'M' or 'G' for bytes, kilobytes, megabytes, gigabytes.\n"
           + "Examples of valid memories are '1024', '20B', '100K', '1500M', '2G', '20%'.\n"
-          + "Examples of invalid memories are '1M500K', '1M 2K', '1MB', '1.5G', '1,024K', '', and 'a'.\n"
+          + "Examples of invalid memories are '1M500K', '1M 2K', '1MB', '1.5G',"
+          + " '1,024K', '', and 'a'.\n"
           + "Unless otherwise stated, the max value for the memory represented in bytes is "
           + Long.MAX_VALUE),
 
   MEMORY("memory", boundedUnits(0, Long.MAX_VALUE, false, "", "B", "K", "M", "G", "%"),
-      "A positive integer optionally followed by a unit of memory or a percentage (whitespace disallowed).\n"
-          + "If a percentage is specified, memory will be a percentage of the max memory allocated to a Java process (set by the JVM option -Xmx).\n"
-          + "If no unit is specified, bytes are assumed. Valid units are 'B', 'K', 'M', 'G', '%' for bytes, kilobytes, megabytes, gigabytes, and percentage.\n"
+      "A positive integer optionally followed by a unit of memory or a"
+          + " percentage (whitespace disallowed).\n"
+          + "If a percentage is specified, memory will be a percentage of the"
+          + " max memory allocated to a Java process (set by the JVM option -Xmx).\n"
+          + "If no unit is specified, bytes are assumed. Valid units are 'B',"
+          + " 'K', 'M', 'G', '%' for bytes, kilobytes, megabytes, gigabytes, and" + " percentage.\n"
           + "Examples of valid memories are '1024', '20B', '100K', '1500M', '2G', '20%'.\n"
-          + "Examples of invalid memories are '1M500K', '1M 2K', '1MB', '1.5G', '1,024K', '', and 'a'.\n"
+          + "Examples of invalid memories are '1M500K', '1M 2K', '1MB', '1.5G',"
+          + " '1,024K', '', and 'a'.\n"
           + "Unless otherwise stated, the max value for the memory represented in bytes is "
           + Long.MAX_VALUE),
 
@@ -76,8 +82,10 @@ public enum PropertyType {
 
   @SuppressWarnings("unchecked")
   PORT("port", or(new Bounds(1024, 65535), in(true, "0"), new PortRange("\\d{4,5}-\\d{4,5}")),
-      "An positive integer in the range 1024-65535 (not already in use or specified elsewhere in the configuration),\n"
-          + "zero to indicate any open ephemeral port, or a range of positive integers specified as M-N"),
+      "A positive integer in the range 1024-65535 (not already in use or"
+          + " specified elsewhere in the configuration),\n"
+          + "zero to indicate any open ephemeral port, or a range of positive"
+          + " integers specified as M-N"),
 
   COUNT("count", new Bounds(0, Integer.MAX_VALUE),
       "A non-negative integer in the range of 0-" + Integer.MAX_VALUE),
@@ -90,13 +98,15 @@ public enum PropertyType {
           + "Examples of invalid fractions/percentages are '', '10 percent'," + " 'Hulk Hogan'"),
 
   PATH("path", x -> true,
-      "A string that represents a filesystem path, which can be either relative or absolute to some directory. The filesystem depends on the property. The "
-          + "following environment variables will be substituted: "
+      "A string that represents a filesystem path, which can be either relative"
+          + " or absolute to some directory. The filesystem depends on the property."
+          + " The following environment variables will be substituted: "
           + Constants.PATH_PROPERTY_ENV_VARS),
 
   ABSOLUTEPATH("absolute path",
       x -> x == null || x.trim().isEmpty() || new Path(x.trim()).isAbsolute(),
-      "An absolute filesystem path. The filesystem depends on the property. This is the same as path, but enforces that its root is explicitly specified."),
+      "An absolute filesystem path. The filesystem depends on the property."
+          + " This is the same as path, but enforces that its root is explicitly" + " specified."),
 
   CLASSNAME("java class", new Matches("[\\w$.]*"),
       "A fully qualified java class name representing a class on the classpath.\n"
@@ -110,7 +120,8 @@ public enum PropertyType {
       "One of 'none', 'log', 'flush' or 'sync'."),
 
   STRING("string", x -> true,
-      "An arbitrary string of characters whose format is unspecified and interpreted based on the context of the property to which it applies."),
+      "An arbitrary string of characters whose format is unspecified and"
+          + " interpreted based on the context of the property to which it applies."),
 
   BOOLEAN("boolean", in(false, null, "true", "false"),
       "Has a value of either 'true' or 'false' (case-insensitive)"),
diff --git a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/lru/LruBlockCache.java b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/lru/LruBlockCache.java
index 5d8381b..419c4fd 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/lru/LruBlockCache.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/lru/LruBlockCache.java
@@ -570,8 +570,10 @@ public class LruBlockCache extends SynchronousLoadingBlockCache implements Block
     float freeMB = ((float) freeSize) / ((float) (1024 * 1024));
     float maxMB = ((float) this.conf.getMaxSize()) / ((float) (1024 * 1024));
     log.debug(
-        "Cache Stats: Sizes: Total={}MB ({}), Free={}MB ({}), Max={}MB ({}), Counts: Blocks={}, Access={}, Hit={}, Miss={}, Evictions={}, Evicted={},"
-            + "Ratios: Hit Ratio={}%, Miss Ratio={}%, Evicted/Run={}, Duplicate Reads={}",
+        "Cache Stats: Sizes: Total={}MB ({}), Free={}MB ({}), Max={}MB"
+            + " ({}), Counts: Blocks={}, Access={}, Hit={}, Miss={}, Evictions={},"
+            + " Evicted={}, Ratios: Hit Ratio={}%, Miss Ratio={}%, Evicted/Run={},"
+            + " Duplicate Reads={}",
         sizeMB, totalSize, freeMB, freeSize, maxMB, this.conf.getMaxSize(), size(),
         stats.requestCount(), stats.hitCount(), stats.getMissCount(), stats.getEvictionCount(),
         stats.getEvictedCount(), stats.getHitRatio() * 100, stats.getMissRatio() * 100,
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
index 1c2119b..ada3936 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
@@ -242,30 +242,39 @@ public class IteratorUtil {
     }
   }
 
-  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V> loadIterators(
-      IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
-      AccumuloConfiguration conf, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio,
-      IteratorEnvironment env, boolean useAccumuloClassLoader) throws IOException {
+  // @formatter:off
+  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V>
+    loadIterators(
+  // @formatter:on
+          IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
+          AccumuloConfiguration conf, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio,
+          IteratorEnvironment env, boolean useAccumuloClassLoader) throws IOException {
 
     return loadIteratorsHelper(scope, source, extent, conf, ssiList, ssio, env,
         useAccumuloClassLoader, conf.get(Property.TABLE_CLASSPATH));
   }
 
-  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V> loadIterators(
-      IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
-      AccumuloConfiguration conf, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio,
-      IteratorEnvironment env, boolean useAccumuloClassLoader, String classLoaderContext)
-      throws IOException {
+  // @formatter:off
+  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V>
+    loadIterators(
+  // @formatter:on
+          IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
+          AccumuloConfiguration conf, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio,
+          IteratorEnvironment env, boolean useAccumuloClassLoader, String classLoaderContext)
+          throws IOException {
 
     return loadIteratorsHelper(scope, source, extent, conf, ssiList, ssio, env,
         useAccumuloClassLoader, classLoaderContext);
   }
 
-  private static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V> loadIteratorsHelper(
-      IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
-      AccumuloConfiguration conf, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio,
-      IteratorEnvironment env, boolean useAccumuloClassLoader, String classLoaderContext)
-      throws IOException {
+  // @formatter:off
+  private static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V>
+    loadIteratorsHelper(
+  // @formatter:on
+          IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
+          AccumuloConfiguration conf, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio,
+          IteratorEnvironment env, boolean useAccumuloClassLoader, String classLoaderContext)
+          throws IOException {
 
     List<IterInfo> iters = new ArrayList<>(ssiList);
     Map<String,Map<String,String>> allOptions = new HashMap<>();
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/CachingHDFSSecretKeyEncryptionStrategy.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/CachingHDFSSecretKeyEncryptionStrategy.java
index 5f3c8d7..6bdc2e3 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/CachingHDFSSecretKeyEncryptionStrategy.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/CachingHDFSSecretKeyEncryptionStrategy.java
@@ -165,14 +165,13 @@ public class CachingHDFSSecretKeyEncryptionStrategy implements SecretKeyEncrypti
             "Could not initialize key encryption cache, malformed key encryption key file", e);
       } catch (IOException e) {
         if (invalidFile) {
-          throw new IOException(
-              "Could not initialize key encryption cache, malformed key encryption key file. Expected key of lengh "
-                  + keyEncryptionKeyLength + " but file contained "
-                  + (fs.getFileStatus(pathToKey).getLen() - 4) + "bytes for key encryption key.");
+          throw new IOException("Could not initialize key encryption cache,"
+              + " malformed key encryption key file. Expected key of length "
+              + keyEncryptionKeyLength + " but file contained "
+              + (fs.getFileStatus(pathToKey).getLen() - 4) + "bytes for key encryption key.");
         } else {
-          throw new IOException(
-              "Could not initialize key encryption cache, unable to access or find key encryption key file",
-              e);
+          throw new IOException("Could not initialize key encryption cache,"
+              + " unable to access or find key encryption key file", e);
         }
       } finally {
         IOUtils.closeQuietly(in);
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
index ebbd8fe..aadfd40 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
@@ -74,7 +74,8 @@ public class DefaultCryptoModule implements CryptoModule {
   public CryptoModuleParameters initializeCipher(CryptoModuleParameters params) {
 
     log.trace(String.format(
-        "Using cipher suite \"%s\" with key length %d with RNG \"%s\" and RNG provider \"%s\" and key encryption strategy \"%s\"",
+        "Using cipher suite \"%s\" with key length %d with"
+            + " RNG \"%s\" and RNG provider \"%s\" and key encryption strategy" + " \"%s\"",
         params.getCipherSuite(), params.getKeyLength(), params.getRandomNumberGenerator(),
         params.getRandomNumberGeneratorProvider(), params.getKeyEncryptionStrategyClass()));
 
@@ -98,12 +99,12 @@ public class DefaultCryptoModule implements CryptoModule {
     try {
       initCipher(params, cipher, Cipher.ENCRYPT_MODE);
     } catch (InvalidKeyException e) {
-      log.error(
-          "Accumulo encountered an unknown error in generating the secret key object (SecretKeySpec) for an encrypted stream");
+      log.error("Accumulo encountered an unknown error in generating the secret"
+          + " key object (SecretKeySpec) for an encrypted stream");
       throw new RuntimeException(e);
     } catch (InvalidAlgorithmParameterException e) {
-      log.error(
-          "Accumulo encountered an unknown error in setting up the initialization vector for an encrypted stream");
+      log.error("Accumulo encountered an unknown error in setting up the"
+          + " initialization vector for an encrypted stream");
       throw new RuntimeException(e);
     }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/summary/Gatherer.java b/core/src/main/java/org/apache/accumulo/core/summary/Gatherer.java
index 56e9d51..8163cd1 100644
--- a/core/src/main/java/org/apache/accumulo/core/summary/Gatherer.java
+++ b/core/src/main/java/org/apache/accumulo/core/summary/Gatherer.java
@@ -209,10 +209,9 @@ public class Gatherer {
         location = tservers.get(idx);
       }
 
+      // merge contiguous ranges
       List<Range> merged = Range
-          .mergeOverlapping(Lists.transform(entry.getValue(), tm -> tm.getExtent().toDataRange())); // merge
-                                                                                                    // contiguous
-                                                                                                    // ranges
+          .mergeOverlapping(Lists.transform(entry.getValue(), tm -> tm.getExtent().toDataRange()));
       List<TRowRange> ranges = merged.stream().map(r -> toClippedExtent(r).toThrift())
           .collect(Collectors.toList()); // clip ranges to queried range
 
diff --git a/core/src/main/java/org/apache/accumulo/core/summary/SummaryReader.java b/core/src/main/java/org/apache/accumulo/core/summary/SummaryReader.java
index e87bc22..74e1c35 100644
--- a/core/src/main/java/org/apache/accumulo/core/summary/SummaryReader.java
+++ b/core/src/main/java/org/apache/accumulo/core/summary/SummaryReader.java
@@ -174,8 +174,10 @@ public class SummaryReader {
   public static SummaryReader load(Configuration conf, AccumuloConfiguration aConf,
       InputStream inputStream, long length, Predicate<SummarizerConfiguration> summarySelector,
       SummarizerFactory factory) throws IOException {
-    org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.Reader bcReader = new CachableBlockFile.Reader(
-        (InputStream & Seekable) inputStream, length, conf, aConf);
+    // @formatter:off
+    org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.Reader bcReader =
+      new CachableBlockFile.Reader((InputStream & Seekable) inputStream, length, conf, aConf);
+    // @formatter:on
     return load(bcReader, summarySelector, factory);
   }
 
@@ -242,8 +244,10 @@ public class SummaryReader {
     // read summarizer configuration
     String summarizerClazz = in.readUTF();
     String configId = in.readUTF();
-    org.apache.accumulo.core.client.summary.SummarizerConfiguration.Builder scb = SummarizerConfiguration
-        .builder(summarizerClazz).setPropertyId(configId);
+    // @formatter:off
+    org.apache.accumulo.core.client.summary.SummarizerConfiguration.Builder scb =
+      SummarizerConfiguration.builder(summarizerClazz).setPropertyId(configId);
+    // @formatter:on
     int numOpts = WritableUtils.readVInt(in);
     for (int i = 0; i < numOpts; i++) {
       String k = in.readUTF();
diff --git a/core/src/test/java/org/apache/accumulo/core/conf/ConfigSanityCheckTest.java b/core/src/test/java/org/apache/accumulo/core/conf/ConfigSanityCheckTest.java
index 6403593..1841d80 100644
--- a/core/src/test/java/org/apache/accumulo/core/conf/ConfigSanityCheckTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/conf/ConfigSanityCheckTest.java
@@ -26,8 +26,10 @@ public class ConfigSanityCheckTest {
   private Map<String,String> m;
 
   // These are used when a valid class is needed for testing
-  private static final String DEFAULT_CRYPTO_MODULE = "org.apache.accumulo.core.security.crypto.DefaultCryptoModule";
-  private static final String DEFAULT_SECRET_KEY_ENCRYPTION_STRATEGY = "org.apache.accumulo.core.security.crypto.NonCachingSecretKeyEncryptionStrategy";
+  private static final String PROPS_PREFIX = "org.apache.accumulo.core.security.crypto.";
+  private static final String DEFAULT_CRYPTO_MODULE = PROPS_PREFIX + "DefaultCryptoModule";
+  private static final String DEFAULT_SECRET_KEY_ENCRYPTION_STRATEGY = PROPS_PREFIX
+      + "NonCachingSecretKeyEncryptionStrategy";
 
   @Before
   public void setUp() {
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java b/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
index 2f24f30..30a696b 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
@@ -137,8 +137,8 @@ public class Proxy implements KeywordExecutable {
         if (is != null) {
           props.load(is);
         } else {
-          System.err.println(
-              "proxy.properties needs to be specified as argument (using -p) or on the classpath (by putting the file in conf/)");
+          System.err.println("proxy.properties needs to be specified as"
+              + " argument (using -p) or on the classpath (by putting the file in conf/)");
           System.exit(-1);
         }
       }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
index 44b18a8..aaff32c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
@@ -182,10 +182,10 @@ public class Accumulo {
               String setting = new String(buffer, 0, bytes, UTF_8);
               setting = setting.trim();
               if (bytes > 0 && Integer.parseInt(setting) > 10) {
-                log.warn(
-                    "System swappiness setting is greater than ten ({}) which can cause time-sensitive operations to be delayed. "
-                        + " Accumulo is time sensitive because it needs to maintain distributed lock agreement.",
-                    setting);
+                log.warn("System swappiness setting is greater than ten ({})"
+                    + " which can cause time-sensitive operations to be delayed."
+                    + " Accumulo is time sensitive because it needs to maintain"
+                    + " distributed lock agreement.", setting);
               }
             }
           }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/ServerOpts.java b/server/base/src/main/java/org/apache/accumulo/server/ServerOpts.java
index b173ae3..c49f6a1 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerOpts.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerOpts.java
@@ -40,7 +40,8 @@ public class ServerOpts extends Help {
   }
 
   @Parameter(names = "-o", splitter = NullSplitter.class,
-      description = "Overrides configuration set in accumulo-site.xml (but NOT system-wide config set in Zookeeper). Expected format: -o <key>=<value>")
+      description = "Overrides configuration set in accumulo-site.xml (but NOT system-wide config"
+          + " set in Zookeeper). Expected format: -o <key>=<value>")
   private List<String> properties = new ArrayList<>();
 
   public String getAddress() {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java b/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
index 3b5aebf..93646c4 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
@@ -628,8 +628,10 @@ public class BulkImporter {
           files.put(entry.getKey(), tabletFiles);
 
           for (PathSize pathSize : entry.getValue()) {
-            org.apache.accumulo.core.data.thrift.MapFileInfo mfi = new org.apache.accumulo.core.data.thrift.MapFileInfo(
-                pathSize.estSize);
+            // @formatter:off
+            org.apache.accumulo.core.data.thrift.MapFileInfo mfi =
+              new org.apache.accumulo.core.data.thrift.MapFileInfo(pathSize.estSize);
+            // @formatter:on
             tabletFiles.put(pathSize.path.toString(), mfi);
           }
         }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
index 2409f30..05df085 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
@@ -239,9 +239,8 @@ public class VolumeManagerImpl implements VolumeManager {
           synchronized (WARNED_ABOUT_SYNCONCLOSE) {
             if (!WARNED_ABOUT_SYNCONCLOSE.contains(entry.getKey())) {
               WARNED_ABOUT_SYNCONCLOSE.add(entry.getKey());
-              log.warn(
-                  "{} set to false in hdfs-site.xml: data loss is possible on hard system reset or power loss",
-                  DFS_DATANODE_SYNCONCLOSE);
+              log.warn("{} set to false in hdfs-site.xml: data loss is possible"
+                  + " on hard system reset or power loss", DFS_DATANODE_SYNCONCLOSE);
             }
           }
         }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 3fb3953..6f97106 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@ -252,8 +252,8 @@ public class Initialize implements KeywordExecutable {
     log.info("Accumulo data dirs are {}",
         Arrays.asList(VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance())));
     log.info("Zookeeper server is {}", sconf.get(Property.INSTANCE_ZK_HOST));
-    log.info(
-        "Checking if Zookeeper is available. If this hangs, then you need to make sure zookeeper is running");
+    log.info("Checking if Zookeeper is available. If this hangs, then you need"
+        + " to make sure zookeeper is running");
     if (!zookeeperAvailable()) {
       // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j compatibility
       log.error("FATAL Zookeeper needs to be up and running in order to init. Exiting ...");
@@ -270,9 +270,9 @@ public class Initialize implements KeywordExecutable {
       c.println();
       c.println("You can change the instance secret in accumulo by using:");
       c.println("   bin/accumulo " + org.apache.accumulo.server.util.ChangeSecret.class.getName());
-      c.println(
-          "You will also need to edit your secret in your configuration file by adding the property instance.secret to your accumulo-site.xml. "
-              + "Without this accumulo will not operate correctly");
+      c.println("You will also need to edit your secret in your configuration"
+          + " file by adding the property instance.secret to your"
+          + " accumulo-site.xml. Without this accumulo will not operate" + " correctly");
     }
     try {
       if (isInitialized(fs)) {
@@ -307,9 +307,8 @@ public class Initialize implements KeywordExecutable {
     } else if (!instanceDfsDir.isEmpty()) {
       log.error("FATAL: Change the property {} to use a different filesystem,", INSTANCE_DFS_URI);
     } else {
-      log.error(
-          "FATAL: You are using the default URI for the filesystem. Set the property {} to use a different filesystem,",
-          Property.INSTANCE_VOLUMES);
+      log.error("FATAL: You are using the default URI for the filesystem. Set"
+          + " the property {} to use a different filesystem,", Property.INSTANCE_VOLUMES);
     }
     log.error("FATAL: or change the property {} to use a different directory.", INSTANCE_DFS_DIR);
     log.error("FATAL: The current value of {} is |{}|", INSTANCE_DFS_URI, instanceDfsUri);
@@ -384,10 +383,11 @@ public class Initialize implements KeywordExecutable {
         // Try to determine when we couldn't find an appropriate core-site.xml on the classpath
         if (defaultFsUri.equals(fsDefaultName) && defaultFsUri.equals(fsDefaultFS)) {
           log.error(
-              "FATAL: Default filesystem value ('fs.defaultFS' or 'fs.default.name') of '{}' was found in the Hadoop configuration",
+              "FATAL: Default filesystem value ('fs.defaultFS' or"
+                  + " 'fs.default.name') of '{}' was found in the Hadoop configuration",
               defaultFsUri);
-          log.error(
-              "FATAL: Please ensure that the Hadoop core-site.xml is on the classpath using 'general.classpaths' in accumulo-site.xml");
+          log.error("FATAL: Please ensure that the Hadoop core-site.xml is on"
+              + " the classpath using 'general.classpaths' in accumulo-site.xml");
         }
       }
 
@@ -439,8 +439,8 @@ public class Initialize implements KeywordExecutable {
 
     if (opts.uploadAccumuloSite) {
       try {
-        log.info(
-            "Uploading properties in accumulo-site.xml to Zookeeper. Properties that cannot be set in Zookeeper will be skipped:");
+        log.info("Uploading properties in accumulo-site.xml to Zookeeper."
+            + " Properties that cannot be set in Zookeeper will be skipped:");
         Map<String,String> entries = new TreeMap<>();
         SiteConfiguration.getInstance().getProperties(entries, x -> true, false);
         for (Map.Entry<String,String> entry : entries.entrySet()) {
@@ -858,8 +858,10 @@ public class Initialize implements KeywordExecutable {
         .fromString(ZooUtil.getInstanceIDFromHdfs(iidPath, SiteConfiguration.getInstance()));
     for (Pair<Path,Path> replacementVolume : ServerConstants.getVolumeReplacements()) {
       if (aBasePath.equals(replacementVolume.getFirst()))
-        log.error("{} is set to be replaced in {} and should not appear in {}"
-            + ". It is highly recommended that this property be removed as data could still be written to this volume.",
+        log.error(
+            "{} is set to be replaced in {} and should not appear in {}."
+                + " It is highly recommended that this property be removed as data"
+                + " could still be written to this volume.",
             aBasePath, Property.INSTANCE_VOLUMES_REPLACEMENTS, Property.INSTANCE_VOLUMES);
     }
 
@@ -936,8 +938,8 @@ public class Initialize implements KeywordExecutable {
         if (isInitialized(fs)) {
           if (!opts.forceResetSecurity) {
             ConsoleReader c = getConsoleReader();
-            String userEnteredName = c.readLine(
-                "WARNING: This will remove all users from Accumulo! If you wish to proceed enter the instance name: ");
+            String userEnteredName = c.readLine("WARNING: This will remove all"
+                + " users from Accumulo! If you wish to proceed enter the instance" + " name: ");
             if (userEnteredName != null && !instance.getInstanceName().equals(userEnteredName)) {
               log.error("Aborted reset security: Instance name did not match current instance.");
               return;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
index 0b2ffa3..4c8be24 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
@@ -89,10 +89,10 @@ public class HostRegexTableLoadBalancer extends TableLoadBalancer implements Con
   public static final String HOST_BALANCER_OOB_CHECK_KEY = PROP_PREFIX
       + "balancer.host.regex.oob.period";
   private static final String HOST_BALANCER_OOB_DEFAULT = "5m";
-  public static final String HOST_BALANCER_REGEX_USING_IPS_KEY = Property.TABLE_ARBITRARY_PROP_PREFIX
-      .getKey() + "balancer.host.regex.is.ip";
-  public static final String HOST_BALANCER_REGEX_MAX_MIGRATIONS_KEY = Property.TABLE_ARBITRARY_PROP_PREFIX
-      .getKey() + "balancer.host.regex.concurrent.migrations";
+  public static final String HOST_BALANCER_REGEX_USING_IPS_KEY = PROP_PREFIX
+      + "balancer.host.regex.is.ip";
+  public static final String HOST_BALANCER_REGEX_MAX_MIGRATIONS_KEY = PROP_PREFIX
+      + "balancer.host.regex.concurrent.migrations";
   private static final int HOST_BALANCER_REGEX_MAX_MIGRATIONS_DEFAULT = 250;
   protected static final String DEFAULT_POOL = "HostTableLoadBalancer.ALL";
   private static final int DEFAULT_OUTSTANDING_MIGRATIONS = 0;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java b/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java
index ac5f7aa..01ca214 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java
@@ -154,8 +154,8 @@ public class MetricsConfiguration {
         .getResource("accumulo-metrics.xml");
     if (metricsUrl == null) {
       if (!alreadyWarned)
-        log.warn(
-            "accumulo-metrics.xml was not found on classpath. Metrics collection will be disabled.");
+        log.warn("accumulo-metrics.xml was not found on classpath. Metrics"
+            + " collection will be disabled.");
       alreadyWarned = true;
       notFound = true;
       return;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/rpc/TCredentialsUpdatingInvocationHandler.java b/server/base/src/main/java/org/apache/accumulo/server/rpc/TCredentialsUpdatingInvocationHandler.java
index 563131b..f2d4f6b 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/rpc/TCredentialsUpdatingInvocationHandler.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/rpc/TCredentialsUpdatingInvocationHandler.java
@@ -111,9 +111,8 @@ public class TCredentialsUpdatingInvocationHandler<I> implements InvocationHandl
     if (!KerberosToken.class.isAssignableFrom(tokenClass)
         && !SystemToken.class.isAssignableFrom(tokenClass)) {
       // Don't include messages about SystemToken since it's internal
-      log.debug(
-          "Will not update principal on authentication tokens other than KerberosToken. Received {}",
-          tokenClass);
+      log.debug("Will not update principal on authentication tokens other than"
+          + " KerberosToken. Received {}", tokenClass);
       throw new ThriftSecurityException("Did not receive a valid token",
           SecurityErrorCode.BAD_CREDENTIALS);
     }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java b/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
index 2aa07e1..959dfff 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
@@ -457,9 +457,9 @@ public class TServerUtils {
     // this host, fail quickly and inform them to update
     // their configuration.
     if (!hostname.equals(fqdn)) {
-      log.error(
-          "Expected hostname of '{}' but got '{}'. Ensure the entries in the Accumulo hosts files (e.g. masters, tservers) are the FQDN for each host when using SASL.",
-          fqdn, hostname);
+      log.error("Expected hostname of '{}' but got '{}'. Ensure the entries in"
+          + " the Accumulo hosts files (e.g. masters, tservers) are the FQDN for"
+          + " each host when using SASL.", fqdn, hostname);
       transport.close();
       throw new RuntimeException("SASL requires that the address the thrift"
           + " server listens on is the same as the FQDN for this host");
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java b/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
index 6a92eb3..b169294 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
@@ -187,9 +187,8 @@ public class SecurityOperation {
         }
       } else {
         if (!(context.getCredentials().equals(creds))) {
-          log.debug(
-              "Provided credentials did not match server's expected credentials. Expected {} but got {}",
-              context.getCredentials(), creds);
+          log.debug("Provided credentials did not match server's expected"
+              + " credentials. Expected {} but got {}", context.getCredentials(), creds);
           throw new ThriftSecurityException(creds.getPrincipal(),
               SecurityErrorCode.BAD_CREDENTIALS);
         }
@@ -793,9 +792,8 @@ public class SecurityOperation {
 
     try {
       permHandle.grantNamespacePermission(user, namespace, permission);
-      log.info(
-          "Granted namespace permission {} for user {} on the namespace {} at the request of user {}",
-          permission, user, namespace, c.getPrincipal());
+      log.info("Granted namespace permission {} for user {} on the namespace {}"
+          + " at the request of user {}", permission, user, namespace, c.getPrincipal());
     } catch (AccumuloSecurityException e) {
       throw e.asThriftException();
     } catch (NamespaceNotFoundException e) {
@@ -849,9 +847,8 @@ public class SecurityOperation {
 
     try {
       permHandle.revokeNamespacePermission(user, namespace, permission);
-      log.info(
-          "Revoked namespace permission {} for user {} on the namespace {} at the request of user {}",
-          permission, user, namespace, c.getPrincipal());
+      log.info("Revoked namespace permission {} for user {} on the namespace {}"
+          + " at the request of user {}", permission, user, namespace, c.getPrincipal());
 
     } catch (AccumuloSecurityException e) {
       throw e.asThriftException();
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
index 8dba100..9be303b 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
@@ -319,7 +319,8 @@ public class FileUtil {
       if (numKeys == 0) {
         if (useIndex) {
           log.warn(
-              "Failed to find mid point using indexes, falling back to data files which is slower. No entries between {} and {} for {}",
+              "Failed to find mid point using indexes, falling back to"
+                  + " data files which is slower. No entries between {} and {} for {}",
               prevEndRow, endRow, mapFiles);
           // need to pass original map files, not possibly reduced indexes
           return findMidPoint(fs, tabletDirectory, acuConf, prevEndRow, endRow, origMapFiles,
diff --git a/server/master/src/main/java/org/apache/accumulo/master/Master.java b/server/master/src/main/java/org/apache/accumulo/master/Master.java
index 6db190b..c006b7e 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/Master.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/Master.java
@@ -1235,8 +1235,10 @@ public class Master extends AccumuloServerContext
     MasterReplicationCoordinator impl = new MasterReplicationCoordinator(this);
     ReplicationCoordinator.Iface haReplicationProxy = HighlyAvailableServiceWrapper.service(impl,
         this);
-    ReplicationCoordinator.Processor<ReplicationCoordinator.Iface> replicationCoordinatorProcessor = new ReplicationCoordinator.Processor<>(
-        RpcWrapper.service(haReplicationProxy));
+    // @formatter:off
+    ReplicationCoordinator.Processor<ReplicationCoordinator.Iface> replicationCoordinatorProcessor =
+      new ReplicationCoordinator.Processor<>(RpcWrapper.service(haReplicationProxy));
+    // @formatter:on
     ServerAddress replAddress = TServerUtils.startServer(this, hostname,
         Property.MASTER_REPLICATION_COORDINATOR_PORT, replicationCoordinatorProcessor,
         "Master Replication Coordinator", "Replication Coordinator", null,
diff --git a/server/monitor/src/test/java/org/apache/accumulo/monitor/util/AccumuloMonitorAppenderTest.java b/server/monitor/src/test/java/org/apache/accumulo/monitor/util/AccumuloMonitorAppenderTest.java
index 2c97056..91f9b96 100644
--- a/server/monitor/src/test/java/org/apache/accumulo/monitor/util/AccumuloMonitorAppenderTest.java
+++ b/server/monitor/src/test/java/org/apache/accumulo/monitor/util/AccumuloMonitorAppenderTest.java
@@ -110,22 +110,26 @@ public class AccumuloMonitorAppenderTest {
       byte[] location = loc == 0 ? null : ("loc" + loc).getBytes(UTF_8);
       return new MonitorLocation(loc, location);
     };
-    Function<MonitorLocation,AppenderSkeleton> appenderFactory = newLocation -> new AppenderSkeleton() {
+    Function<MonitorLocation,AppenderSkeleton> appenderFactory = newLocation -> {
 
-      {
-        this.name = "Appender for " + newLocation.getLocation();
-      }
+      return new AppenderSkeleton() {
 
-      @Override
-      public boolean requiresLayout() {
-        return false;
-      }
+        {
+          this.name = "Appender for " + newLocation.getLocation();
+        }
+
+        @Override
+        public boolean requiresLayout() {
+          return false;
+        }
+
+        @Override
+        public void close() {}
 
-      @Override
-      public void close() {}
+        @Override
+        protected void append(LoggingEvent event) {}
 
-      @Override
-      protected void append(LoggingEvent event) {}
+      };
 
     };
 
diff --git a/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java b/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java
index 9d2df7c..6105e59 100644
--- a/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java
+++ b/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java
@@ -171,10 +171,10 @@ public abstract class AsyncSpanReceiver<SpanKey,Destination> implements SpanRece
       if (sendQueueSize.get() > maxQueueSize) {
         long now = System.currentTimeMillis();
         if (now - lastNotificationOfDroppedSpans > 60 * 1000) {
-          log.warn(
-              "Tracing spans are being dropped because there are already {} spans queued for delivery.\n"
-                  + "This does not affect performance, security or data integrity, but distributed tracing information is being lost.",
-              maxQueueSize);
+          log.warn("Tracing spans are being dropped because there are already"
+              + " {} spans queued for delivery.\n"
+              + "This does not affect performance, security or data integrity,"
+              + " but distributed tracing information is being lost.", maxQueueSize);
           lastNotificationOfDroppedSpans = now;
         }
         return;
diff --git a/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java b/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java
index af9834b..aa33da1 100644
--- a/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java
+++ b/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java
@@ -319,9 +319,8 @@ public class TraceServer implements Watcher {
         }
       }
     } catch (MutationsRejectedException | RuntimeException exception) {
-      log.warn(
-          "Problem flushing traces, resetting writer. Set log level to DEBUG to see stacktrace. cause: "
-              + exception);
+      log.warn("Problem flushing traces, resetting writer. Set log level to"
+          + " DEBUG to see stacktrace. cause: " + exception);
       log.debug("flushing traces failed due to exception", exception);
       resetWriter();
       /* XXX e.g. if the writer was closed between when we grabbed it and when we called flush. */
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/NativeMap.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/NativeMap.java
index 4a2b212..caff10b 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/NativeMap.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/NativeMap.java
@@ -97,8 +97,10 @@ public class NativeMap implements Iterable<Map.Entry<Key,Value>> {
     // Exit if native libraries could not be loaded
     if (!isLoaded()) {
       log.error(
-          "FATAL! Accumulo native libraries were requested but could not be be loaded. Either set '{}' to false in accumulo-site.xml "
-              + " or make sure native libraries are created in directories set by the JVM system property 'accumulo.native.lib.path' in accumulo-env.sh!",
+          "FATAL! Accumulo native libraries were requested but could not"
+              + " be loaded. Either set '{}' to false in accumulo-site.xml or make"
+              + " sure native libraries are created in directories set by the JVM"
+              + " system property 'accumulo.native.lib.path' in accumulo-env.sh!",
           Property.TSERV_NATIVEMAP_ENABLED);
       System.exit(1);
     }
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
index 91f5f6c..52aaf05 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
@@ -830,7 +830,8 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
 
       if (log.isTraceEnabled()) {
         log.trace(String.format(
-            "MultiScanSess %s %,d entries in %.2f secs (lookup_time:%.2f secs tablets:%,d ranges:%,d) ",
+            "MultiScanSess %s %,d entries in %.2f secs"
+                + " (lookup_time:%.2f secs tablets:%,d ranges:%,d) ",
             TServerUtils.clientAddress.get(), session.numEntries, (t2 - session.startTime) / 1000.0,
             session.totalLookupTime / 1000.0, session.numTablets, session.numRanges));
       }
@@ -2648,8 +2649,10 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
     ReplicationServicer.Iface rpcProxy = RpcWrapper.service(handler);
     ReplicationServicer.Iface repl = TCredentialsUpdatingWrapper.service(rpcProxy,
         handler.getClass(), getConfiguration());
-    ReplicationServicer.Processor<ReplicationServicer.Iface> processor = new ReplicationServicer.Processor<>(
-        repl);
+    // @formatter:off
+    ReplicationServicer.Processor<ReplicationServicer.Iface> processor =
+      new ReplicationServicer.Processor<>(repl);
+    // @formatter:on
     AccumuloConfiguration conf = getServerConfigurationFactory().getSystemConfiguration();
     Property maxMessageSizeProperty = (conf.get(Property.TSERV_MAX_MESSAGE_SIZE) != null
         ? Property.TSERV_MAX_MESSAGE_SIZE
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
index e6dd44f..224cf31 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
@@ -202,13 +202,16 @@ public class TabletServerResourceManager {
       // Still check block cache sizes when using native maps.
       if (dCacheSize + iCacheSize + sCacheSize + totalQueueSize > runtime.maxMemory()) {
         throw new IllegalArgumentException(String.format(
-            "Block cache sizes %,d and mutation queue size %,d is too large for this JVM configuration %,d",
+            "Block cache sizes %,d" + " and mutation queue size %,d is too large for this JVM"
+                + " configuration %,d",
             dCacheSize + iCacheSize + sCacheSize, totalQueueSize, runtime.maxMemory()));
       }
     } else if (maxMemory + dCacheSize + iCacheSize + sCacheSize + totalQueueSize > runtime
         .maxMemory()) {
       throw new IllegalArgumentException(String.format(
-          "Maximum tablet server map memory %,d block cache sizes %,d and mutation queue size %,d is too large for this JVM configuration %,d",
+          "Maximum tablet server"
+              + " map memory %,d block cache sizes %,d and mutation queue size %,d is"
+              + " too large for this JVM configuration %,d",
           maxMemory, dCacheSize + iCacheSize + sCacheSize, totalQueueSize, runtime.maxMemory()));
     }
     runtime.gc();
@@ -474,9 +477,8 @@ public class TabletServerResourceManager {
               TabletStateImpl tabletReport = tabletReportsCopy.get(keyExtent);
 
               if (tabletReport == null) {
-                log.warn(
-                    "Memory manager asked to compact nonexistent tablet {}; manager implementation might be misbehaving",
-                    keyExtent);
+                log.warn("Memory manager asked to compact nonexistent tablet"
+                    + " {}; manager implementation might be misbehaving", keyExtent);
                 continue;
               }
               Tablet tablet = tabletReport.getTablet();
@@ -494,9 +496,8 @@ public class TabletServerResourceManager {
                       }
                     }
                   }
-                  log.debug(
-                      "Ignoring memory manager recommendation: not minor compacting closed tablet {}",
-                      keyExtent);
+                  log.debug("Ignoring memory manager recommendation: not minor"
+                      + " compacting closed tablet {}", keyExtent);
                 } else {
                   log.info("Ignoring memory manager recommendation: not minor compacting {}",
                       keyExtent);
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionRequest.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionRequest.java
index 5655098..8876cac 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionRequest.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionRequest.java
@@ -146,7 +146,8 @@ public class MajorCompactionRequest implements Cloneable {
   public List<Summary> getSummaries(Collection<FileRef> files,
       Predicate<SummarizerConfiguration> summarySelector) throws IOException {
     Preconditions.checkState(volumeManager != null,
-        "Getting summaries is not supported at this time.  Its only supported when CompactionStrategy.gatherInformation() is called.");
+        "Getting summaries is not" + " supported at this time. It's only supported when"
+            + " CompactionStrategy.gatherInformation() is called.");
     SummaryCollection sc = new SummaryCollection();
     SummarizerFactory factory = new SummarizerFactory(tableConfig);
     for (FileRef file : files) {
@@ -167,7 +168,8 @@ public class MajorCompactionRequest implements Cloneable {
 
   public FileSKVIterator openReader(FileRef ref) throws IOException {
     Preconditions.checkState(volumeManager != null,
-        "Opening files is not supported at this time.  Its only supported when CompactionStrategy.gatherInformation() is called.");
+        "Opening files is not" + " supported at this time. It's only supported when"
+            + " CompactionStrategy.gatherInformation() is called.");
     // @TODO verify the file isn't some random file in HDFS
     // @TODO ensure these files are always closed?
     FileOperations fileFactory = FileOperations.getInstance();
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/LargestFirstMemoryManagerTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/LargestFirstMemoryManagerTest.java
index 179b24e..f5f4721 100644
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/LargestFirstMemoryManagerTest.java
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/LargestFirstMemoryManagerTest.java
@@ -195,8 +195,10 @@ public class LargestFirstMemoryManagerTest {
     final String deletedTableId = "1";
     Function<Table.ID,Boolean> existenceCheck = tableId -> !deletedTableId
         .contentEquals(tableId.canonicalID());
-    LargestFirstMemoryManagerWithExistenceCheck mgr = new LargestFirstMemoryManagerWithExistenceCheck(
-        existenceCheck);
+    // @formatter:off
+    LargestFirstMemoryManagerWithExistenceCheck mgr =
+      new LargestFirstMemoryManagerWithExistenceCheck(existenceCheck);
+    // @formatter:on
     ServerConfiguration config = new ServerConfiguration() {
       ServerConfigurationFactory delegate = new ServerConfigurationFactory(inst);
 
diff --git a/shell/src/main/java/org/apache/accumulo/shell/Shell.java b/shell/src/main/java/org/apache/accumulo/shell/Shell.java
index ee4c465..2707fef 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/Shell.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/Shell.java
@@ -512,12 +512,12 @@ public class Shell extends ShellOptions implements KeywordExecutable {
     String keepers = getZooKeepers(keepersOption, properties);
 
     if (keepers == null) {
-      throw new IllegalArgumentException(
-          "ZooKeepers must be set using -z or -zh on command line or in accumulo-client.properties");
+      throw new IllegalArgumentException("ZooKeepers must be set using -z or"
+          + " -zh on command line or in accumulo-client.properties");
     }
     if (instanceName == null) {
-      throw new IllegalArgumentException(
-          "Instance name must be set using -z or -zi on command line or in accumulo-client.properties");
+      throw new IllegalArgumentException("Instance name must be set using -z or"
+          + " -zi on command line or in accumulo-client.properties");
     }
     return new ZooKeeperInstance(instanceName, keepers);
   }
diff --git a/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java b/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
index 5278c76..f36e70b 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
@@ -189,8 +189,11 @@ public class ShellOptionsJC {
   private boolean useSasl = false;
 
   @Parameter(names = "--config-file",
-      description = "Read the given accumulo-client.properties file. If omitted, the following locations will be searched "
-          + "~/.accumulo/accumulo-client.properties:$ACCUMULO_CONF_DIR/accumulo-client.properties:/etc/accumulo/accumulo-client.properties")
+      description = "Read the given"
+          + " accumulo-client.properties file. If omitted, the following locations will be"
+          + " searched ~/.accumulo/accumulo-client.properties:"
+          + "$ACCUMULO_CONF_DIR/accumulo-client.properties:"
+          + "/etc/accumulo/accumulo-client.properties")
   private String clientConfigFile = null;
 
   @Parameter(names = {"-zi", "--zooKeeperInstanceName"},
@@ -221,13 +224,14 @@ public class ShellOptionsJC {
         if (ClientProperty.SASL_ENABLED.getBoolean(getClientProperties())) {
           if (!UserGroupInformation.isSecurityEnabled()) {
             throw new IllegalArgumentException(
-                "Kerberos security is not enabled. Run with --sasl or set 'sasl.enabled' in accumulo-client.properties");
+                "Kerberos security is not" + " enabled. Run with --sasl or set 'sasl.enabled' in"
+                    + " accumulo-client.properties");
           }
           UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
           username = ugi.getUserName();
         } else {
-          throw new IllegalArgumentException(
-              "Username is not set. Run with '-u myuser' or set 'auth.username' in accumulo-client.properties");
+          throw new IllegalArgumentException("Username is not set. Run with '-u"
+              + " myuser' or set 'auth.username' in accumulo-client.properties");
         }
       }
     }
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/CreateTableCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/CreateTableCommand.java
index 23e7606..596196c 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/CreateTableCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/CreateTableCommand.java
@@ -226,9 +226,8 @@ public class CreateTableCommand extends Command {
       try {
         iteratorSetting = shellState.iteratorProfiles.get(profileName).get(0);
       } catch (NullPointerException ex) {
-        throw new IllegalArgumentException(
-            "invalid iterator argument. Either profile does not exist or unexpected spaces in argument list.",
-            ex);
+        throw new IllegalArgumentException("invalid iterator argument. Either"
+            + " profile does not exist or unexpected spaces in argument list.", ex);
       }
       // handle case where only the profile is supplied. Use all scopes by default if no scope args
       // are provided.
@@ -280,7 +279,8 @@ public class CreateTableCommand extends Command {
 
   @Override
   public String description() {
-    return "creates a new table, with optional aggregators, iterators, locality groups and optionally pre-split";
+    return "creates a new table, with optional aggregators, iterators, locality"
+        + " groups and optionally pre-split";
   }
 
   @Override
@@ -320,7 +320,8 @@ public class CreateTableCommand extends Command {
     createTableOptLocalityProps.setArgs(Option.UNLIMITED_VALUES);
 
     createTableOptIteratorProps = new Option("i", "iter", true,
-        "initialize iterator at table creation using profile. If no scope supplied, all scopes are activated.");
+        "initialize" + " iterator at table creation using profile. If no scope supplied, all"
+            + " scopes are activated.");
     createTableOptIteratorProps.setArgName("profile[:[all]|[scan[,]][minc[,]][majc]]");
     createTableOptIteratorProps.setArgs(Option.UNLIMITED_VALUES);
 
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/HiddenCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/HiddenCommand.java
index ab0190c..79ff0fe 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/HiddenCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/HiddenCommand.java
@@ -42,11 +42,14 @@ public class HiddenCommand extends Command {
     if (rand.nextInt(10) == 0) {
       shellState.getReader().beep();
       shellState.getReader().println();
-      shellState.getReader().println(new String(Base64.getDecoder().decode(
-          "ICAgICAgIC4tLS4KICAgICAgLyAvXCBcCiAgICAgKCAvLS1cICkKICAgICAuPl8gIF88LgogICAgLyB8ICd8ICcgXAog"
-              + "ICAvICB8Xy58Xy4gIFwKICAvIC98ICAgICAgfFwgXAogfCB8IHwgfFwvfCB8IHwgfAogfF98IHwgfCAgfCB8IHxffAogICAgIC8gIF9fICBcCiAgICAvICAv"
-              + "ICBcICBcCiAgIC8gIC8gICAgXCAgXF8KIHwvICAvICAgICAgXCB8IHwKIHxfXy8gICAgICAgIFx8X3wK"),
-          UTF_8));
+      shellState.getReader()
+          .println(new String(Base64.getDecoder()
+              .decode("ICAgICAgIC4tLS4KICAgICAgLyAvXCBcCiAgICAgKCAvLS1cICkKICAgICAuPl8g"
+                  + "IF88LgogICAgLyB8ICd8ICcgXAogICAvICB8Xy58Xy4gIFwKICAvIC98ICAgIC"
+                  + "AgfFwgXAogfCB8IHwgfFwvfCB8IHwgfAogfF98IHwgfCAgfCB8IHxffAogICAg"
+                  + "IC8gIF9fICBcCiAgICAvICAvICBcICBcCiAgIC8gIC8gICAgXCAgXF8KIHwvIC"
+                  + "AvICAgICAgXCB8IHwKIHxfXy8gICAgICAgIFx8X3wK"),
+              UTF_8));
     } else {
       throw new ShellCommandException(ErrorCode.UNRECOGNIZED_COMMAND, getName());
     }
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/SummariesCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/SummariesCommand.java
index e39bdc1..77e665a 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/SummariesCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/SummariesCommand.java
@@ -107,7 +107,8 @@ public class SummariesCommand extends TableOperation {
     final Options opts = super.getOptions();
     disablePaginationOpt = new Option("np", "no-pagination", false, "disable pagination of output");
     summarySelectionOpt = new Option("sr", "select-regex", true,
-        "regex to select summaries. Matches against class name and options used to generate summaries.");
+        "regex to" + " select summaries. Matches against class name and options used to"
+            + " generate summaries.");
     opts.addOption(disablePaginationOpt);
     opts.addOption(summarySelectionOpt);
     opts.addOption(OptUtil.startRowOpt());
diff --git a/start/src/main/java/org/apache/accumulo/start/Main.java b/start/src/main/java/org/apache/accumulo/start/Main.java
index ca976e8..a30c4b4 100644
--- a/start/src/main/java/org/apache/accumulo/start/Main.java
+++ b/start/src/main/java/org/apache/accumulo/start/Main.java
@@ -216,8 +216,8 @@ public class Main {
         Comparator.comparing(KeywordExecutable::keyword));
     executables.addAll(getExecutables(getClassLoader()).values());
 
-    System.out.println(
-        "\nUsage: accumulo <command> [--help] (<argument> ...)\n\n  --help   Prints usage for specified command");
+    System.out.println("\nUsage: accumulo <command> [--help] (<argument> ...)\n\n"
+        + "  --help   Prints usage for specified command");
     System.out.println("\nCore Commands:");
     printCommands(executables, UsageGroup.CORE);
 
diff --git a/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
index 7820364..3939af6 100644
--- a/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
+++ b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
@@ -131,7 +131,10 @@ public abstract class AccumuloClusterHarness extends AccumuloITBase
         }
         break;
       case STANDALONE:
-        StandaloneAccumuloClusterConfiguration conf = (StandaloneAccumuloClusterConfiguration) clusterConf;
+        // @formatter:off
+        StandaloneAccumuloClusterConfiguration conf =
+          (StandaloneAccumuloClusterConfiguration) clusterConf;
+        // @formatter:on
         StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(
             cluster.getConnectionInfo(), conf.getTmpDirectory(), conf.getUsers());
         // If these are provided in the configuration, pass them into the cluster
diff --git a/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java b/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
index 729077b..dcce508 100644
--- a/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
@@ -1557,9 +1557,10 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
         String traceOutput = finalBuffer.toString();
         log.info("Trace output:" + traceOutput);
         if (traceCount > 0) {
+          String[] parts = ("traceTest, startScan,startConditionalUpdate,conditionalUpdate"
+              + ",Check conditions,apply conditional mutations").split(",");
           int lastPos = 0;
-          for (String part : "traceTest, startScan,startConditionalUpdate,conditionalUpdate,Check conditions,apply conditional mutations"
-              .split(",")) {
+          for (String part : parts) {
             log.info("Looking in trace output for '" + part + "'");
             int pos = traceOutput.indexOf(part);
             if (-1 == pos) {
diff --git a/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java b/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java
index 4deee38..acd39be 100644
--- a/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java
@@ -79,7 +79,10 @@ public class ShellConfigIT extends AccumuloClusterHarness {
         clientPropsFile = mac.getConfig().getClientPropsFile();
         break;
       case STANDALONE:
-        StandaloneAccumuloClusterConfiguration standaloneConf = (StandaloneAccumuloClusterConfiguration) getClusterConfiguration();
+        // @formatter:off
+        StandaloneAccumuloClusterConfiguration standaloneConf =
+          (StandaloneAccumuloClusterConfiguration) getClusterConfiguration();
+        // @formatter:on
         clientPropsFile = standaloneConf.getClientPropsFile();
         break;
       default:
diff --git a/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java b/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java
index 996555c..c025226 100644
--- a/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java
+++ b/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java
@@ -185,7 +185,8 @@ public class VerifyIngest {
 
             if (expectedRow >= (opts.rows + opts.startRow)) {
               log.error(
-                  "expectedRow ({}) >= (ingestArgs.rows + ingestArgs.startRow)  ({}), get batch returned data passed end key",
+                  "expectedRow ({}) >= (ingestArgs.rows + ingestArgs.startRow)  ({}), get"
+                      + " batch returned data passed end key",
                   expectedRow, (opts.rows + opts.startRow));
               errors++;
               break;
diff --git a/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java b/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java
index 40b1fda..fb85930 100644
--- a/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java
@@ -105,8 +105,8 @@ public class VolumeChooserIT extends ConfigurableMacBase {
     v4 = new Path("file://" + v4f.getAbsolutePath());
 
     systemPreferredVolumes = v1.toString() + "," + v2.toString();
-    siteConfig.put(PreferredVolumeChooser.TABLE_PREFERRED_VOLUMES, systemPreferredVolumes); // exclude
-                                                                                            // v4
+    // exclude v4
+    siteConfig.put(PreferredVolumeChooser.TABLE_PREFERRED_VOLUMES, systemPreferredVolumes);
     cfg.setSiteConfig(siteConfig);
 
     siteConfig.put(PerTableVolumeChooser.getPropertyNameForScope(ChooserScope.LOGGER),
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java b/test/src/main/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
index 1c52ae6..0ce7e9f 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
@@ -27,6 +27,7 @@ import java.util.TreeSet;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.impl.Table.ID;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
@@ -169,8 +170,7 @@ public class RegexGroupBalanceIT extends ConfigurableMacBase {
       throws TableNotFoundException {
     try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
       s.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
-      org.apache.accumulo.core.client.impl.Table.ID tableId = org.apache.accumulo.core.client.impl.Table.ID
-          .of(conn.tableOperations().tableIdMap().get(tablename));
+      ID tableId = ID.of(conn.tableOperations().tableIdMap().get(tablename));
       s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
 
       Table<String,String,MutableInt> groupLocationCounts = HashBasedTable.create();
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SessionBlockVerifyIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SessionBlockVerifyIT.java
index 2b343db..a9b9e82 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/SessionBlockVerifyIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SessionBlockVerifyIT.java
@@ -168,9 +168,8 @@ public class SessionBlockVerifyIT extends ScanSessionTimeOutIT {
        * AND we will orphan the sessionsToCleanup in the sweep, leading to an inaccurate count
        * within sessionsFound.
        */
-      assertEquals(
-          "Must have ten sessions. Failure indicates a synchronization block within the sweep mechanism",
-          10, sessionsFound);
+      assertEquals("Must have ten sessions. Failure indicates a synchronization"
+          + " block within the sweep mechanism", 10, sessionsFound);
       for (Future<Boolean> callable : callables) {
         callable.cancel(true);
       }

-- 
To stop receiving notification emails like this one, please contact
ctubbsii@apache.org.

Mime
View raw message