accumulo-commits mailing list archives

From: ctubb...@apache.org
Subject: [accumulo] 04/05: Fix line wrap and other checkstyle errors
Date: Fri, 06 Apr 2018 09:53:04 GMT
This is an automated email from the ASF dual-hosted git repository.

ctubbsii pushed a commit to branch 1.8
in repository https://gitbox.apache.org/repos/asf/accumulo.git

commit 2ecae2dc26cc657ee6f0fa3fd738d488ee7d8d26
Author: Christopher Tubbs <ctubbsii@apache.org>
AuthorDate: Fri Apr 6 00:01:59 2018 -0400

    Fix line wrap and other checkstyle errors
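
    Most of the re-wrapping below follows two patterns: long string literals are
    split into concatenated pieces so each source line stays under the checkstyle
    line-length limit, and declarations the formatter would otherwise re-join are
    fenced with // @formatter:off and // @formatter:on toggles. A minimal sketch
    of both patterns (the class and field names here are illustrative, not taken
    from the commit; the toggles assume an Eclipse-style formatter with its
    off/on tags feature enabled):

    public class LineWrapExample {
      // Long literals are split with '+' so each physical line stays under the
      // limit; the compiler folds the concatenation back into a single constant.
      private static final String MESSAGE = "Failed to automatically obtain DelegationToken, "
          + "Mappers/Reducers will likely fail to communicate with Accumulo";

      // @formatter:off
      // A hand-wrapped declaration; the off/on toggles keep the formatter from
      // re-joining it onto one over-long line that checkstyle would then flag.
      private static final java.util.Map<String,java.util.Set<Integer>>
          failuresByServer = new java.util.HashMap<>();
      // @formatter:on
    }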
---
 .../accumulo/core/bloomfilter/BloomFilter.java     |   3 +-
 .../core/bloomfilter/DynamicBloomFilter.java       |   3 +-
 .../org/apache/accumulo/core/cli/ClientOpts.java   |   8 +-
 .../core/cli/MapReduceClientOnRequiredTable.java   |   2 +-
 .../accumulo/core/cli/MapReduceClientOpts.java     |   6 +-
 .../core/client/AccumuloSecurityException.java     |   3 +-
 .../accumulo/core/client/ClientConfiguration.java  |   9 +-
 .../core/client/ClientSideIteratorScanner.java     |  18 +-
 .../core/client/MutationsRejectedException.java    |   5 +-
 .../apache/accumulo/core/client/ScannerBase.java   |   3 +-
 .../accumulo/core/client/ZooKeeperInstance.java    |   5 +-
 .../core/client/admin/CompactionConfig.java        |   4 +-
 .../accumulo/core/client/impl/ClientContext.java   |  21 +-
 .../accumulo/core/client/impl/ConnectorImpl.java   |   3 +-
 .../core/client/impl/SecurityOperationsImpl.java   |  10 +-
 .../core/client/impl/TableOperationsImpl.java      |   5 +-
 .../impl/TabletServerBatchReaderIterator.java      |   8 +-
 .../core/client/impl/TabletServerBatchWriter.java  |  15 +-
 .../core/client/lexicoder/PairLexicoder.java       |  21 +-
 .../core/client/mapred/AbstractInputFormat.java    |   9 +-
 .../core/client/mapred/AccumuloInputFormat.java    |   5 +-
 .../mapred/AccumuloMultiTableInputFormat.java      |   3 +-
 .../core/client/mapred/AccumuloOutputFormat.java   |   5 +-
 .../core/client/mapred/AccumuloRowInputFormat.java |  54 ++-
 .../core/client/mapreduce/AbstractInputFormat.java |   9 +-
 .../core/client/mapreduce/AccumuloInputFormat.java |   5 +-
 .../client/mapreduce/AccumuloOutputFormat.java     |   5 +-
 .../mapreduce/lib/impl/InputConfigurator.java      |   4 +-
 .../accumulo/core/client/rfile/RFileWriter.java    |  16 +-
 .../core/client/sample/RowColumnSampler.java       |   8 +-
 .../accumulo/core/client/sample/RowSampler.java    |   8 +-
 .../security/tokens/AuthenticationToken.java       |   6 +-
 .../core/client/security/tokens/KerberosToken.java |   4 +-
 .../accumulo/core/conf/AccumuloConfiguration.java  |   4 +-
 .../core/conf/CredentialProviderFactoryShim.java   |  22 +-
 .../org/apache/accumulo/core/conf/Property.java    | 537 +++++++++++++--------
 .../apache/accumulo/core/conf/PropertyType.java    |  53 +-
 .../accumulo/core/conf/SiteConfiguration.java      |   5 +-
 .../core/data/ConstraintViolationSummary.java      |   3 +-
 .../accumulo/core/data/impl/TabletIdImpl.java      |  11 +-
 .../apache/accumulo/core/file/FileOperations.java  |  10 +-
 .../core/file/blockfile/cache/LruBlockCache.java   |   9 +-
 .../accumulo/core/file/rfile/CreateEmpty.java      |   8 +-
 .../accumulo/core/file/rfile/KeyShortener.java     |   5 +-
 .../org/apache/accumulo/core/file/rfile/RFile.java |   5 +-
 .../accumulo/core/file/rfile/bcfile/BCFile.java    |   9 +-
 .../core/iterators/AggregatingIterator.java        |   5 +-
 .../apache/accumulo/core/iterators/Combiner.java   |  11 +-
 .../org/apache/accumulo/core/iterators/Filter.java |   4 +-
 .../core/iterators/IteratorEnvironment.java        |   3 +-
 .../accumulo/core/iterators/IteratorUtil.java      |  89 ++--
 .../accumulo/core/iterators/LongCombiner.java      |   4 +-
 .../accumulo/core/iterators/OptionDescriber.java   |   5 +-
 .../core/iterators/TypedValueCombiner.java         |   4 +-
 .../accumulo/core/iterators/user/AgeOffFilter.java |   4 +-
 .../core/iterators/user/CfCqSliceOpts.java         |  40 +-
 .../core/iterators/user/ColumnAgeOffFilter.java    |   4 +-
 .../core/iterators/user/ColumnSliceFilter.java     |   4 +-
 .../core/iterators/user/LargeRowFilter.java        |   3 +-
 .../accumulo/core/iterators/user/MaxCombiner.java  |   5 +-
 .../accumulo/core/iterators/user/MinCombiner.java  |   5 +-
 .../accumulo/core/iterators/user/RegExFilter.java  |   4 +-
 .../core/iterators/user/RowEncodingIterator.java   |   6 +-
 .../core/iterators/user/SummingArrayCombiner.java  |   6 +-
 .../core/iterators/user/SummingCombiner.java       |   5 +-
 .../core/iterators/user/TransformingIterator.java  |  12 +-
 .../core/iterators/user/VisibilityFilter.java      |  10 +-
 .../core/metadata/schema/MetadataSchema.java       |   8 +-
 .../core/replication/ReplicationSchema.java        |  27 +-
 .../apache/accumulo/core/rpc/FilterTransport.java  |   2 +-
 .../org/apache/accumulo/core/rpc/ThriftUtil.java   |  16 +-
 .../accumulo/core/rpc/UGIAssumingTransport.java    |   2 +-
 .../accumulo/core/security/ColumnVisibility.java   |   5 +-
 .../core/security/crypto/BlockedOutputStream.java  |   7 +-
 .../core/security/crypto/CryptoModuleFactory.java  |  30 +-
 .../core/security/crypto/DefaultCryptoModule.java  |  33 +-
 .../security/crypto/DefaultCryptoModuleUtils.java  |  18 +-
 .../NonCachingSecretKeyEncryptionStrategy.java     |   6 +-
 .../org/apache/accumulo/core/util/CreateToken.java |   5 +-
 .../java/org/apache/accumulo/core/util/Merge.java  |   3 +-
 .../accumulo/core/volume/NonConfiguredVolume.java  |   4 +-
 .../client/security/SecurityErrorCodeTest.java     |   5 +-
 .../accumulo/core/conf/PropertyTypeTest.java       |   3 +-
 .../core/data/ConstraintViolationSummaryTest.java  |   9 +-
 .../apache/accumulo/core/data/MutationTest.java    |  14 +-
 .../conf/AggregatorConfigurationTest.java          |  19 +-
 .../core/iterators/user/VisibilityFilterTest.java  |   3 +-
 .../accumulo/core/security/crypto/CryptoTest.java  |   5 +-
 .../examples/simple/client/RandomBatchWriter.java  |   8 +-
 .../examples/simple/mapreduce/TableToFile.java     |  11 +-
 .../examples/simple/sample/SampleExample.java      |  12 +-
 .../accumulo/examples/simple/shard/Query.java      |   6 +-
 .../org/apache/accumulo/fate/util/AddressUtil.java |  19 +-
 .../apache/accumulo/fate/util/AddressUtilTest.java |  16 +-
 .../testcases/MultipleHasTopCalls.java             |   6 +-
 .../accumulo/minicluster/MiniAccumuloRunner.java   |   4 +-
 .../minicluster/impl/MiniAccumuloConfigImpl.java   |   4 +-
 .../main/java/org/apache/accumulo/proxy/Proxy.java |   4 +-
 .../org/apache/accumulo/proxy/ProxyServer.java     |  38 +-
 .../java/org/apache/accumulo/server/Accumulo.java  |  12 +-
 .../accumulo/server/GarbageCollectionLogger.java   |   4 +-
 .../accumulo/server/fs/PreferredVolumeChooser.java |   4 +-
 .../accumulo/server/fs/VolumeManagerImpl.java      |  10 +-
 .../apache/accumulo/server/init/Initialize.java    |  25 +-
 .../accumulo/server/log/WalStateManager.java       |  38 +-
 .../balancer/HostRegexTableLoadBalancer.java       |  46 +-
 .../server/metrics/MetricsConfiguration.java       |   7 +-
 .../rpc/TCredentialsUpdatingInvocationHandler.java |  13 +-
 .../apache/accumulo/server/rpc/TServerUtils.java   |  10 +-
 .../server/security/AuditedSecurityOperation.java  |  95 ++--
 .../server/security/SecurityOperation.java         |   3 +-
 .../server/security/UserImpersonation.java         |  14 +-
 .../server/security/handler/ZKAuthenticator.java   |   5 +-
 .../org/apache/accumulo/server/util/FileUtil.java  |  18 +-
 .../accumulo/server/util/SendLogToChainsaw.java    |   6 +-
 .../server/util/FileSystemMonitorTest.java         |   6 +-
 .../apache/accumulo/gc/SimpleGarbageCollector.java |   4 +-
 .../java/org/apache/accumulo/master/Master.java    |  45 +-
 .../DistributedWorkQueueWorkAssigner.java          |   5 +-
 .../master/replication/FinishedWorkUpdater.java    |  10 +-
 .../RemoveCompleteReplicationRecords.java          |   9 +-
 .../org/apache/accumulo/master/util/FateAdmin.java |   5 +-
 .../accumulo/monitor/servlets/BasicServlet.java    |  17 +-
 .../accumulo/monitor/servlets/MasterServlet.java   |   3 +-
 .../accumulo/monitor/servlets/ProblemServlet.java  |   3 +-
 .../accumulo/monitor/servlets/ShellServlet.java    |  20 +-
 .../accumulo/monitor/servlets/TServersServlet.java |  17 +-
 .../accumulo/monitor/servlets/TablesServlet.java   |  19 +-
 .../accumulo/monitor/servlets/VisServlet.java      |  20 +-
 .../accumulo/monitor/servlets/trace/Basic.java     |   4 +-
 .../accumulo/monitor/servlets/trace/ShowTrace.java |   8 +-
 .../accumulo/monitor/servlets/trace/Summary.java   |   4 +-
 .../apache/accumulo/tracer/AsyncSpanReceiver.java  |   3 +-
 .../org/apache/accumulo/tracer/TraceServer.java    |  25 +-
 .../org/apache/accumulo/tserver/InMemoryMap.java   |   6 +-
 .../org/apache/accumulo/tserver/TabletServer.java  |  42 +-
 .../tserver/TabletServerResourceManager.java       |   7 +-
 .../org/apache/accumulo/tserver/log/DfsLogger.java |  18 +-
 .../accumulo/tserver/log/SortedLogRecovery.java    |   8 +-
 .../tserver/metrics/TabletServerUpdateMetrics.java |   5 +-
 .../tserver/replication/AccumuloReplicaSystem.java |  22 +-
 .../apache/accumulo/tserver/tablet/Compactor.java  |   3 +-
 .../accumulo/tserver/tablet/DatafileManager.java   |   5 +-
 .../org/apache/accumulo/tserver/tablet/Tablet.java |   6 +-
 .../accumulo/tserver/tablet/TabletCommitter.java   |   5 +-
 .../tserver/LargestFirstMemoryManagerTest.java     |   6 +-
 .../main/java/org/apache/accumulo/shell/Shell.java |  12 +-
 .../org/apache/accumulo/shell/ShellOptionsJC.java  |  22 +-
 .../shell/commands/ActiveScanIterator.java         |   3 +-
 .../accumulo/shell/commands/CloneTableCommand.java |   4 +-
 .../accumulo/shell/commands/CompactCommand.java    |  42 +-
 .../shell/commands/CreateTableCommand.java         |   3 +-
 .../apache/accumulo/shell/commands/DUCommand.java  |   5 +-
 .../accumulo/shell/commands/DeleteCommand.java     |   3 +-
 .../accumulo/shell/commands/DeleteRowsCommand.java |   3 +-
 .../shell/commands/DeleteScanIterCommand.java      |   3 +-
 .../accumulo/shell/commands/EGrepCommand.java      |   8 +-
 .../accumulo/shell/commands/FateCommand.java       |   3 +-
 .../accumulo/shell/commands/GrepCommand.java       |   3 +-
 .../accumulo/shell/commands/HiddenCommand.java     |  14 +-
 .../shell/commands/ImportDirectoryCommand.java     |   3 +-
 .../accumulo/shell/commands/InsertCommand.java     |   3 +-
 .../shell/commands/ListCompactionsCommand.java     |   4 +-
 .../accumulo/shell/commands/ListScansCommand.java  |   3 +-
 .../accumulo/shell/commands/MergeCommand.java      |   3 +-
 .../shell/commands/QuotedStringTokenizer.java      |   5 +-
 .../accumulo/shell/commands/ScanCommand.java       |   3 +-
 .../accumulo/shell/commands/SetIterCommand.java    |  16 +-
 .../java/org/apache/accumulo/shell/ShellTest.java  |   3 +-
 .../shell/commands/SetIterCommandTest.java         |   3 +-
 .../classloader/vfs/AccumuloVFSClassLoader.java    |  19 +-
 .../accumulo/harness/AccumuloClusterHarness.java   |   5 +-
 .../accumulo/harness/MiniClusterHarness.java       |   9 +-
 .../apache/accumulo/test/ConditionalWriterIT.java  |  15 +-
 .../org/apache/accumulo/test/InMemoryMapIT.java    |   6 +-
 .../java/org/apache/accumulo/test/MetaSplitIT.java |   4 +-
 .../org/apache/accumulo/test/NamespacesIT.java     |   5 +-
 .../accumulo/test/NativeMapPerformanceTest.java    |   3 +-
 .../org/apache/accumulo/test/ShellConfigIT.java    |   5 +-
 .../org/apache/accumulo/test/ShellServerIT.java    |  41 +-
 .../org/apache/accumulo/test/TestBinaryRows.java   |   3 +-
 .../java/org/apache/accumulo/test/TestIngest.java  |   7 +-
 .../org/apache/accumulo/test/VerifyIngest.java     |   3 +-
 .../test/continuous/ContinuousStatsCollector.java  |  10 +-
 .../test/functional/GarbageCollectorIT.java        |   4 +-
 .../accumulo/test/functional/PermissionsIT.java    |   6 +-
 .../test/functional/RecoveryWithEmptyRFileIT.java  |   4 +-
 .../test/functional/SessionBlockVerifyIT.java      |   5 +-
 .../test/functional/WatchTheWatchCountIT.java      |   5 +-
 .../test/mapreduce/AccumuloInputFormatIT.java      |  10 +-
 .../accumulo/test/proxy/SimpleProxyBase.java       |   4 +-
 .../accumulo/test/proxy/TestProxyReadWrite.java    |  18 +-
 .../org/apache/accumulo/test/randomwalk/Node.java  |   5 +-
 .../test/randomwalk/multitable/CopyTool.java       |  10 +-
 .../test/randomwalk/security/Validate.java         |   6 +-
 .../randomwalk/sequential/MapRedVerifyTool.java    |  10 +-
 .../test/replication/FinishedWorkUpdaterIT.java    |  34 +-
 ...GarbageCollectorCommunicatesWithTServersIT.java |   4 +-
 .../accumulo/test/stress/random/WriteOptions.java  |   5 +-
 .../org/apache/accumulo/test/util/CertUtils.java   |  12 +-
 .../test/TraceRepoDeserializationTest.java         |   3 +-
 201 files changed, 1581 insertions(+), 1102 deletions(-)

diff --git a/core/src/main/java/org/apache/accumulo/core/bloomfilter/BloomFilter.java b/core/src/main/java/org/apache/accumulo/core/bloomfilter/BloomFilter.java
index 37e84d9..4d704f3 100644
--- a/core/src/main/java/org/apache/accumulo/core/bloomfilter/BloomFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/bloomfilter/BloomFilter.java
@@ -1,6 +1,7 @@
 /*
  *
- * Copyright (c) 2005, European Commission project OneLab under contract 034819 (http://www.one-lab.org)
+ * Copyright (c) 2005, European Commission project OneLab under contract 034819
+ * (http://www.one-lab.org)
  * All rights reserved.
  * Redistribution and use in source and binary forms, with or
  * without modification, are permitted provided that the following
diff --git a/core/src/main/java/org/apache/accumulo/core/bloomfilter/DynamicBloomFilter.java b/core/src/main/java/org/apache/accumulo/core/bloomfilter/DynamicBloomFilter.java
index 5f70105..85399bf 100644
--- a/core/src/main/java/org/apache/accumulo/core/bloomfilter/DynamicBloomFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/bloomfilter/DynamicBloomFilter.java
@@ -1,6 +1,7 @@
 /*
  *
- * Copyright (c) 2005, European Commission project OneLab under contract 034819 (http://www.one-lab.org)
+ * Copyright (c) 2005, European Commission project OneLab under contract 034819
+ * (http://www.one-lab.org)
  * All rights reserved.
  * Redistribution and use in source and binary forms, with or
  * without modification, are permitted provided that the following
diff --git a/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java b/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
index b1d3150..e88c6d8 100644
--- a/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
+++ b/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
@@ -137,8 +137,8 @@ public class ClientOpts extends Help {
   @Parameter(names = {"-tc", "--tokenClass"}, description = "Token class")
   private String tokenClassName = null;
 
-  @DynamicParameter(names = "-l",
-      description = "login properties in the format key=value. Reuse -l for each property (prompt for properties if this option is missing")
+  @DynamicParameter(names = "-l", description = "login properties in the format key=value. "
+      + "Reuse -l for each property (prompt for properties if this option is missing")
   public Map<String,String> loginProps = new LinkedHashMap<>();
 
   public AuthenticationToken getToken() {
@@ -205,8 +205,8 @@ public class ClientOpts extends Help {
   public boolean saslEnabled = false;
 
   @Parameter(names = "--config-file", description = "Read the given client config file. "
-      + "If omitted, the path searched can be specified with $ACCUMULO_CLIENT_CONF_PATH, "
-      + "which defaults to ~/.accumulo/config:$ACCUMULO_CONF_DIR/client.conf:/etc/accumulo/client.conf")
+      + "If omitted, the path searched can be specified with $ACCUMULO_CLIENT_CONF_PATH, which "
+      + "defaults to ~/.accumulo/config:$ACCUMULO_CONF_DIR/client.conf:/etc/accumulo/client.conf")
   public String clientConfigFile = null;
 
   public void startDebugLogging() {
diff --git a/core/src/main/java/org/apache/accumulo/core/cli/MapReduceClientOnRequiredTable.java b/core/src/main/java/org/apache/accumulo/core/cli/MapReduceClientOnRequiredTable.java
index 79b59e7..96ada88 100644
--- a/core/src/main/java/org/apache/accumulo/core/cli/MapReduceClientOnRequiredTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/cli/MapReduceClientOnRequiredTable.java
@@ -30,7 +30,7 @@ public class MapReduceClientOnRequiredTable extends MapReduceClientOpts {
   private String tableName;
 
   @Parameter(names = {"-tf", "--tokenFile"},
-      description = "File in hdfs containing the user's authentication token create with \"bin/accumulo create-token\"")
+      description = "User's token file in HDFS created with \"bin/accumulo create-token\"")
   private String tokenFile = "";
 
   @Override
diff --git a/core/src/main/java/org/apache/accumulo/core/cli/MapReduceClientOpts.java b/core/src/main/java/org/apache/accumulo/core/cli/MapReduceClientOpts.java
index 71d8e1c..d4eb23b 100644
--- a/core/src/main/java/org/apache/accumulo/core/cli/MapReduceClientOpts.java
+++ b/core/src/main/java/org/apache/accumulo/core/cli/MapReduceClientOpts.java
@@ -65,8 +65,10 @@ public class MapReduceClientOpts extends ClientOpts {
         if (!conn.securityOperations().hasSystemPermission(conn.whoami(),
             SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
           log.error(
-              "{} doesn't have the {} SystemPermission neccesary to obtain a delegation token. MapReduce tasks cannot automatically use the client's"
-                  + " credentials on remote servers. Delegation tokens provide a means to run MapReduce without distributing the user's credentials.",
+              "{} doesn't have the {} SystemPermission neccesary to obtain a delegation"
+                  + " token. MapReduce tasks cannot automatically use the client's"
+                  + " credentials on remote servers. Delegation tokens provide a means to run"
+                  + " MapReduce without distributing the user's credentials.",
               user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
           throw new IllegalStateException(
               conn.whoami() + " does not have permission to obtain a delegation token");
diff --git a/core/src/main/java/org/apache/accumulo/core/client/AccumuloSecurityException.java b/core/src/main/java/org/apache/accumulo/core/client/AccumuloSecurityException.java
index 0881ac2..f9bcc89 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/AccumuloSecurityException.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/AccumuloSecurityException.java
@@ -57,7 +57,8 @@ public class AccumuloSecurityException extends Exception {
       case TOKEN_EXPIRED:
         return "The supplied token expired, please update and try again";
       case INSUFFICIENT_PROPERTIES:
-        return "The login properties supplied are not sufficient for authentication. Please check the requested properties and try again";
+        return "The login properties supplied are not sufficient for authentication. "
+            + "Please check the requested properties and try again";
       case DEFAULT_SECURITY_ERROR:
       default:
         return "Unknown security exception";
diff --git a/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java b/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java
index 09c9e8b..5f2be86 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java
@@ -106,7 +106,8 @@ public class ClientConfiguration extends CompositeConfiguration {
      * @since 1.7.0
      */
     KERBEROS_SERVER_PRIMARY("kerberos.server.primary", "accumulo", PropertyType.STRING,
-        "The first component of the Kerberos principal, the 'primary', that Accumulo servers use to login");
+        "The first component of the Kerberos principal, the 'primary', "
+            + "that Accumulo servers use to login");
 
     private String key;
     private String defaultValue;
@@ -217,9 +218,9 @@ public class ClientConfiguration extends CompositeConfiguration {
         AbstractConfiguration abstractConfiguration = (AbstractConfiguration) c;
         if (!abstractConfiguration.isDelimiterParsingDisabled()
             && abstractConfiguration.getListDelimiter() != '\0') {
-          log.warn(
-              "Client configuration constructed with a Configuration that did not have list delimiter disabled or overridden, multi-valued config "
-                  + "properties may be unavailable");
+          log.warn("Client configuration constructed with a Configuration that did not have "
+              + "list delimiter disabled or overridden, multi-valued config "
+              + "properties may be unavailable");
           abstractConfiguration.setListDelimiter('\0');
         }
       }
diff --git a/core/src/main/java/org/apache/accumulo/core/client/ClientSideIteratorScanner.java b/core/src/main/java/org/apache/accumulo/core/client/ClientSideIteratorScanner.java
index b940941..ab05a12 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/ClientSideIteratorScanner.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/ClientSideIteratorScanner.java
@@ -50,11 +50,19 @@ import org.apache.hadoop.io.Text;
 /**
  * A scanner that instantiates iterators on the client side instead of on the tablet server. This
  * can be useful for testing iterators or in cases where you don't want iterators affecting the
- * performance of tablet servers.<br>
- * <br>
- * Suggested usage:<br>
- * <code>Scanner scanner = new ClientSideIteratorScanner(connector.createScanner(tableName, authorizations));</code><br>
- * <br>
+ * performance of tablet servers.
+ *
+ * <p>
+ * Suggested usage:
+ *
+ * <pre>
+ * <code>
+ * Scanner scanner = connector.createScanner(tableName, authorizations);
+ * scanner = new ClientSideIteratorScanner(scanner);
+ * </code>
+ * </pre>
+ *
+ * <p>
  * Iterators added to this scanner will be run in the client JVM. Separate scan iterators can be run
  * on the server side and client side by adding iterators to the source scanner (which will execute
  * server side) and to the client side scanner (which will execute client side).
diff --git a/core/src/main/java/org/apache/accumulo/core/client/MutationsRejectedException.java b/core/src/main/java/org/apache/accumulo/core/client/MutationsRejectedException.java
index 8bade1c..4fb6a2c 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/MutationsRejectedException.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/MutationsRejectedException.java
@@ -174,9 +174,12 @@ public class MutationsRejectedException extends AccumuloException {
    * @deprecated since 1.7.0 see {@link #getSecurityErrorCodes()}
    */
   @Deprecated
-  public Map<org.apache.accumulo.core.data.KeyExtent,Set<SecurityErrorCode>> getAuthorizationFailuresMap() {
+  // @formatter:off
+  public Map<org.apache.accumulo.core.data.KeyExtent,Set<SecurityErrorCode>>
+    getAuthorizationFailuresMap() {
     return transformKeys(af, TabletIdImpl.TID_2_KE_OLD);
   }
+  // @formatter:on
 
   /**
    * @return the internal mapping of TabletID to SecurityErrorCodes
diff --git a/core/src/main/java/org/apache/accumulo/core/client/ScannerBase.java b/core/src/main/java/org/apache/accumulo/core/client/ScannerBase.java
index 6ec3618..503d307 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/ScannerBase.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/ScannerBase.java
@@ -201,7 +201,8 @@ public interface ScannerBase extends Iterable<Entry<Key,Value>>, AutoCloseable {
    * <pre>
    * <code>
    *   // could cache this if creating many scanners to avoid RPCs.
-   *   SamplerConfiguration samplerConfig = connector.tableOperations().getSamplerConfiguration(table);
+   *   SamplerConfiguration samplerConfig =
+   *     connector.tableOperations().getSamplerConfiguration(table);
    *   // verify table's sample data is generated in an expected way before using
    *   userCode.verifySamplerConfig(samplerConfig);
    *   scanner.setSamplerCongiguration(samplerConfig);
diff --git a/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java b/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java
index 201de62..563f2a9 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java
@@ -197,8 +197,9 @@ public class ZooKeeperInstance implements Instance {
       String instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + instanceName;
       byte[] iidb = zooCache.get(instanceNamePath);
       if (iidb == null) {
-        throw new RuntimeException("Instance name " + instanceName
-            + " does not exist in zookeeper.  Run \"accumulo org.apache.accumulo.server.util.ListInstances\" to see a list.");
+        throw new RuntimeException(
+            "Instance name " + instanceName + " does not exist in zookeeper. "
+                + "Run \"accumulo org.apache.accumulo.server.util.ListInstances\" to see a list.");
       }
       instanceId = new String(iidb, UTF_8);
     }
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/CompactionConfig.java b/core/src/main/java/org/apache/accumulo/core/client/admin/CompactionConfig.java
index 7a220a7..766867f 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/CompactionConfig.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/CompactionConfig.java
@@ -18,13 +18,13 @@
 package org.apache.accumulo.core.client.admin;
 
 import static java.util.Objects.requireNonNull;
+import static org.apache.accumulo.core.client.impl.CompactionStrategyConfigUtil.DEFAULT_STRATEGY;
 
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
 import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.impl.CompactionStrategyConfigUtil;
 import org.apache.hadoop.io.Text;
 
 /**
@@ -39,7 +39,7 @@ public class CompactionConfig {
   private boolean flush = true;
   private boolean wait = true;
   private List<IteratorSetting> iterators = Collections.emptyList();
-  private CompactionStrategyConfig compactionStrategy = CompactionStrategyConfigUtil.DEFAULT_STRATEGY;
+  private CompactionStrategyConfig compactionStrategy = DEFAULT_STRATEGY;
 
   /**
    * @param start
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ClientContext.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ClientContext.java
index 0f53992..05cd058 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ClientContext.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ClientContext.java
@@ -243,14 +243,12 @@ public class ClientContext {
                 log.trace("Loaded sensitive value for {} from CredentialProvider", key);
                 return new String(value);
               } else {
-                log.trace(
-                    "Tried to load sensitive value for {} from CredentialProvider, but none was found",
-                    key);
+                log.trace("Tried to load sensitive value for {} from CredentialProvider, "
+                    + "but none was found", key);
               }
             } catch (IOException e) {
-              log.warn(
-                  "Failed to extract sensitive property ({}) from Hadoop CredentialProvider, falling back to base AccumuloConfiguration",
-                  key, e);
+              log.warn("Failed to extract sensitive property ({}) from Hadoop CredentialProvider,"
+                  + " falling back to base AccumuloConfiguration", key, e);
             }
           }
         }
@@ -312,9 +310,8 @@ public class ClientContext {
               }
             }
           } catch (IOException e) {
-            log.warn(
-                "Failed to extract sensitive properties from Hadoop CredentialProvider, falling back to accumulo-site.xml",
-                e);
+            log.warn("Failed to extract sensitive properties from Hadoop CredentialProvider, "
+                + "falling back to accumulo-site.xml", e);
           }
         }
       }
@@ -323,9 +320,9 @@ public class ClientContext {
         String credProviderPaths = config
             .getString(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey());
         if (null != credProviderPaths && !credProviderPaths.isEmpty()) {
-          org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration();
-          hadoopConf.set(CredentialProviderFactoryShim.CREDENTIAL_PROVIDER_PATH, credProviderPaths);
-          return hadoopConf;
+          org.apache.hadoop.conf.Configuration hConf = new org.apache.hadoop.conf.Configuration();
+          hConf.set(CredentialProviderFactoryShim.CREDENTIAL_PROVIDER_PATH, credProviderPaths);
+          return hConf;
         }
 
         log.trace("Did not find credential provider configuration in ClientConfiguration");
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
index ebf4f30..359800d 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
@@ -46,7 +46,8 @@ import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.trace.Tracer;
 
 public class ConnectorImpl extends Connector {
-  private static final String SYSTEM_TOKEN_NAME = "org.apache.accumulo.server.security.SystemCredentials$SystemToken";
+  private static final String SYSTEM_TOKEN_NAME = "org.apache.accumulo.server.security."
+      + "SystemCredentials$SystemToken";
   private final ClientContext context;
   private SecurityOperations secops = null;
   private TableOperationsImpl tableops = null;
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/SecurityOperationsImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/SecurityOperationsImpl.java
index 71059b1..0b2860d 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/SecurityOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/SecurityOperationsImpl.java
@@ -17,6 +17,7 @@
 package org.apache.accumulo.core.client.impl;
 
 import static com.google.common.base.Preconditions.checkArgument;
+import static org.apache.accumulo.core.client.security.SecurityErrorCode.NAMESPACE_DOESNT_EXIST;
 
 import java.nio.ByteBuffer;
 import java.util.Set;
@@ -245,8 +246,7 @@ public class SecurityOperationsImpl implements SecurityOperations {
         }
       });
     } catch (AccumuloSecurityException e) {
-      if (e
-          .getSecurityErrorCode() == org.apache.accumulo.core.client.security.SecurityErrorCode.NAMESPACE_DOESNT_EXIST)
+      if (e.getSecurityErrorCode() == NAMESPACE_DOESNT_EXIST)
         throw new AccumuloSecurityException(null, SecurityErrorCode.TABLE_DOESNT_EXIST, e);
       else
         throw e;
@@ -297,8 +297,7 @@ public class SecurityOperationsImpl implements SecurityOperations {
         }
       });
     } catch (AccumuloSecurityException e) {
-      if (e
-          .getSecurityErrorCode() == org.apache.accumulo.core.client.security.SecurityErrorCode.NAMESPACE_DOESNT_EXIST)
+      if (e.getSecurityErrorCode() == NAMESPACE_DOESNT_EXIST)
         throw new AccumuloSecurityException(null, SecurityErrorCode.TABLE_DOESNT_EXIST, e);
       else
         throw e;
@@ -349,8 +348,7 @@ public class SecurityOperationsImpl implements SecurityOperations {
         }
       });
     } catch (AccumuloSecurityException e) {
-      if (e
-          .getSecurityErrorCode() == org.apache.accumulo.core.client.security.SecurityErrorCode.NAMESPACE_DOESNT_EXIST)
+      if (e.getSecurityErrorCode() == NAMESPACE_DOESNT_EXIST)
         throw new AccumuloSecurityException(null, SecurityErrorCode.TABLE_DOESNT_EXIST, e);
       else
         throw e;
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java
index 9632cc0..2cb4bf1 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsImpl.java
@@ -551,9 +551,8 @@ public class TableOperationsImpl extends TableOperationsHelper {
           // Do not silently spin when we repeatedly fail to get the location for a tablet
           locationFailures++;
           if (5 == locationFailures || 0 == locationFailures % 50) {
-            log.warn(
-                "Having difficulty locating hosting tabletserver for split {} on table {}. Seen {} failures.",
-                split, tableName, locationFailures);
+            log.warn("Having difficulty locating hosting tabletserver for split {} on table {}."
+                + " Seen {} failures.", split, tableName, locationFailures);
           }
 
           tabLocator.invalidateCache(tl.tablet_extent);
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
index f00d95a..d6dae58 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
@@ -185,10 +185,12 @@ public class TabletServerBatchReaderIterator implements Iterator<Entry<Key,Value
             throw new RuntimeException(fatalException);
 
         if (queryThreadPool.isShutdown()) {
-          String shortMsg = "The BatchScanner was unexpectedly closed while this Iterator was still in use.";
+          String shortMsg = "The BatchScanner was unexpectedly closed while this Iterator "
+              + "was still in use.";
           log.error(shortMsg
-              + " Ensure that a reference to the BatchScanner is retained so that it can be closed when this Iterator is exhausted."
-              + " Not retaining a reference to the BatchScanner guarantees that you are leaking threads in your client JVM.");
+              + " Ensure that a reference to the BatchScanner is retained so that it can be closed"
+              + " when this Iterator is exhausted. Not retaining a reference to the BatchScanner"
+              + " guarantees that you are leaking threads in your client JVM.");
           throw new RuntimeException(shortMsg + " Ensure proper handling of the BatchScanner.");
         }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java
index 3fb8651..0226b6a 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java
@@ -83,24 +83,23 @@ import com.google.common.base.Joiner;
 /*
  * Differences from previous TabletServerBatchWriter
  *   + As background threads finish sending mutations to tablet servers they decrement memory usage
- *   + Once the queue of unprocessed mutations reaches 50% it is always pushed to the background threads,
- *      even if they are currently processing... new mutations are merged with mutations currently
- *      processing in the background
+ *   + Once the queue of unprocessed mutations reaches 50% it is always pushed
+ *     to the background threads, even if they are currently processing... new
+ *     mutations are merged with mutations currently processing in the background
  *   + Failed mutations are held for 1000ms and then re-added to the unprocessed queue
  *   + Flush holds adding of new mutations so it does not wait indefinitely
  *
  * Considerations
  *   + All background threads must catch and note Throwable
- *   + mutations for a single tablet server are only processed by one thread concurrently (if new mutations
- *      come in for a tablet server while one thread is processing mutations for it, no other thread should
- *      start processing those mutations)
+ *   + mutations for a single tablet server are only processed by one thread
+ *     concurrently (if new mutations come in for a tablet server while one
+ *     thread is processing mutations for it, no other thread should
+ *     start processing those mutations)
  *
  * Memory accounting
  *   + when a mutation enters the system memory is incremented
  *   + when a mutation successfully leaves the system memory is decremented
  *
- *
- *
  */
 
 public class TabletServerBatchWriter {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/lexicoder/PairLexicoder.java b/core/src/main/java/org/apache/accumulo/core/client/lexicoder/PairLexicoder.java
index d2d0bc5..e5db4b9 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/lexicoder/PairLexicoder.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/lexicoder/PairLexicoder.java
@@ -33,18 +33,15 @@ import org.apache.accumulo.core.util.ComparablePair;
  * component a date which is reverse sorted, we can do so with the following example:
  *
  * <pre>
- * {
- *   &#064;code
- *   StringLexicoder stringEncoder = new StringLexicoder();
- *   ReverseLexicoder&lt;Date&gt; dateEncoder = new ReverseLexicoder&lt;Date&gt;(new DateLexicoder());
- *   PairLexicoder&lt;String,Date&gt; pairLexicoder = new PairLexicoder&lt;String,Date&gt;(stringEncoder,
- *       dateEncoder);
- *   byte[] pair1 = pairLexicoder.encode(new ComparablePair&lt;String,Date&gt;(&quot;com.google&quot;, new Date()));
- *   byte[] pair2 = pairLexicoder.encode(
- *       new ComparablePair&lt;String,Date&gt;(&quot;com.google&quot;, new Date(System.currentTimeMillis() + 500)));
- *   byte[] pair3 = pairLexicoder.encode(new ComparablePair&lt;String,Date&gt;(&quot;org.apache&quot;,
- *       new Date(System.currentTimeMillis() + 1000)));
- * }
+ * <code>
+ * StringLexicoder strEncoder = new StringLexicoder();
+ * ReverseLexicoder&lt;Date&gt; dateEnc = new ReverseLexicoder&lt;&gt;(new DateLexicoder());
+ * PairLexicoder&lt;String,Date&gt; pair = new PairLexicoder&lt;&gt;(strEncoder, dateEnc);
+ * long now = System.currentTimeMillis();
+ * byte[] pair1 = pair.encode(new ComparablePair&lt;&gt;(&quot;com&quot;, new Date(now)));
+ * byte[] pair2 = pair.encode(new ComparablePair&lt;&gt;(&quot;com&quot;, new Date(now + 500)));
+ * byte[] pair3 = pair.encode(new ComparablePair&lt;&gt;(&quot;org&quot;, new Date(now + 1000)));
+ * </code>
  * </pre>
  *
  * In the example, pair2 will be sorted before pair1. pair3 will occur last since 'org' is sorted
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
index 9c7700b..4e80c18 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
@@ -144,9 +144,8 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
         Connector conn = instance.getConnector(principal, token);
         token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
       } catch (Exception e) {
-        log.warn(
-            "Failed to automatically obtain DelegationToken, Mappers/Reducers will likely fail to communicate with Accumulo",
-            e);
+        log.warn("Failed to automatically obtain DelegationToken, Mappers/Reducers will likely"
+            + " fail to communicate with Accumulo", e);
       }
     }
     // DelegationTokens can be passed securely from user to task without serializing insecurely in
@@ -722,8 +721,8 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
       boolean supportBatchScan = !(tableConfig.isOfflineScan()
           || tableConfig.shouldUseIsolatedScanners() || tableConfig.shouldUseLocalIterators());
       if (batchScan && !supportBatchScan)
-        throw new IllegalArgumentException(
-            "BatchScanner optimization not available for offline scan, isolated, or local iterators");
+        throw new IllegalArgumentException("BatchScanner optimization not available for offline"
+            + " scan, isolated, or local iterators");
 
       boolean autoAdjust = tableConfig.shouldAutoAdjustRanges();
       if (batchScan && !autoAdjust)
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java
index 6f1a872..a30f2de 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java
@@ -57,7 +57,10 @@ public class AccumuloInputFormat extends InputFormatBase<Key,Value> {
     // Override the log level from the configuration as if the RangeInputSplit has one it's the more
     // correct one to use.
     if (split instanceof org.apache.accumulo.core.client.mapreduce.RangeInputSplit) {
-      org.apache.accumulo.core.client.mapreduce.RangeInputSplit accSplit = (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split;
+      // @formatter:off
+      org.apache.accumulo.core.client.mapreduce.RangeInputSplit accSplit =
+        (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split;
+      // @formatter:on
       Level level = accSplit.getLogLevel();
       if (null != level) {
         log.setLevel(level);
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java
index a29d690..6604bf0 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloMultiTableInputFormat.java
@@ -20,6 +20,7 @@ import java.io.IOException;
 import java.util.Map;
 
 import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.mapred.InputFormatBase.RecordReaderBase;
 import org.apache.accumulo.core.client.mapreduce.InputTableConfig;
 import org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator;
 import org.apache.accumulo.core.data.Key;
@@ -67,7 +68,7 @@ public class AccumuloMultiTableInputFormat extends AbstractInputFormat<Key,Value
   public RecordReader<Key,Value> getRecordReader(InputSplit split, JobConf job, Reporter reporter)
       throws IOException {
     log.setLevel(getLogLevel(job));
-    InputFormatBase.RecordReaderBase<Key,Value> recordReader = new InputFormatBase.RecordReaderBase<Key,Value>() {
+    RecordReaderBase<Key,Value> recordReader = new RecordReaderBase<Key,Value>() {
 
       @Override
       public boolean next(Key key, Value value) throws IOException {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java
index 2752fd3..e287f49 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java
@@ -111,9 +111,8 @@ public class AccumuloOutputFormat implements OutputFormat<Text,Mutation> {
         Connector conn = instance.getConnector(principal, token);
         token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
       } catch (Exception e) {
-        log.warn(
-            "Failed to automatically obtain DelegationToken, Mappers/Reducers will likely fail to communicate with Accumulo",
-            e);
+        log.warn("Failed to automatically obtain DelegationToken, "
+            + "Mappers/Reducers will likely fail to communicate with Accumulo", e);
       }
     }
     // DelegationTokens can be passed securely from user to task without serializing insecurely in
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloRowInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloRowInputFormat.java
index aeec37d..787eacb 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloRowInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloRowInputFormat.java
@@ -56,35 +56,39 @@ public class AccumuloRowInputFormat
   public RecordReader<Text,PeekingIterator<Entry<Key,Value>>> getRecordReader(InputSplit split,
       JobConf job, Reporter reporter) throws IOException {
     log.setLevel(getLogLevel(job));
-    RecordReaderBase<Text,PeekingIterator<Entry<Key,Value>>> recordReader = new RecordReaderBase<Text,PeekingIterator<Entry<Key,Value>>>() {
-      RowIterator rowIterator;
+    // @formatter:off
+    RecordReaderBase<Text,PeekingIterator<Entry<Key,Value>>> recordReader =
+      new RecordReaderBase<Text,PeekingIterator<Entry<Key,Value>>>() {
+    // @formatter:on
+          RowIterator rowIterator;
 
-      @Override
-      public void initialize(InputSplit inSplit, JobConf job) throws IOException {
-        super.initialize(inSplit, job);
-        rowIterator = new RowIterator(scannerIterator);
-      }
+          @Override
+          public void initialize(InputSplit inSplit, JobConf job) throws IOException {
+            super.initialize(inSplit, job);
+            rowIterator = new RowIterator(scannerIterator);
+          }
 
-      @Override
-      public boolean next(Text key, PeekingIterator<Entry<Key,Value>> value) throws IOException {
-        if (!rowIterator.hasNext())
-          return false;
-        value.initialize(rowIterator.next());
-        numKeysRead = rowIterator.getKVCount();
-        key.set((currentKey = value.peek().getKey()).getRow());
-        return true;
-      }
+          @Override
+          public boolean next(Text key, PeekingIterator<Entry<Key,Value>> value)
+              throws IOException {
+            if (!rowIterator.hasNext())
+              return false;
+            value.initialize(rowIterator.next());
+            numKeysRead = rowIterator.getKVCount();
+            key.set((currentKey = value.peek().getKey()).getRow());
+            return true;
+          }
 
-      @Override
-      public Text createKey() {
-        return new Text();
-      }
+          @Override
+          public Text createKey() {
+            return new Text();
+          }
 
-      @Override
-      public PeekingIterator<Entry<Key,Value>> createValue() {
-        return new PeekingIterator<>();
-      }
-    };
+          @Override
+          public PeekingIterator<Entry<Key,Value>> createValue() {
+            return new PeekingIterator<>();
+          }
+        };
     recordReader.initialize(split, job);
     return recordReader;
   }
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
index b81d930..169fcd3 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
@@ -147,9 +147,8 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
         Connector conn = instance.getConnector(principal, token);
         token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
       } catch (Exception e) {
-        log.warn(
-            "Failed to automatically obtain DelegationToken, Mappers/Reducers will likely fail to communicate with Accumulo",
-            e);
+        log.warn("Failed to automatically obtain DelegationToken, "
+            + "Mappers/Reducers will likely fail to communicate with Accumulo", e);
       }
     }
     // DelegationTokens can be passed securely from user to task without serializing insecurely in
@@ -771,8 +770,8 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
       boolean supportBatchScan = !(tableConfig.isOfflineScan()
           || tableConfig.shouldUseIsolatedScanners() || tableConfig.shouldUseLocalIterators());
       if (batchScan && !supportBatchScan)
-        throw new IllegalArgumentException(
-            "BatchScanner optimization not available for offline scan, isolated, or local iterators");
+        throw new IllegalArgumentException("BatchScanner optimization not available for offline"
+            + " scan, isolated, or local iterators");
 
       boolean autoAdjust = tableConfig.shouldAutoAdjustRanges();
       if (batchScan && !autoAdjust)
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
index 5c5fa18..c4515a2 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
@@ -56,7 +56,10 @@ public class AccumuloInputFormat extends InputFormatBase<Key,Value> {
     // Override the log level from the configuration as if the InputSplit has one it's the more
     // correct one to use.
     if (split instanceof org.apache.accumulo.core.client.mapreduce.RangeInputSplit) {
-      org.apache.accumulo.core.client.mapreduce.RangeInputSplit accSplit = (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split;
+      // @formatter:off
+      org.apache.accumulo.core.client.mapreduce.RangeInputSplit accSplit =
+        (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split;
+      // @formatter:on
       Level level = accSplit.getLogLevel();
       if (null != level) {
         log.setLevel(level);
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
index a4ebf3a..8b82e7c 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloOutputFormat.java
@@ -112,9 +112,8 @@ public class AccumuloOutputFormat extends OutputFormat<Text,Mutation> {
         Connector conn = instance.getConnector(principal, token);
         token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
       } catch (Exception e) {
-        log.warn(
-            "Failed to automatically obtain DelegationToken, Mappers/Reducers will likely fail to communicate with Accumulo",
-            e);
+        log.warn("Failed to automatically obtain DelegationToken, "
+            + "Mappers/Reducers will likely fail to communicate with Accumulo", e);
       }
     }
     // DelegationTokens can be passed securely from user to task without serializing insecurely in
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
index b5fd34c..e99a0e6 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/InputConfigurator.java
@@ -685,8 +685,8 @@ public class InputConfigurator extends ConfiguratorBase {
         mapWritable.readFields(new DataInputStream(bais));
         bais.close();
       } catch (IOException e) {
-        throw new IllegalStateException(
-            "The table query configurations could not be deserialized from the given configuration");
+        throw new IllegalStateException("The table query configurations could not be deserialized"
+            + " from the given configuration");
       }
     }
     for (Map.Entry<Writable,Writable> entry : mapWritable.entrySet())
diff --git a/core/src/main/java/org/apache/accumulo/core/client/rfile/RFileWriter.java b/core/src/main/java/org/apache/accumulo/core/client/rfile/RFileWriter.java
index 0fb5919..a0703fd 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/rfile/RFileWriter.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/rfile/RFileWriter.java
@@ -35,14 +35,13 @@ import org.apache.commons.collections.map.LRUMap;
 
 import com.google.common.base.Preconditions;
 
-//formatter was adding spaces that checkstyle did not like, so turned off formatter
-//@formatter:off
 /**
- * This class provides an API for writing RFiles. It can be used to create file for bulk import into Accumulo using
- * {@link TableOperations#importDirectory(String, String, String, boolean)}
+ * This class provides an API for writing RFiles. It can be used to create file for bulk import into
+ * Accumulo using {@link TableOperations#importDirectory(String, String, String, boolean)}
  *
  * <p>
- * A RFileWriter has the following constraints. Violating these constraints will result in runtime exceptions.
+ * A RFileWriter has the following constraints. Violating these constraints will result in runtime
+ * exceptions.
  *
  * <ul>
  * <li>Keys must be appended in sorted order within a locality group.</li>
@@ -70,9 +69,11 @@ import com.google.common.base.Preconditions;
  *       writer.startNewLocalityGroup("groupB", "columnFam3", "columnFam4");
  *       writer.append(localityGroup2Data);
  *
- *       // The default locality group must be started last. The column families for the default group do not need to be specified.
+ *       // The default locality group must be started last.
+ *       // The column families for the default group do not need to be specified.
  *       writer.startDefaultLocalityGroup();
- *       // Data appended here can not contain any column families specified in previous locality groups.
+ *       // Data appended here can not contain any
+ *       // column families specified in previous locality groups.
  *       writer.append(defaultGroupData);
  *
  *       // This is a try-with-resources so the writer is closed here at the end of the code block.
@@ -85,7 +86,6 @@ import com.google.common.base.Preconditions;
  *
  * @since 1.8.0
  */
-// @formatter:on
 public class RFileWriter implements AutoCloseable {
 
   private FileSKVWriter writer;
diff --git a/core/src/main/java/org/apache/accumulo/core/client/sample/RowColumnSampler.java b/core/src/main/java/org/apache/accumulo/core/client/sample/RowColumnSampler.java
index 69dfe7a..82cd577 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/sample/RowColumnSampler.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/sample/RowColumnSampler.java
@@ -48,8 +48,12 @@ import com.google.common.collect.ImmutableSet;
  * To configure Accumulo to generate sample data on one thousandth of the column qualifiers, the
  * following SamplerConfiguration could be created and used to configure a table.
  *
- * <p>
- * {@code new SamplerConfiguration(RowColumnSampler.class.getName()).setOptions(ImmutableMap.of("hasher","murmur3_32","modulus","1009","qualifier","true"))}
+ * <pre>
+ * <code>
+ * new SamplerConfiguration(RowColumnSampler.class.getName()).setOptions(
+ *   ImmutableMap.of("hasher","murmur3_32","modulus","1009","qualifier","true"));
+ * </code>
+ * </pre>
  *
  * <p>
  * With this configuration, if a column qualifier is selected then all key values contains that
diff --git a/core/src/main/java/org/apache/accumulo/core/client/sample/RowSampler.java b/core/src/main/java/org/apache/accumulo/core/client/sample/RowSampler.java
index 7526394..5c202f0 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/sample/RowSampler.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/sample/RowSampler.java
@@ -35,8 +35,12 @@ import org.apache.accumulo.core.data.Key;
  * To configure Accumulo to generate sample data on one thousandth of the rows, the following
  * SamplerConfiguration could be created and used to configure a table.
  *
- * <p>
- * {@code new SamplerConfiguration(RowSampler.class.getName()).setOptions(ImmutableMap.of("hasher","murmur3_32","modulus","1009"))}
+ * <pre>
+ * <code>
+ * new SamplerConfiguration(RowSampler.class.getName()).setOptions(
+ *   ImmutableMap.of("hasher","murmur3_32","modulus","1009"));
+ * </code>
+ * </pre>
  *
  * @since 1.8.0
  */
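
On the read side, a hedged sketch (connector and table name assumed): a scanner can be
restricted to the generated sample data by passing the same configuration:

    import com.google.common.collect.ImmutableMap;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.client.sample.RowSampler;
    import org.apache.accumulo.core.client.sample.SamplerConfiguration;
    import org.apache.accumulo.core.security.Authorizations;

    SamplerConfiguration sc = new SamplerConfiguration(RowSampler.class.getName())
        .setOptions(ImmutableMap.of("hasher", "murmur3_32", "modulus", "1009"));
    Scanner scanner = connector.createScanner("sampleTable", Authorizations.EMPTY);
    // only keys falling in the one-thousandth row sample are returned
    scanner.setSamplerConfiguration(sc);
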
diff --git a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/AuthenticationToken.java b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/AuthenticationToken.java
index 5e955d3..dcb1dff 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/AuthenticationToken.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/AuthenticationToken.java
@@ -95,9 +95,11 @@ public interface AuthenticationToken extends Writable, Destroyable, Cloneable {
     public static AuthenticationToken deserialize(String tokenClassName, byte[] tokenBytes) {
       Class<? extends AuthenticationToken> tokenType = null;
       try {
+        // @formatter:off
         @SuppressWarnings("unchecked")
-        Class<? extends AuthenticationToken> tmpTokenType = (Class<? extends AuthenticationToken>) Class
-            .forName(tokenClassName);
+        Class<? extends AuthenticationToken> tmpTokenType =
+          (Class<? extends AuthenticationToken>) Class.forName(tokenClassName);
+        // @formatter:on
         tokenType = tmpTokenType;
       } catch (ClassNotFoundException e) {
         throw new IllegalArgumentException("Class not available " + tokenClassName, e);
diff --git a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/KerberosToken.java b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/KerberosToken.java
index 99f68df..8a8bae8 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/KerberosToken.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/KerberosToken.java
@@ -70,8 +70,8 @@ public class KerberosToken implements AuthenticationToken {
     // There is also KERBEROS_SSL but that appears to be deprecated/OBE
     checkArgument(
         AuthenticationMethod.KERBEROS == authMethod || AuthenticationMethod.PROXY == authMethod,
-        "KerberosToken expects KERBEROS or PROXY authentication for the current UserGroupInformation user. Saw "
-            + authMethod);
+        "KerberosToken expects KERBEROS or PROXY authentication for the current "
+            + "UserGroupInformation user. Saw " + authMethod);
   }
 
   /**
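
A hedged sketch of creating the token this check guards (the principal, keytab path,
instance name, and zookeeper hosts are hypothetical):

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.ZooKeeperInstance;
    import org.apache.accumulo.core.client.security.tokens.KerberosToken;
    import org.apache.hadoop.security.UserGroupInformation;

    // a keytab login gives the current UGI user KERBEROS authentication
    UserGroupInformation.loginUserFromKeytab("client@EXAMPLE.COM",
        "/etc/security/keytabs/client.keytab");
    KerberosToken token = new KerberosToken(); // wraps the current UGI user
    Connector conn = new ZooKeeperInstance("myInstance", "zk1:2181")
        .getConnector("client@EXAMPLE.COM", token);
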
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java b/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java
index 28c44e6..8763875 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java
@@ -442,8 +442,8 @@ public abstract class AccumuloConfiguration implements Iterable<Entry<String,Str
           ports[0] = port;
         }
       } catch (NumberFormatException e1) {
-        throw new IllegalArgumentException(
-            "Invalid port syntax. Must be a single positive integers or a range (M-N) of positive integers");
+        throw new IllegalArgumentException("Invalid port syntax. Must be a single positive "
+            + "integers or a range (M-N) of positive integers");
       }
     }
     return ports;
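
A standalone sketch (not the Accumulo implementation) of the "M-N" syntax the corrected
message describes:

    // expands "9997" to {9997} and "9997-9999" to {9997, 9998, 9999}
    static int[] parsePorts(String value) {
      if (value.matches("\\d+-\\d+")) {
        String[] bounds = value.split("-", 2);
        int low = Integer.parseInt(bounds[0]);
        int high = Integer.parseInt(bounds[1]);
        int[] ports = new int[high - low + 1];
        for (int i = 0; i < ports.length; i++) {
          ports[i] = low + i;
        }
        return ports;
      }
      return new int[] {Integer.parseInt(value)};
    }
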
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/CredentialProviderFactoryShim.java b/core/src/main/java/org/apache/accumulo/core/conf/CredentialProviderFactoryShim.java
index e837c81..8df237b 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/CredentialProviderFactoryShim.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/CredentialProviderFactoryShim.java
@@ -42,17 +42,25 @@ import org.slf4j.LoggerFactory;
 public class CredentialProviderFactoryShim {
   private static final Logger log = LoggerFactory.getLogger(CredentialProviderFactoryShim.class);
 
-  public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME = "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory";
-  public static final String HADOOP_CRED_PROVIDER_FACTORY_GET_PROVIDERS_METHOD_NAME = "getProviders";
-
-  public static final String HADOOP_CRED_PROVIDER_CLASS_NAME = "org.apache.hadoop.security.alias.CredentialProvider";
-  public static final String HADOOP_CRED_PROVIDER_GET_CREDENTIAL_ENTRY_METHOD_NAME = "getCredentialEntry";
+  // @formatter:off
+  public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME =
+    "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory";
+  public static final String HADOOP_CRED_PROVIDER_FACTORY_GET_PROVIDERS_METHOD_NAME =
+    "getProviders";
+
+  public static final String HADOOP_CRED_PROVIDER_CLASS_NAME =
+    "org.apache.hadoop.security.alias.CredentialProvider";
+  public static final String HADOOP_CRED_PROVIDER_GET_CREDENTIAL_ENTRY_METHOD_NAME =
+    "getCredentialEntry";
   public static final String HADOOP_CRED_PROVIDER_GET_ALIASES_METHOD_NAME = "getAliases";
-  public static final String HADOOP_CRED_PROVIDER_CREATE_CREDENTIAL_ENTRY_METHOD_NAME = "createCredentialEntry";
+  public static final String HADOOP_CRED_PROVIDER_CREATE_CREDENTIAL_ENTRY_METHOD_NAME =
+    "createCredentialEntry";
   public static final String HADOOP_CRED_PROVIDER_FLUSH_METHOD_NAME = "flush";
 
-  public static final String HADOOP_CRED_ENTRY_CLASS_NAME = "org.apache.hadoop.security.alias.CredentialProvider$CredentialEntry";
+  public static final String HADOOP_CRED_ENTRY_CLASS_NAME =
+    "org.apache.hadoop.security.alias.CredentialProvider$CredentialEntry";
   public static final String HADOOP_CRED_ENTRY_GET_CREDENTIAL_METHOD_NAME = "getCredential";
+  // @formatter:on
 
   public static final String CREDENTIAL_PROVIDER_PATH = "hadoop.security.credential.provider.path";
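
A hedged sketch of the reflection pattern these constants support: probing for the
Hadoop CredentialProvider API without a compile-time dependency on it:

    import java.lang.reflect.Method;

    // returns null when running against a Hadoop version without the API
    static Method findGetCredentialEntry() {
      try {
        Class<?> providerClz =
            Class.forName("org.apache.hadoop.security.alias.CredentialProvider");
        return providerClz.getMethod("getCredentialEntry", String.class);
      } catch (ClassNotFoundException | NoSuchMethodException e) {
        return null;
      }
    }
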
 
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index 5c93195..eea039f 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -46,29 +46,34 @@ public enum Property {
   // Crypto-related properties
   @Experimental
   CRYPTO_PREFIX("crypto.", null, PropertyType.PREFIX,
-      "Properties in this category related to the configuration of both default and custom crypto modules."),
+      "Properties in this category related to the configuration of both default and custom crypto"
+          + " modules."),
   @Experimental
   CRYPTO_MODULE_CLASS("crypto.module.class", "NullCryptoModule", PropertyType.STRING,
-      "Fully qualified class name of the class that implements the CryptoModule interface, to be used in setting up encryption at rest for the WAL and "
-          + "(future) other parts of the code."),
+      "Fully qualified class name of the class that implements the CryptoModule"
+          + " interface, to be used in setting up encryption at rest for the WAL and"
+          + " (future) other parts of the code."),
   @Experimental
   CRYPTO_CIPHER_SUITE("crypto.cipher.suite", "NullCipher", PropertyType.STRING,
       "Describes the cipher suite to use for the write-ahead log"),
   @Experimental
   CRYPTO_CIPHER_ALGORITHM_NAME("crypto.cipher.algorithm.name", "NullCipher", PropertyType.STRING,
-      "States the name of the algorithm used in the corresponding cipher suite. Do not make these different, unless you enjoy mysterious exceptions and bugs."),
+      "States the name of the algorithm used in the corresponding cipher suite. "
+          + "Do not make these different, unless you enjoy mysterious exceptions and bugs."),
   @Experimental
   CRYPTO_BLOCK_STREAM_SIZE("crypto.block.stream.size", "1K", PropertyType.MEMORY,
-      "The size of the buffer above the cipher stream. Used for reading files and padding walog entries."),
+      "The size of the buffer above the cipher stream."
+          + " Used for reading files and padding walog entries."),
   @Experimental
   CRYPTO_CIPHER_KEY_LENGTH("crypto.cipher.key.length", "128", PropertyType.STRING,
-      "Specifies the key length *in bits* to use for the symmetric key, should probably be 128 or 256 unless you really know what you're doing"),
+      "Specifies the key length *in bits* to use for the symmetric key, "
+          + "should probably be 128 or 256 unless you really know what you're doing"),
   @Experimental
   CRYPTO_SECURE_RNG("crypto.secure.rng", "SHA1PRNG", PropertyType.STRING,
-      "States the secure random number generator to use, and defaults to the built-in Sun SHA1PRNG"),
+      "States the secure random number generator to use, and defaults to the built-in SHA1PRNG"),
   @Experimental
   CRYPTO_SECURE_RNG_PROVIDER("crypto.secure.rng.provider", "SUN", PropertyType.STRING,
-      "States the secure random number generator provider to use, and defaults to the built-in SUN provider"),
+      "States the secure random number generator provider to use."),
   @Experimental
   CRYPTO_SECRET_KEY_ENCRYPTION_STRATEGY_CLASS("crypto.secret.key.encryption.strategy.class",
       "NullSecretKeyEncryptionStrategy", PropertyType.STRING,
@@ -76,32 +81,41 @@ public enum Property {
   @Experimental
   CRYPTO_DEFAULT_KEY_STRATEGY_HDFS_URI("crypto.default.key.strategy.hdfs.uri", "",
       PropertyType.STRING,
-      "The path relative to the top level instance directory (instance.dfs.dir) where to store the key encryption key within HDFS."),
+      "The path relative to the top level instance directory (instance.dfs.dir) where to store"
+          + " the key encryption key within HDFS."),
   @Experimental
   CRYPTO_DEFAULT_KEY_STRATEGY_KEY_LOCATION("crypto.default.key.strategy.key.location",
       "/crypto/secret/keyEncryptionKey", PropertyType.ABSOLUTEPATH,
-      "The path relative to the top level instance directory (instance.dfs.dir) where to store the key encryption key within HDFS."),
+      "The path relative to the top level instance directory (instance.dfs.dir) where to store"
+          + " the key encryption key within HDFS."),
   @Experimental
   CRYPTO_DEFAULT_KEY_STRATEGY_CIPHER_SUITE("crypto.default.key.strategy.cipher.suite", "NullCipher",
       PropertyType.STRING,
-      "The cipher suite to use when encrypting session keys with a key encryption keyThis should be set to match the overall encryption algorithm "
-          + "but with ECB mode and no padding unless you really know what you're doing and are sure you won't break internal file formats"),
+      "The cipher suite to use when encrypting session keys with a key"
+          + " encryption keyThis should be set to match the overall encryption"
+          + " algorithm but with ECB mode and no padding unless you really know what"
+          + " you're doing and are sure you won't break internal file formats"),
   @Experimental
   CRYPTO_OVERRIDE_KEY_STRATEGY_WITH_CONFIGURED_STRATEGY(
       "crypto.override.key.strategy.with.configured.strategy", "false", PropertyType.BOOLEAN,
-      "The default behavior is to record the key encryption strategy with the encrypted file, and continue to use that strategy for the life "
-          + "of that file. Sometimes, you change your strategy and want to use the new strategy, not the old one. (Most commonly, this will be "
-          + "because you have moved key material from one spot to another.)  If you want to override the recorded key strategy with the one in "
-          + "the configuration file, set this property to true."),
+      "The default behavior is to record the key encryption strategy with the"
+          + " encrypted file, and continue to use that strategy for the life of that"
+          + " file. Sometimes, you change your strategy and want to use the new"
+          + " strategy, not the old one. (Most commonly, this will be because you have"
+          + " moved key material from one spot to another.) If you want to override"
+          + " the recorded key strategy with the one in the configuration file, set"
+          + " this property to true."),
   // SSL properties local to each node (see also instance.ssl.enabled which must be consistent
   // across all nodes in an instance)
   RPC_PREFIX("rpc.", null, PropertyType.PREFIX,
-      "Properties in this category related to the configuration of SSL keys for RPC. See also instance.ssl.enabled"),
+      "Properties in this category related to the configuration of SSL keys for RPC."
+          + " See also instance.ssl.enabled"),
   RPC_SSL_KEYSTORE_PATH("rpc.javax.net.ssl.keyStore", "$ACCUMULO_CONF_DIR/ssl/keystore.jks",
       PropertyType.PATH, "Path of the keystore file for the servers' private SSL key"),
   @Sensitive
   RPC_SSL_KEYSTORE_PASSWORD("rpc.javax.net.ssl.keyStorePassword", "", PropertyType.STRING,
-      "Password used to encrypt the SSL private keystore. Leave blank to use the Accumulo instance secret"),
+      "Password used to encrypt the SSL private keystore. "
+          + "Leave blank to use the Accumulo instance secret"),
   RPC_SSL_KEYSTORE_TYPE("rpc.javax.net.ssl.keyStoreType", "jks", PropertyType.STRING,
       "Type of SSL keystore"),
   RPC_SSL_TRUSTSTORE_PATH("rpc.javax.net.ssl.trustStore", "$ACCUMULO_CONF_DIR/ssl/truststore.jks",
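
The client-side counterpart to these rpc.* server properties, as a hedged sketch
(instance name, zookeeper hosts, and truststore path are hypothetical):

    import org.apache.accumulo.core.client.ClientConfiguration;
    import org.apache.accumulo.core.client.Instance;
    import org.apache.accumulo.core.client.ZooKeeperInstance;

    ClientConfiguration conf = ClientConfiguration.loadDefault()
        .withInstance("myInstance")
        .withZkHosts("zk1:2181,zk2:2181")
        .withSsl(true) // matches instance.rpc.ssl.enabled on the servers
        .withTruststore("/path/to/ssl/truststore.jks");
    Instance instance = new ZooKeeperInstance(conf);
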
@@ -121,109 +135,140 @@ public enum Property {
       "Comma separated list of protocols that can be used to accept connections"),
   // TLSv1.2 should be used as the default when JDK6 support is dropped
   RPC_SSL_CLIENT_PROTOCOL("rpc.ssl.client.protocol", "TLSv1", PropertyType.STRING,
-      "The protocol used to connect to a secure server, must be in the list of enabled protocols on the server side (rpc.ssl.server.enabled.protocols)"),
+      "The protocol used to connect to a secure server, must be in the list of enabled protocols "
+          + "on the server side (rpc.ssl.server.enabled.protocols)"),
   /**
    * @since 1.7.0
    */
   RPC_SASL_QOP("rpc.sasl.qop", "auth", PropertyType.STRING,
-      "The quality of protection to be used with SASL. Valid values are 'auth', 'auth-int', and 'auth-conf'"),
+      "The quality of protection to be used with SASL. Valid values are 'auth', 'auth-int',"
+          + " and 'auth-conf'"),
 
   // instance properties (must be the same for every node in an instance)
   INSTANCE_PREFIX("instance.", null, PropertyType.PREFIX,
-      "Properties in this category must be consistent throughout a cloud. This is enforced and servers won't be able to communicate if these differ."),
+      "Properties in this category must be consistent throughout a cloud. "
+          + "This is enforced and servers won't be able to communicate if these differ."),
   INSTANCE_ZK_HOST("instance.zookeeper.host", "localhost:2181", PropertyType.HOSTLIST,
       "Comma separated list of zookeeper servers"),
   INSTANCE_ZK_TIMEOUT("instance.zookeeper.timeout", "30s", PropertyType.TIMEDURATION,
-      "Zookeeper session timeout; max value when represented as milliseconds should be no larger than "
+      "Zookeeper session timeout; "
+          + "max value when represented as milliseconds should be no larger than "
           + Integer.MAX_VALUE),
   @Deprecated
   INSTANCE_DFS_URI("instance.dfs.uri", "", PropertyType.URI,
-      "A url accumulo should use to connect to DFS. If this is empty, accumulo will obtain this information from the hadoop configuration. This property "
-          + "will only be used when creating new files if instance.volumes is empty. After an upgrade to 1.6.0 Accumulo will start using absolute paths to "
-          + "reference files. Files created before a 1.6.0 upgrade are referenced via relative paths. Relative paths will always be resolved using this "
-          + "config (if empty using the hadoop config)."),
+      "A url accumulo should use to connect to DFS. If this is empty, accumulo"
+          + " will obtain this information from the hadoop configuration. This property"
+          + " will only be used when creating new files if instance.volumes is empty."
+          + " After an upgrade to 1.6.0 Accumulo will start using absolute paths to"
+          + " reference files. Files created before a 1.6.0 upgrade are referenced via"
+          + " relative paths. Relative paths will always be resolved using this config"
+          + " (if empty using the hadoop config)."),
   @Deprecated
   INSTANCE_DFS_DIR("instance.dfs.dir", "/accumulo", PropertyType.ABSOLUTEPATH,
-      "HDFS directory in which accumulo instance will run. Do not change after accumulo is initialized."),
+      "HDFS directory in which accumulo instance will run. "
+          + "Do not change after accumulo is initialized."),
   @Sensitive
   INSTANCE_SECRET("instance.secret", "DEFAULT", PropertyType.STRING,
-      "A secret unique to a given instance that all servers must know in order to communicate with one another."
-          + "It should be changed prior to the initialization of Accumulo. To change it after Accumulo has been initialized, use the ChangeSecret tool "
-          + "and then update conf/accumulo-site.xml everywhere. Before using the ChangeSecret tool, make sure Accumulo is not running and you are logged "
-          + "in as the user that controls Accumulo files in HDFS.  To use the ChangeSecret tool, run the command: "
-          + "./bin/accumulo org.apache.accumulo.server.util.ChangeSecret"),
+      "A secret unique to a given instance that all servers must know in order"
+          + " to communicate with one another. It should be changed prior to the"
+          + " initialization of Accumulo. To change it after Accumulo has been"
+          + " initialized, use the ChangeSecret tool and then update"
+          + " conf/accumulo-site.xml everywhere. Before using the ChangeSecret tool,"
+          + " make sure Accumulo is not running and you are logged in as the user that"
+          + " controls Accumulo files in HDFS. To use the ChangeSecret tool, run the"
+          + " command: ./bin/accumulo org.apache.accumulo.server.util.ChangeSecret"),
   INSTANCE_VOLUMES("instance.volumes", "", PropertyType.STRING,
-      "A comma seperated list of dfs uris to use. Files will be stored across these filesystems. If this is empty, then instance.dfs.uri will be used. "
-          + "After adding uris to this list, run 'accumulo init --add-volume' and then restart tservers. If entries are removed from this list then tservers "
-          + "will need to be restarted. After a uri is removed from the list Accumulo will not create new files in that location, however Accumulo can still "
-          + "reference files created at that location before the config change. To use a comma or other reserved characters in a URI use standard URI hex "
-          + "encoding. For example replace commas with %2C."),
+      "A comma seperated list of dfs uris to use. Files will be stored across"
+          + " these filesystems. If this is empty, then instance.dfs.uri will be used."
+          + " After adding uris to this list, run 'accumulo init --add-volume' and then"
+          + " restart tservers. If entries are removed from this list then tservers"
+          + " will need to be restarted. After a uri is removed from the list Accumulo"
+          + " will not create new files in that location, however Accumulo can still"
+          + " reference files created at that location before the config change. To use"
+          + " a comma or other reserved characters in a URI use standard URI hex"
+          + " encoding. For example replace commas with %2C."),
   INSTANCE_VOLUMES_REPLACEMENTS("instance.volumes.replacements", "", PropertyType.STRING,
-      "Since accumulo stores absolute URIs changing the location of a namenode could prevent Accumulo from starting. The property helps deal with that "
-          + "situation. Provide a comma separated list of uri replacement pairs here if a namenode location changes. Each pair shold be separated with a "
-          + "space. For example, if hdfs://nn1 was replaced with hdfs://nnA and hdfs://nn2 was replaced with hdfs://nnB, then set this property to "
-          + "'hdfs://nn1 hdfs://nnA,hdfs://nn2 hdfs://nnB' Replacements must be configured for use. To see which volumes are currently in use, run "
-          + "'accumulo admin volumes -l'. To use a comma or other reserved characters in a URI use standard URI hex encoding. For example replace commas with "
-          + "%2C."),
+      "Since accumulo stores absolute URIs changing the location of a namenode "
+          + "could prevent Accumulo from starting. The property helps deal with "
+          + "that situation. Provide a comma separated list of uri replacement "
+          + "pairs here if a namenode location changes. Each pair shold be separated "
+          + "with a space. For example, if hdfs://nn1 was replaced with "
+          + "hdfs://nnA and hdfs://nn2 was replaced with hdfs://nnB, then set this "
+          + "property to 'hdfs://nn1 hdfs://nnA,hdfs://nn2 hdfs://nnB' "
+          + "Replacements must be configured for use. To see which volumes are "
+          + "currently in use, run 'accumulo admin volumes -l'. To use a comma or "
+          + "other reserved characters in a URI use standard URI hex encoding. For "
+          + "example replace commas with %2C."),
   INSTANCE_SECURITY_AUTHENTICATOR("instance.security.authenticator",
       "org.apache.accumulo.server.security.handler.ZKAuthenticator", PropertyType.CLASSNAME,
-      "The authenticator class that accumulo will use to determine if a user has privilege to perform an action"),
+      "The authenticator class that accumulo will use to determine if a user "
+          + "has privilege to perform an action"),
   INSTANCE_SECURITY_AUTHORIZOR("instance.security.authorizor",
       "org.apache.accumulo.server.security.handler.ZKAuthorizor", PropertyType.CLASSNAME,
-      "The authorizor class that accumulo will use to determine what labels a user has privilege to see"),
+      "The authorizor class that accumulo will use to determine what labels a "
+          + "user has privilege to see"),
   INSTANCE_SECURITY_PERMISSION_HANDLER("instance.security.permissionHandler",
       "org.apache.accumulo.server.security.handler.ZKPermHandler", PropertyType.CLASSNAME,
-      "The permission handler class that accumulo will use to determine if a user has privilege to perform an action"),
+      "The permission handler class that accumulo will use to determine if a "
+          + "user has privilege to perform an action"),
   INSTANCE_RPC_SSL_ENABLED("instance.rpc.ssl.enabled", "false", PropertyType.BOOLEAN,
-      "Use SSL for socket connections from clients and among accumulo services. Mutually exclusive with SASL RPC configuration."),
+      "Use SSL for socket connections from clients and among accumulo services. "
+          + "Mutually exclusive with SASL RPC configuration."),
   INSTANCE_RPC_SSL_CLIENT_AUTH("instance.rpc.ssl.clientAuth", "false", PropertyType.BOOLEAN,
       "Require clients to present certs signed by a trusted root"),
   /**
    * @since 1.7.0
    */
   INSTANCE_RPC_SASL_ENABLED("instance.rpc.sasl.enabled", "false", PropertyType.BOOLEAN,
-      "Configures Thrift RPCs to require SASL with GSSAPI which supports Kerberos authentication. Mutually exclusive with SSL RPC configuration."),
+      "Configures Thrift RPCs to require SASL with GSSAPI which supports "
+          + "Kerberos authentication. Mutually exclusive with SSL RPC configuration."),
   @Deprecated
   INSTANCE_RPC_SASL_PROXYUSERS("instance.rpc.sasl.impersonation.", null, PropertyType.PREFIX,
       "Prefix that allows configuration of users that are allowed to impersonate other users"),
   INSTANCE_RPC_SASL_ALLOWED_USER_IMPERSONATION("instance.rpc.sasl.allowed.user.impersonation", "",
       PropertyType.STRING,
-      "One-line configuration property controlling what users are allowed to impersonate other users"),
+      "One-line configuration property controlling what users are allowed to "
+          + "impersonate other users"),
   INSTANCE_RPC_SASL_ALLOWED_HOST_IMPERSONATION("instance.rpc.sasl.allowed.host.impersonation", "",
       PropertyType.STRING,
-      "One-line configuration property controlling the network locations (hostnames) that are allowed to impersonate other users"),
+      "One-line configuration property controlling the network locations "
+          + "(hostnames) that are allowed to impersonate other users"),
 
   // general properties
   GENERAL_PREFIX("general.", null, PropertyType.PREFIX,
-      "Properties in this category affect the behavior of accumulo overall, but do not have to be consistent throughout a cloud."),
+      "Properties in this category affect the behavior of accumulo overall, but "
+          + "do not have to be consistent throughout a cloud."),
   GENERAL_CLASSPATHS(AccumuloClassLoader.CLASSPATH_PROPERTY_NAME,
       AccumuloClassLoader.ACCUMULO_CLASSPATH_VALUE, PropertyType.STRING,
-      "A list of all of the places to look for a class. Order does matter, as it will look for the jar "
-          + "starting in the first location to the last. Please note, hadoop conf and hadoop lib directories NEED to be here, "
-          + "along with accumulo lib and zookeeper directory. Supports full regex on filename alone."), // needs
-                                                                                                        // special
-                                                                                                        // treatment
-                                                                                                        // in
-                                                                                                        // accumulo
-                                                                                                        // start
-                                                                                                        // jar
+      "A list of all of the places to look for a class. Order does matter, as "
+          + "it will look for the jar starting in the first location to the last. "
+          + "Please note, hadoop conf and hadoop lib directories NEED to be here, "
+          + "along with accumulo lib and zookeeper directory. Supports full regex on "
+          + " filename alone."),
+
+  // needs special treatment in accumulo start jar
   GENERAL_DYNAMIC_CLASSPATHS(AccumuloVFSClassLoader.DYNAMIC_CLASSPATH_PROPERTY_NAME,
       AccumuloVFSClassLoader.DEFAULT_DYNAMIC_CLASSPATH_VALUE, PropertyType.STRING,
-      "A list of all of the places where changes in jars or classes will force a reload of the classloader."),
+      "A list of all of the places where changes in jars or classes will force "
+          + "a reload of the classloader."),
   GENERAL_RPC_TIMEOUT("general.rpc.timeout", "120s", PropertyType.TIMEDURATION,
       "Time to wait on I/O for simple, short RPC calls"),
   @Experimental
   GENERAL_RPC_SERVER_TYPE("general.rpc.server.type", "", PropertyType.STRING,
-      "Type of Thrift server to instantiate, see org.apache.accumulo.server.rpc.ThriftServerType for more information. Only useful for benchmarking thrift servers"),
+      "Type of Thrift server to instantiate, see "
+          + "org.apache.accumulo.server.rpc.ThriftServerType for more information. "
+          + "Only useful for benchmarking thrift servers"),
   GENERAL_KERBEROS_KEYTAB("general.kerberos.keytab", "", PropertyType.PATH,
       "Path to the kerberos keytab to use. Leave blank if not using kerberoized hdfs"),
   GENERAL_KERBEROS_PRINCIPAL("general.kerberos.principal", "", PropertyType.STRING,
       "Name of the kerberos principal to use. _HOST will automatically be "
-          + "replaced by the machines hostname in the hostname portion of the principal. Leave blank if not using kerberoized hdfs"),
+          + "replaced by the machines hostname in the hostname portion of the "
+          + "principal. Leave blank if not using kerberoized hdfs"),
   GENERAL_KERBEROS_RENEWAL_PERIOD("general.kerberos.renewal.period", "30s",
       PropertyType.TIMEDURATION,
-      "The amount of time between attempts to perform "
-          + "Kerberos ticket renewals. This does not equate to how often tickets are actually renewed (which is performed at 80% of the ticket lifetime)."),
+      "The amount of time between attempts to perform Kerberos ticket renewals. "
+          + "This does not equate to how often tickets are actually renewed (which is "
+          + "performed at 80% of the ticket lifetime)."),
   GENERAL_MAX_MESSAGE_SIZE("general.server.message.size.max", "1G", PropertyType.MEMORY,
       "The maximum size of a message that can be sent to a server."),
   GENERAL_SIMPLETIMER_THREADPOOL_SIZE("general.server.simpletimer.threadpool.size", "1",
@@ -237,7 +282,8 @@ public enum Property {
   GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS("general.security.credential.provider.paths", "",
       PropertyType.STRING, "Comma-separated list of paths to CredentialProviders"),
   GENERAL_LEGACY_METRICS("general.legacy.metrics", "false", PropertyType.BOOLEAN,
-      "Use the old metric infrastructure configured by accumulo-metrics.xml, instead of Hadoop Metrics2"),
+      "Use the old metric infrastructure configured by accumulo-metrics.xml, "
+          + "instead of Hadoop Metrics2"),
   GENERAL_DELEGATION_TOKEN_LIFETIME("general.delegation.token.lifetime", "7d",
       PropertyType.TIMEDURATION,
       "The length of time that delegation tokens and secret keys are valid"),
@@ -254,7 +300,8 @@ public enum Property {
       "The port used for handling client connections on the master"),
   MASTER_TABLET_BALANCER("master.tablet.balancer",
       "org.apache.accumulo.server.master.balancer.TableLoadBalancer", PropertyType.CLASSNAME,
-      "The balancer class that accumulo will use to make tablet assignment and migration decisions."),
+      "The balancer class that accumulo will use to make tablet assignment and "
+          + "migration decisions."),
   MASTER_RECOVERY_MAXAGE("master.recovery.max.age", "60m", PropertyType.TIMEDURATION,
       "Recovery files older than this age will be removed."),
   MASTER_RECOVERY_MAXTIME("master.recovery.time.max", "30m", PropertyType.TIMEDURATION,
@@ -266,13 +313,15 @@ public enum Property {
   MASTER_BULK_TIMEOUT("master.bulk.timeout", "5m", PropertyType.TIMEDURATION,
       "The time to wait for a tablet server to process a bulk import request"),
   MASTER_BULK_RENAME_THREADS("master.bulk.rename.threadpool.size", "20", PropertyType.COUNT,
-      "The number of threads to use when moving user files to bulk ingest directories under accumulo control"),
+      "The number of threads to use when moving user files to bulk ingest "
+          + "directories under accumulo control"),
   MASTER_MINTHREADS("master.server.threads.minimum", "20", PropertyType.COUNT,
       "The minimum number of threads to use to handle incoming requests."),
   MASTER_THREADCHECK("master.server.threadcheck.time", "1s", PropertyType.TIMEDURATION,
       "The time between adjustments of the server thread pool."),
   MASTER_RECOVERY_DELAY("master.recovery.delay", "10s", PropertyType.TIMEDURATION,
-      "When a tablet server's lock is deleted, it takes time for it to completely quit. This delay gives it time before log recoveries begin."),
+      "When a tablet server's lock is deleted, it takes time for it to "
+          + "completely quit. This delay gives it time before log recoveries begin."),
   MASTER_LEASE_RECOVERY_WAITING_PERIOD("master.lease.recovery.interval", "5s",
       PropertyType.TIMEDURATION,
       "The amount of time to wait after requesting a WAL file to be recovered"),
@@ -280,10 +329,12 @@ public enum Property {
       "org.apache.accumulo.server.master.recovery.HadoopLogCloser", PropertyType.CLASSNAME,
       "A class that implements a mechansim to steal write access to a file"),
   MASTER_FATE_THREADPOOL_SIZE("master.fate.threadpool.size", "4", PropertyType.COUNT,
-      "The number of threads used to run FAult-Tolerant Executions. These are primarily table operations like merge."),
+      "The number of threads used to run FAult-Tolerant Executions. These are "
+          + "primarily table operations like merge."),
   MASTER_REPLICATION_SCAN_INTERVAL("master.replication.status.scan.interval", "30s",
       PropertyType.TIMEDURATION,
-      "Amount of time to sleep before scanning the status section of the replication table for new data"),
+      "Amount of time to sleep before scanning the status section of the "
+          + "replication table for new data"),
   MASTER_REPLICATION_COORDINATOR_PORT("master.replication.coordinator.port", "10001",
       PropertyType.PORT, "Port for the replication coordinator service"),
   MASTER_REPLICATION_COORDINATOR_MINTHREADS("master.replication.coordinator.minthreads", "4",
@@ -315,25 +366,31 @@ public enum Property {
   @Deprecated
   TSERV_MUTATION_QUEUE_MAX("tserver.mutation.queue.max", "1M", PropertyType.MEMORY,
       "This setting is deprecated. See tserver.total.mutation.queue.max. "
-          + "The amount of memory to use to store write-ahead-log mutations-per-session before flushing them. Since the buffer is per write session, consider the"
-          + " max number of concurrent writer when configuring. When using Hadoop 2, Accumulo will call hsync() on the WAL . For a small number of "
-          + "concurrent writers, increasing this buffer size decreases the frequncy of hsync calls. For a large number of concurrent writers a small buffers "
-          + "size is ok because of group commit."),
+          + "The amount of memory to use to store write-ahead-log "
+          + "mutations-per-session before flushing them. Since the buffer is per "
+          + "write session, consider the max number of concurrent writer when "
+          + "configuring. When using Hadoop 2, Accumulo will call hsync() on the WAL. "
+          + "For a small number of concurrent writers, increasing this buffer size "
+          + "decreases the frequncy of hsync calls. For a large number of concurrent "
+          + "writers a small buffers size is ok because of group commit."),
   TSERV_TOTAL_MUTATION_QUEUE_MAX("tserver.total.mutation.queue.max", "50M", PropertyType.MEMORY,
       "The amount of memory used to store write-ahead-log mutations before flushing them."),
   TSERV_TABLET_SPLIT_FINDMIDPOINT_MAXOPEN("tserver.tablet.split.midpoint.files.max", "300",
       PropertyType.COUNT,
-      "To find a tablets split points, all index files are opened. This setting determines how many index "
-          + "files can be opened at once. When there are more index files than this setting multiple passes "
-          + "must be made, which is slower. However opening too many files at once can cause problems."),
+      "To find a tablets split points, all index files are opened. This setting "
+          + "determines how many index files can be opened at once. When there are "
+          + "more index files than this setting multiple passes must be made, which is "
+          + "slower. However opening too many files at once can cause problems."),
   TSERV_WALOG_MAX_SIZE("tserver.walog.max.size", "1G", PropertyType.MEMORY,
-      "The maximum size for each write-ahead log. See comment for property tserver.memory.maps.max"),
+      "The maximum size for each write-ahead log. See comment for tserver.memory.maps.max"),
   TSERV_WALOG_MAX_AGE("tserver.walog.max.age", "24h", PropertyType.TIMEDURATION,
       "The maximum age for each write-ahead log."),
   TSERV_WALOG_TOLERATED_CREATION_FAILURES("tserver.walog.tolerated.creation.failures", "50",
       PropertyType.COUNT,
-      "The maximum number of failures tolerated when creating a new WAL file.  Values < 0 will allow unlimited creation failures."
-          + " Exceeding this number of failures consecutively trying to create a new WAL causes the TabletServer to exit."),
+      "The maximum number of failures tolerated when creating a new WAL file. "
+          + "Negative values will allow unlimited creation failures. Exceeding this "
+          + "number of failures consecutively trying to create a new WAL causes the "
+          + "TabletServer to exit."),
   TSERV_WALOG_TOLERATED_WAIT_INCREMENT("tserver.walog.tolerated.wait.increment", "1000ms",
       PropertyType.TIMEDURATION,
       "The amount of time to wait between failures to create or write a WALog."),
@@ -348,23 +405,29 @@ public enum Property {
   TSERV_SCAN_MAX_OPENFILES("tserver.scan.files.open.max", "100", PropertyType.COUNT,
       "Maximum total files that all tablets in a tablet server can open for scans. "),
   TSERV_MAX_IDLE("tserver.files.open.idle", "1m", PropertyType.TIMEDURATION,
-      "Tablet servers leave previously used files open for future queries. "
-          + "This setting determines how much time an unused file should be kept open until it is closed."),
+      "Tablet servers leave previously used files open for future queries. This "
+          + "setting determines how much time an unused file should be kept open until "
+          + "it is closed."),
   TSERV_NATIVEMAP_ENABLED("tserver.memory.maps.native.enabled", "true", PropertyType.BOOLEAN,
-      "An in-memory data store for accumulo implemented in c++ that increases the amount of data accumulo can hold in memory and avoids Java GC pauses."),
+      "An in-memory data store for accumulo implemented in c++ that increases "
+          + "the amount of data accumulo can hold in memory and avoids Java GC " + "pauses."),
   TSERV_MAXMEM("tserver.memory.maps.max", "1G", PropertyType.MEMORY,
-      "Maximum amount of memory that can be used to buffer data written to a tablet server. There are two other properties that can effectively limit memory"
-          + " usage table.compaction.minor.logs.threshold and tserver.walog.max.size. Ensure that table.compaction.minor.logs.threshold *"
-          + " tserver.walog.max.size >= this property."),
+      "Maximum amount of memory that can be used to buffer data written to a"
+          + " tablet server. There are two other properties that can effectively limit"
+          + " memory usage table.compaction.minor.logs.threshold and"
+          + " tserver.walog.max.size. Ensure that table.compaction.minor.logs.threshold"
+          + " * tserver.walog.max.size >= this property."),
   TSERV_MEM_MGMT("tserver.memory.manager",
       "org.apache.accumulo.server.tabletserver.LargestFirstMemoryManager", PropertyType.CLASSNAME,
       "An implementation of MemoryManger that accumulo will use."),
   TSERV_SESSION_MAXIDLE("tserver.session.idle.max", "1m", PropertyType.TIMEDURATION,
-      "When a tablet server's SimpleTimer thread triggers to check "
-          + "idle sessions, this configurable option will be used to evaluate scan sessions to determine if they can be closed due to inactivity"),
+      "When a tablet server's SimpleTimer thread triggers to check idle"
+          + " sessions, this configurable option will be used to evaluate scan sessions"
+          + " to determine if they can be closed due to inactivity"),
   TSERV_UPDATE_SESSION_MAXIDLE("tserver.session.update.idle.max", "1m", PropertyType.TIMEDURATION,
-      "When a tablet server's SimpleTimer thread triggers to check "
-          + "idle sessions, this configurable option will be used to evaluate update sessions to determine if they can be closed due to inactivity"),
+      "When a tablet server's SimpleTimer thread triggers to check idle"
+          + " sessions, this configurable option will be used to evaluate update"
+          + " sessions to determine if they can be closed due to inactivity"),
   TSERV_READ_AHEAD_MAXCONCURRENT("tserver.readahead.concurrent.max", "16", PropertyType.COUNT,
       "The maximum number of concurrent read ahead that will execute. This effectively"
           + " limits the number of long running scans that can run concurrently per tserver."),
@@ -376,7 +439,8 @@ public enum Property {
   TSERV_MAJC_MAXCONCURRENT("tserver.compaction.major.concurrent.max", "3", PropertyType.COUNT,
       "The maximum number of concurrent major compactions for a tablet server"),
   TSERV_MAJC_THROUGHPUT("tserver.compaction.major.throughput", "0B", PropertyType.MEMORY,
-      "Maximum number of bytes to read or write per second over all major compactions on a TabletServer, or 0B for unlimited."),
+      "Maximum number of bytes to read or write per second over all major"
+          + " compactions on a TabletServer, or 0B for unlimited."),
   TSERV_MINC_MAXCONCURRENT("tserver.compaction.minor.concurrent.max", "4", PropertyType.COUNT,
       "The maximum number of concurrent minor compactions for a tablet server"),
   TSERV_MAJC_TRACE_PERCENT("tserver.compaction.major.trace.percent", "0.1", PropertyType.FRACTION,
@@ -389,20 +453,27 @@ public enum Property {
       "The number of concurrent threads that will load bloom filters in the background. "
           + "Setting this to zero will make bloom filters load in the foreground."),
   TSERV_MONITOR_FS("tserver.monitor.fs", "true", PropertyType.BOOLEAN,
-      "When enabled the tserver will monitor file systems and kill itself when one switches from rw to ro. This is usually and indication that Linux has"
+      "When enabled the tserver will monitor file systems and kill itself when"
+          + " one switches from rw to ro. This is usually and indication that Linux has"
           + " detected a bad disk."),
   TSERV_MEMDUMP_DIR("tserver.dir.memdump", "/tmp", PropertyType.PATH,
-      "A long running scan could possibly hold memory that has been minor compacted. To prevent this, the in memory map is dumped to a local file and the "
-          + "scan is switched to that local file. We can not switch to the minor compacted file because it may have been modified by iterators. The file "
-          + "dumped to the local dir is an exact copy of what was in memory."),
+      "A long running scan could possibly hold memory that has been minor"
+          + " compacted. To prevent this, the in memory map is dumped to a local file"
+          + " and the scan is switched to that local file. We can not switch to the"
+          + " minor compacted file because it may have been modified by iterators. The"
+          + " file dumped to the local dir is an exact copy of what was in memory."),
   TSERV_BULK_PROCESS_THREADS("tserver.bulk.process.threads", "1", PropertyType.COUNT,
-      "The master will task a tablet server with pre-processing a bulk file prior to assigning it to the appropriate tablet servers. This configuration"
-          + " value controls the number of threads used to process the files."),
+      "The master will task a tablet server with pre-processing a bulk file"
+          + " prior to assigning it to the appropriate tablet servers. This"
+          + " configuration value controls the number of threads used to process the" + " files."),
   TSERV_BULK_ASSIGNMENT_THREADS("tserver.bulk.assign.threads", "1", PropertyType.COUNT,
-      "The master delegates bulk file processing and assignment to tablet servers. After the bulk file has been processed, the tablet server will assign"
-          + " the file to the appropriate tablets on all servers. This property controls the number of threads used to communicate to the other servers."),
+      "The master delegates bulk file processing and assignment to tablet"
+          + " servers. After the bulk file has been processed, the tablet server will"
+          + " assign the file to the appropriate tablets on all servers. This property"
+          + " controls the number of threads used to communicate to the other servers."),
   TSERV_BULK_RETRY("tserver.bulk.retry.max", "5", PropertyType.COUNT,
-      "The number of times the tablet server will attempt to assign a file to a tablet as it migrates and splits."),
+      "The number of times the tablet server will attempt to assign a file to a"
+          + " tablet as it migrates and splits."),
   TSERV_BULK_TIMEOUT("tserver.bulk.timeout", "5m", PropertyType.TIMEDURATION,
       "The time to wait for a tablet server to process a bulk import request."),
   TSERV_MINTHREADS("tserver.server.threads.minimum", "20", PropertyType.COUNT,
@@ -412,13 +483,17 @@ public enum Property {
   TSERV_MAX_MESSAGE_SIZE("tserver.server.message.size.max", "1G", PropertyType.MEMORY,
       "The maximum size of a message that can be sent to a tablet server."),
   TSERV_HOLD_TIME_SUICIDE("tserver.hold.time.max", "5m", PropertyType.TIMEDURATION,
-      "The maximum time for a tablet server to be in the \"memory full\" state. If the tablet server cannot write out memory"
-          + " in this much time, it will assume there is some failure local to its node, and quit. A value of zero is equivalent to forever."),
+      "The maximum time for a tablet server to be in the \"memory full\" state."
+          + " If the tablet server cannot write out memory in this much time, it will"
+          + " assume there is some failure local to its node, and quit. A value of zero"
+          + " is equivalent to forever."),
   TSERV_WAL_BLOCKSIZE("tserver.wal.blocksize", "0", PropertyType.MEMORY,
-      "The size of the HDFS blocks used to write to the Write-Ahead log. If zero, it will be 110% of tserver.walog.max.size (that is, try to use just one"
-          + " block)"),
+      "The size of the HDFS blocks used to write to the Write-Ahead log. If"
+          + " zero, it will be 110% of tserver.walog.max.size (that is, try to use"
+          + " just one block)"),
   TSERV_WAL_REPLICATION("tserver.wal.replication", "0", PropertyType.COUNT,
-      "The replication to use when writing the Write-Ahead log to HDFS. If zero, it will use the HDFS default replication setting."),
+      "The replication to use when writing the Write-Ahead log to HDFS. If"
+          + " zero, it will use the HDFS default replication setting."),
   TSERV_RECOVERY_MAX_CONCURRENT("tserver.recovery.concurrent.max", "2", PropertyType.COUNT,
       "The maximum number of threads to use to sort logs during" + " recovery"),
   TSERV_SORT_BUFFER_SIZE("tserver.sort.buffer.size", "200M", PropertyType.MEMORY,
@@ -426,16 +501,19 @@ public enum Property {
   TSERV_ARCHIVE_WALOGS("tserver.archive.walogs", "false", PropertyType.BOOLEAN,
       "Keep copies of the WALOGs for debugging purposes"),
   TSERV_WORKQ_THREADS("tserver.workq.threads", "2", PropertyType.COUNT,
-      "The number of threads for the distributed work queue. These threads are used for copying failed bulk files."),
+      "The number of threads for the distributed work queue. These threads are"
+          + " used for copying failed bulk files."),
   TSERV_WAL_SYNC("tserver.wal.sync", "true", PropertyType.BOOLEAN,
-      "Use the SYNC_BLOCK create flag to sync WAL writes to disk. Prevents problems recovering from sudden system resets."),
+      "Use the SYNC_BLOCK create flag to sync WAL writes to disk. Prevents"
+          + " problems recovering from sudden system resets."),
   @Deprecated
   TSERV_WAL_SYNC_METHOD("tserver.wal.sync.method", "hsync", PropertyType.STRING,
       "This property is deprecated. Use table.durability instead."),
   TSERV_ASSIGNMENT_DURATION_WARNING("tserver.assignment.duration.warning", "10m",
       PropertyType.TIMEDURATION,
-      "The amount of time an assignment can run "
-          + " before the server will print a warning along with the current stack trace. Meant to help debug stuck assignments"),
+      "The amount of time an assignment can run before the server will print a"
+          + " warning along with the current stack trace. Meant to help debug stuck"
+          + " assignments"),
   TSERV_REPLICATION_REPLAYERS("tserver.replication.replayer.", null, PropertyType.PREFIX,
       "Allows configuration of implementation used to apply replicated data"),
   TSERV_REPLICATION_DEFAULT_HANDLER("tserver.replication.default.replayer",
@@ -446,7 +524,8 @@ public enum Property {
   TSERV_ASSIGNMENT_MAXCONCURRENT("tserver.assignment.concurrent.max", "2", PropertyType.COUNT,
       "The number of threads available to load tablets. Recoveries are still performed serially."),
   TSERV_SLOW_FLUSH_MILLIS("tserver.slow.flush.time", "100ms", PropertyType.TIMEDURATION,
-      "If a flush to the write-ahead log takes longer than this period of time, debugging information will written, and may result in a log rollover."),
+      "If a flush to the write-ahead log takes longer than this period of time,"
+          + " debugging information will written, and may result in a log rollover."),
 
   // accumulo garbage collector properties
   GC_PREFIX("gc.", null, PropertyType.PREFIX,
@@ -496,9 +575,11 @@ public enum Property {
   MONITOR_SSL_TRUSTSTORETYPE("monitor.ssl.trustStoreType", "jks", PropertyType.STRING,
       "Type of SSL truststore"),
   MONITOR_SSL_INCLUDE_CIPHERS("monitor.ssl.include.ciphers", "", PropertyType.STRING,
-      "A comma-separated list of allows SSL Ciphers, see monitor.ssl.exclude.ciphers to disallow ciphers"),
+      "A comma-separated list of allows SSL Ciphers, see"
+          + " monitor.ssl.exclude.ciphers to disallow ciphers"),
   MONITOR_SSL_EXCLUDE_CIPHERS("monitor.ssl.exclude.ciphers", "", PropertyType.STRING,
-      "A comma-separated list of disallowed SSL Ciphers, see mmonitor.ssl.include.ciphers to allow ciphers"),
+      "A comma-separated list of disallowed SSL Ciphers, see"
+          + " monitor.ssl.include.ciphers to allow ciphers"),
   MONITOR_SSL_INCLUDE_PROTOCOLS("monitor.ssl.include.protocols", "TLSv1,TLSv1.1,TLSv1.2",
       PropertyType.STRING, "A comma-separate list of allowed SSL protocols"),
 
@@ -527,43 +608,51 @@ public enum Property {
       "The password for the user used to store distributed traces"),
   @Sensitive
   TRACE_TOKEN_PROPERTY_PREFIX("trace.token.property.", null, PropertyType.PREFIX,
-      "The prefix used to create a token for storing distributed traces. For each propetry required by trace.token.type, place this prefix in front of it."),
+      "The prefix used to create a token for storing distributed traces. For"
+          + " each propetry required by trace.token.type, place this prefix in front of it."),
   TRACE_TOKEN_TYPE("trace.token.type", PasswordToken.class.getName(), PropertyType.CLASSNAME,
       "An AuthenticationToken type supported by the authorizer"),
 
   // per table properties
   TABLE_PREFIX("table.", null, PropertyType.PREFIX,
-      "Properties in this category affect tablet server treatment of tablets, but can be configured "
-          + "on a per-table basis. Setting these properties in the site file will override the default globally "
-          + "for all tables and not any specific table. However, both the default and the global setting can be "
-          + "overridden per table using the table operations API or in the shell, which sets the overridden value "
-          + "in zookeeper. Restarting accumulo tablet servers after setting these properties in the site file "
-          + "will cause the global setting to take effect. However, you must use the API or the shell to change "
-          + "properties in zookeeper that are set on a table."),
+      "Properties in this category affect tablet server treatment of tablets,"
+          + " but can be configured on a per-table basis. Setting these properties in"
+          + " the site file will override the default globally for all tables and not"
+          + " any specific table. However, both the default and the global setting can"
+          + " be overridden per table using the table operations API or in the shell,"
+          + " which sets the overridden value in zookeeper. Restarting accumulo tablet"
+          + " servers after setting these properties in the site file will cause the"
+          + " global setting to take effect. However, you must use the API or the shell"
+          + " to change properties in zookeeper that are set on a table."),
   TABLE_ARBITRARY_PROP_PREFIX("table.custom.", null, PropertyType.PREFIX,
       "Prefix to be used for user defined arbitrary properties."),
   TABLE_MAJC_RATIO("table.compaction.major.ratio", "3", PropertyType.FRACTION,
-      "minimum ratio of total input size to maximum input file size for running a major compactionWhen adjusting this property you may want to also "
-          + "adjust table.file.max. Want to avoid the situation where only merging minor compactions occur."),
+      "minimum ratio of total input size to maximum input file size for running"
+          + " a major compactionWhen adjusting this property you may want to also"
+          + " adjust table.file.max. Want to avoid the situation where only merging"
+          + " minor compactions occur."),
   TABLE_MAJC_COMPACTALL_IDLETIME("table.compaction.major.everything.idle", "1h",
       PropertyType.TIMEDURATION,
-      "After a tablet has been idle (no mutations) for this time period it may have all "
-          + "of its files compacted into one. There is no guarantee an idle tablet will be compacted. "
-          + "Compactions of idle tablets are only started when regular compactions are not running. Idle "
-          + "compactions only take place for tablets that have one or more files."),
+      "After a tablet has been idle (no mutations) for this time period it may"
+          + " have all of its files compacted into one. There is no guarantee an idle"
+          + " tablet will be compacted. Compactions of idle tablets are only started"
+          + " when regular compactions are not running. Idle compactions only take"
+          + " place for tablets that have one or more files."),
   TABLE_SPLIT_THRESHOLD("table.split.threshold", "1G", PropertyType.MEMORY,
       "When combined size of files exceeds this amount a tablet is split."),
   TABLE_MAX_END_ROW_SIZE("table.split.endrow.size.max", "10K", PropertyType.MEMORY,
       "Maximum size of end row"),
   TABLE_MINC_LOGS_MAX("table.compaction.minor.logs.threshold", "3", PropertyType.COUNT,
-      "When there are more than this many write-ahead logs against a tablet, it will be minor compacted. See comment for property tserver.memory.maps.max"),
+      "When there are more than this many write-ahead logs against a tablet, it"
+          + " will be minor compacted. See comment for property" + " tserver.memory.maps.max"),
   TABLE_MINC_COMPACT_IDLETIME("table.compaction.minor.idle", "5m", PropertyType.TIMEDURATION,
       "After a tablet has been idle (no mutations) for this time period it may have its "
           + "in-memory map flushed to disk in a minor compaction. There is no guarantee an idle "
           + "tablet will be compacted."),
   TABLE_MINC_MAX_MERGE_FILE_SIZE("table.compaction.minor.merge.file.size.max", "0",
       PropertyType.MEMORY,
-      "The max file size used for a merging minor compaction. The default value of 0 disables a max file size."),
+      "The max file size used for a merging minor compaction. The default value"
+          + " of 0 disables a max file size."),
   TABLE_SCAN_MAXMEM("table.scan.max.memory", "512K", PropertyType.MEMORY,
       "The maximum amount of memory that will be used to cache results of a client query/scan. "
           + "Once this limit is reached, the buffered data is sent to the client."),
@@ -571,85 +660,104 @@ public enum Property {
       "Change the type of file a table writes"),
   TABLE_LOAD_BALANCER("table.balancer",
       "org.apache.accumulo.server.master.balancer.DefaultLoadBalancer", PropertyType.STRING,
-      "This property can be set to allow the LoadBalanceByTable load balancer to change the called Load Balancer for this table"),
+      "This property can be set to allow the LoadBalanceByTable load balancer"
+          + " to change the called Load Balancer for this table"),
   TABLE_FILE_COMPRESSION_TYPE("table.file.compress.type", "gz", PropertyType.STRING,
       "One of gz,snappy,lzo,none"),
   TABLE_FILE_COMPRESSED_BLOCK_SIZE("table.file.compress.blocksize", "100K", PropertyType.MEMORY,
-      "Similar to the hadoop io.seqfile.compress.blocksize setting, so that files have better query performance. The maximum value for this is "
-          + Integer.MAX_VALUE
-          + ". (This setting is the size threshold prior to compression, and applies even compression is disabled.)"),
+      "Similar to the hadoop io.seqfile.compress.blocksize setting, so that"
+          + " files have better query performance. The maximum value for this is "
+          + Integer.MAX_VALUE + ". (This setting is the size threshold prior to"
+          + " compression, and applies even compression is disabled.)"),
   TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX("table.file.compress.blocksize.index", "128K",
       PropertyType.MEMORY,
-      "Determines how large index blocks can be in files that support multilevel indexes. The maximum value for this is "
-          + Integer.MAX_VALUE + "."
-          + " (This setting is the size threshold prior to compression, and applies even compression is disabled.)"),
+      "Determines how large index blocks can be in files that support"
+          + " multilevel indexes. The maximum value for this is " + Integer.MAX_VALUE
+          + ". (This setting is the size threshold prior to compression, and applies"
+          + " even compression is disabled.)"),
   TABLE_FILE_BLOCK_SIZE("table.file.blocksize", "0B", PropertyType.MEMORY,
-      "Overrides the hadoop dfs.block.size setting so that files have better query performance. The maximum value for this is "
-          + Integer.MAX_VALUE),
+      "Overrides the hadoop dfs.block.size setting so that files have better"
+          + " query performance. The maximum value for this is " + Integer.MAX_VALUE),
   TABLE_FILE_REPLICATION("table.file.replication", "0", PropertyType.COUNT,
       "Determines how many replicas to keep of a tables' files in HDFS. "
           + "When this value is LTE 0, HDFS defaults are used."),
   TABLE_FILE_MAX("table.file.max", "15", PropertyType.COUNT,
-      "Determines the max # of files each tablet in a table can have. When adjusting this property you may want to consider adjusting"
-          + " table.compaction.major.ratio also. Setting this property to 0 will make it default to tserver.scan.files.open.max-1, this will prevent a"
-          + " tablet from having more files than can be opened. Setting this property low may throttle ingest and increase query performance."),
+      "Determines the max # of files each tablet in a table can have. When"
+          + " adjusting this property you may want to consider adjusting"
+          + " table.compaction.major.ratio also. Setting this property to 0 will make"
+          + " it default to tserver.scan.files.open.max-1, this will prevent a tablet"
+          + " from having more files than can be opened. Setting this property low may"
+          + " throttle ingest and increase query performance."),
   @Deprecated
   TABLE_WALOG_ENABLED("table.walog.enabled", "true", PropertyType.BOOLEAN,
       "This setting is deprecated.  Use table.durability=none instead."),
   TABLE_BLOOM_ENABLED("table.bloom.enabled", "false", PropertyType.BOOLEAN,
       "Use bloom filters on this table."),
   TABLE_BLOOM_LOAD_THRESHOLD("table.bloom.load.threshold", "1", PropertyType.COUNT,
-      "This number of seeks that would actually use a bloom filter must occur before a file's bloom filter is loaded."
-          + " Set this to zero to initiate loading of bloom filters when a file is opened."),
+      "This number of seeks that would actually use a bloom filter must occur"
+          + " before a file's bloom filter is loaded. Set this to zero to initiate"
+          + " loading of bloom filters when a file is opened."),
   TABLE_BLOOM_SIZE("table.bloom.size", "1048576", PropertyType.COUNT,
       "Bloom filter size, as number of keys."),
   TABLE_BLOOM_ERRORRATE("table.bloom.error.rate", "0.5%", PropertyType.FRACTION,
       "Bloom filter error rate."),
   TABLE_BLOOM_KEY_FUNCTOR("table.bloom.key.functor",
       "org.apache.accumulo.core.file.keyfunctor.RowFunctor", PropertyType.CLASSNAME,
-      "A function that can transform the key prior to insertion and check of bloom filter. org.apache.accumulo.core.file.keyfunctor.RowFunctor,"
-          + ",org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor, and org.apache.accumulo.core.file.keyfunctor.ColumnQualifierFunctor are"
-          + " allowable values. One can extend any of the above mentioned classes to perform specialized parsing of the key. "),
+      "A function that can transform the key prior to insertion and check of"
+          + " bloom filter. org.apache.accumulo.core.file.keyfunctor.RowFunctor,"
+          + " org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor, and"
+          + " org.apache.accumulo.core.file.keyfunctor.ColumnQualifierFunctor are"
+          + " allowable values. One can extend any of the above mentioned classes to"
+          + " perform specialized parsing of the key. "),
   TABLE_BLOOM_HASHTYPE("table.bloom.hash.type", "murmur", PropertyType.STRING,
       "The bloom filter hash type"),
   TABLE_DURABILITY("table.durability", "sync", PropertyType.DURABILITY,
-      "The durability used to write to the write-ahead log."
-          + " Legal values are: none, which skips the write-ahead log; "
-          + "log, which sends the data to the write-ahead log, but does nothing to make it durable; "
-          + "flush, which pushes data to the file system; and "
-          + "sync, which ensures the data is written to disk."),
+      "The durability used to write to the write-ahead log. Legal values are:"
+          + " none, which skips the write-ahead log; log, which sends the data to the"
+          + " write-ahead log, but does nothing to make it durable; flush, which pushes"
+          + " data to the file system; and sync, which ensures the data is written to disk."),
   TABLE_FAILURES_IGNORE("table.failures.ignore", "false", PropertyType.BOOLEAN,
-      "If you want queries for your table to hang or fail when data is missing from the system, "
-          + "then set this to false. When this set to true missing data will be reported but queries "
-          + "will still run possibly returning a subset of the data."),
+      "If you want queries for your table to hang or fail when data is missing"
+          + " from the system, then set this to false. When this set to true missing"
+          + " data will be reported but queries will still run possibly returning a"
+          + " subset of the data."),
   TABLE_DEFAULT_SCANTIME_VISIBILITY("table.security.scan.visibility.default", "",
       PropertyType.STRING,
-      "The security label that will be assumed at scan time if an entry does not have a visibility set.\n"
-          + "Note: An empty security label is displayed as []. The scan results will show an empty visibility even if "
-          + "the visibility from this setting is applied to the entry.\n"
-          + "CAUTION: If a particular key has an empty security label AND its table's default visibility is also empty, "
-          + "access will ALWAYS be granted for users with permission to that table. Additionally, if this field is changed, "
-          + "all existing data with an empty visibility label will be interpreted with the new label on the next scan."),
+      "The security label that will be assumed at scan time if an entry does"
+          + " not have a visibility expression.\n"
+          + "Note: An empty security label is displayed as []. The scan results"
+          + " will show an empty visibility even if the visibility from this"
+          + " setting is applied to the entry.\n"
+          + "CAUTION: If a particular key has an empty security label AND its"
+          + " table's default visibility is also empty, access will ALWAYS be"
+          + " granted for users with permission to that table. Additionally, if this"
+          + " field is changed, all existing data with an empty visibility label"
+          + " will be interpreted with the new label on the next scan."),
   TABLE_LOCALITY_GROUPS("table.groups.enabled", "", PropertyType.STRING,
       "A comma separated list of locality group names to enable for this table."),
   TABLE_CONSTRAINT_PREFIX("table.constraint.", null, PropertyType.PREFIX,
-      "Properties in this category are per-table properties that add constraints to a table. "
-          + "These properties start with the category prefix, followed by a number, and their values "
-          + "correspond to a fully qualified Java class that implements the Constraint interface.\n"
-          + "For example:\ntable.constraint.1 = org.apache.accumulo.core.constraints.MyCustomConstraint\n"
-          + "and:\ntable.constraint.2 = my.package.constraints.MySecondConstraint"),
+      "Properties in this category are per-table properties that add"
+          + " constraints to a table. These properties start with the category"
+          + " prefix, followed by a number, and their values correspond to a fully"
+          + " qualified Java class that implements the Constraint interface.\n" + "For example:\n"
+          + "table.constraint.1 = org.apache.accumulo.core.constraints.MyCustomConstraint\n"
+          + "and:\n" + " table.constraint.2 = my.package.constraints.MySecondConstraint"),
   TABLE_INDEXCACHE_ENABLED("table.cache.index.enable", "true", PropertyType.BOOLEAN,
       "Determines whether index cache is enabled."),
   TABLE_BLOCKCACHE_ENABLED("table.cache.block.enable", "false", PropertyType.BOOLEAN,
       "Determines whether file block cache is enabled."),
   TABLE_ITERATOR_PREFIX("table.iterator.", null, PropertyType.PREFIX,
-      "Properties in this category specify iterators that are applied at various stages (scopes) of interaction "
-          + "with a table. These properties start with the category prefix, followed by a scope (minc, majc, scan, etc.), "
-          + "followed by a period, followed by a name, as in table.iterator.scan.vers, or table.iterator.scan.custom. "
-          + "The values for these properties are a number indicating the ordering in which it is applied, and a class name "
-          + "such as:\n table.iterator.scan.vers = 10,org.apache.accumulo.core.iterators.VersioningIterator\n "
-          + "These iterators can take options if additional properties are set that look like this property, "
-          + "but are suffixed with a period, followed by 'opt' followed by another period, and a property name.\n"
+      "Properties in this category specify iterators that are applied at"
+          + " various stages (scopes) of interaction with a table. These properties"
+          + " start with the category prefix, followed by a scope (minc, majc, scan,"
+          + " etc.), followed by a period, followed by a name, as in"
+          + " table.iterator.scan.vers, or table.iterator.scan.custom. The values for"
+          + " these properties are a number indicating the ordering in which it is"
+          + " applied, and a class name such as:\n"
+          + "table.iterator.scan.vers = 10,org.apache.accumulo.core.iterators.VersioningIterator\n"
+          + "These iterators can take options if additional properties are set that"
+          + " look like this property, but are suffixed with a period, followed by 'opt'"
+          + " followed by another period, and a property name.\n"
           + "For example, table.iterator.minc.vers.opt.maxVersions = 3"),
   TABLE_ITERATOR_SCAN_PREFIX(TABLE_ITERATOR_PREFIX.getKey() + IteratorScope.scan.name() + ".", null,
       PropertyType.PREFIX, "Convenience prefix to find options for the scan iterator scope"),
@@ -658,12 +766,15 @@ public enum Property {
   TABLE_ITERATOR_MAJC_PREFIX(TABLE_ITERATOR_PREFIX.getKey() + IteratorScope.majc.name() + ".", null,
       PropertyType.PREFIX, "Convenience prefix to find options for the majc iterator scope"),
   TABLE_LOCALITY_GROUP_PREFIX("table.group.", null, PropertyType.PREFIX,
-      "Properties in this category are per-table properties that define locality groups in a table. These properties start "
-          + "with the category prefix, followed by a name, followed by a period, and followed by a property for that group.\n"
-          + "For example table.group.group1=x,y,z sets the column families for a group called group1. Once configured, "
-          + "group1 can be enabled by adding it to the list of groups in the "
-          + TABLE_LOCALITY_GROUPS.getKey() + " property.\n"
-          + "Additional group options may be specified for a named group by setting table.group.<name>.opt.<key>=<value>."),
+      "Properties in this category are per-table properties that define"
+          + " locality groups in a table. These properties start with the category"
+          + " prefix, followed by a name, followed by a period, and followed by a"
+          + " property for that group.\n"
+          + "For example table.group.group1=x,y,z sets the column families for a"
+          + " group called group1. Once configured, group1 can be enabled by adding"
+          + " it to the list of groups in the " + TABLE_LOCALITY_GROUPS.getKey() + " property.\n"
+          + "Additional group options may be specified for a named group by setting"
+          + " table.group.<name>.opt.<key>=<value>."),
   TABLE_FORMATTER_CLASS("table.formatter", DefaultFormatter.class.getName(), PropertyType.STRING,
       "The Formatter class to apply on results in the shell"),
   TABLE_INTERPRETER_CLASS("table.interepreter", DefaultScanInterpreter.class.getName(),
@@ -679,42 +790,57 @@ public enum Property {
   TABLE_REPLICATION("table.replication", "false", PropertyType.BOOLEAN,
       "Is replication enabled for the given table"),
   TABLE_REPLICATION_TARGET("table.replication.target.", null, PropertyType.PREFIX,
-      "Enumerate a mapping of other systems which this table should "
-          + "replicate their data to. The key suffix is the identifying cluster name and the value is an identifier for a location on the target system, "
-          + "e.g. the ID of the table on the target to replicate to"),
+      "Enumerate a mapping of other systems which this table should replicate"
+          + " their data to. The key suffix is the identifying cluster name and the"
+          + " value is an identifier for a location on the target system,"
+          + " e.g. the ID of the table on the target to replicate to"),
   @Experimental
   TABLE_VOLUME_CHOOSER("table.volume.chooser", "org.apache.accumulo.server.fs.RandomVolumeChooser",
       PropertyType.CLASSNAME,
-      "The class that will be used to select which volume will be used to create new files for this table."),
+      "The class that will be used to select which volume will be used to"
+          + " create new files for this table."),
   TABLE_SAMPLER("table.sampler", "", PropertyType.CLASSNAME,
-      "The name of a class that implements org.apache.accumulo.core.Sampler.  Setting this option enables storing a sample of data which can be scanned."
-          + "  Always having a current sample can useful for query optimization and data comprehension.   After enabling sampling for an existing table, a compaction "
-          + "is needed to compute the sample for existing data.  The compact command in the shell has an option to only compact files without sample data."),
+      "The name of a class that implements org.apache.accumulo.core.Sampler."
+          + " Setting this option enables storing a sample of data which can be"
+          + " scanned. Always having a current sample can useful for query optimization"
+          + " and data comprehension. After enabling sampling for an existing table,"
+          + " a compaction is needed to compute the sample for existing data. The"
+          + " compact command in the shell has an option to only compact files without"
+          + " sample data."),
   TABLE_SAMPLER_OPTS("table.sampler.opt.", null, PropertyType.PREFIX,
-      "The property is used to set options for a sampler.  If a sample had two options like hasher and modulous, then the two properties "
-          + "table.sampler.opt.hasher=${hash algorithm} and table.sampler.opt.modulous=${mod} would be set."),
+      "The property is used to set options for a sampler. If a sample had two"
+          + " options like hasher and modulous, then the two properties"
+          + " table.sampler.opt.hasher=${hash algorithm} and"
+          + " table.sampler.opt.modulous=${mod} would be set."),
   TABLE_SUSPEND_DURATION("table.suspend.duration", "0s", PropertyType.TIMEDURATION,
-      "For tablets belonging to this table: When a tablet server dies, allow the tablet server this duration to revive before reassigning its tablets"
-          + "to other tablet servers."),
+      "For tablets belonging to this table: When a tablet server dies, allow"
+          + " the tablet server this duration to revive before reassigning its tablets"
+          + " to other tablet servers."),
 
   // VFS ClassLoader properties
   VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY(
       AccumuloVFSClassLoader.VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY, "", PropertyType.STRING,
-      "Configuration for a system level vfs classloader. Accumulo jar can be configured here and loaded out of HDFS."),
+      "Configuration for a system level vfs classloader. Accumulo jar can be"
+          + " configured here and loaded out of HDFS."),
   VFS_CONTEXT_CLASSPATH_PROPERTY(AccumuloVFSClassLoader.VFS_CONTEXT_CLASSPATH_PROPERTY, null,
       PropertyType.PREFIX,
-      "Properties in this category are define a classpath. These properties start  with the category prefix, followed by a context name. "
-          + "The value is a comma seperated list of URIs. Supports full regex on filename alone. For example, "
-          + "general.vfs.context.classpath.cx1=hdfs://nn1:9902/mylibdir/*.jar. "
-          + "You can enable post delegation for a context, which will load classes from the context first instead of the parent first. "
-          + "Do this by setting general.vfs.context.classpath.<name>.delegation=post, where <name> is your context name"
-          + "If delegation is not specified, it defaults to loading from parent classloader first."),
+      "Properties in this category are define a classpath. These properties"
+          + " start  with the category prefix, followed by a context name. The value is"
+          + " a comma seperated list of URIs. Supports full regex on filename alone."
+          + " For example, general.vfs.context.classpath.cx1=hdfs://nn1:9902/mylibdir/*.jar."
+          + " You can enable post delegation for a context, which will load classes from the"
+          + " context first instead of the parent first. Do this by setting"
+          + " general.vfs.context.classpath.<name>.delegation=post, where <name> is"
+          + " your context name. If delegation is not specified, it defaults to loading"
+          + " from parent classloader first."),
   @Interpolated
   VFS_CLASSLOADER_CACHE_DIR(AccumuloVFSClassLoader.VFS_CACHE_DIR,
       "${java.io.tmpdir}" + File.separator + "accumulo-vfs-cache-${user.name}",
       PropertyType.ABSOLUTEPATH,
-      "Directory to use for the vfs cache. The cache will keep a soft reference to all of the classes loaded in the VM."
-          + " This should be on local disk on each node with sufficient space. It defaults to ${java.io.tmpdir}/accumulo-vfs-cache-${user.name}"),
+      "Directory to use for the vfs cache. The cache will keep a soft reference"
+          + " to all of the classes loaded in the VM. This should be on local disk on"
+          + " each node with sufficient space. It defaults to"
+          + " ${java.io.tmpdir}/accumulo-vfs-cache-${user.name}"),
 
   @Interpolated
   @Experimental
@@ -735,7 +861,8 @@ public enum Property {
   REPLICATION_PEER_KEYTAB("replication.peer.keytab.", null, PropertyType.PREFIX,
       "The keytab to use when authenticating with the given peer"),
   REPLICATION_NAME("replication.name", "", PropertyType.STRING,
-      "Name of this cluster with respect to replication. Used to identify this instance from other peers"),
+      "Name of this cluster with respect to replication. Used to identify this"
+          + " instance from other peers"),
   REPLICATION_MAX_WORK_QUEUE("replication.max.work.queue", "1000", PropertyType.COUNT,
       "Upper bound of the number of files queued for replication"),
   REPLICATION_WORK_ASSIGNMENT_SLEEP("replication.work.assignment.sleep", "30s",
@@ -745,7 +872,8 @@ public enum Property {
   REPLICATION_RECEIPT_SERVICE_PORT("replication.receipt.service.port", "10002", PropertyType.PORT,
       "Listen port used by thrift service in tserver listening for replication"),
   REPLICATION_WORK_ATTEMPTS("replication.work.attempts", "10", PropertyType.COUNT,
-      "Number of attempts to try to replicate some data before giving up and letting it naturally be retried later"),
+      "Number of attempts to try to replicate some data before giving up and"
+          + " letting it naturally be retried later"),
   REPLICATION_MIN_THREADS("replication.receiver.min.threads", "1", PropertyType.COUNT,
       "Minimum number of threads for replication"),
   REPLICATION_THREADCHECK("replication.receiver.threadcheck.time", "30s", PropertyType.TIMEDURATION,
@@ -759,14 +887,17 @@ public enum Property {
       "Amount of time to wait before the replication work loop begins in the master."),
   REPLICATION_WORK_PROCESSOR_DELAY("replication.work.processor.delay", "0s",
       PropertyType.TIMEDURATION,
-      "Amount of time to wait before first checking for replication work, not useful outside of tests"),
+      "Amount of time to wait before first checking for replication work, not"
+          + " useful outside of tests"),
   REPLICATION_WORK_PROCESSOR_PERIOD("replication.work.processor.period", "0s",
       PropertyType.TIMEDURATION,
-      "Amount of time to wait before re-checking for replication work, not useful outside of tests"),
+      "Amount of time to wait before re-checking for replication work, not"
+          + " useful outside of tests"),
   REPLICATION_TRACE_PERCENT("replication.trace.percent", "0.1", PropertyType.FRACTION,
       "The sampling percentage to use for replication traces"),
   REPLICATION_RPC_TIMEOUT("replication.rpc.timeout", "2m", PropertyType.TIMEDURATION,
-      "Amount of time for a single replication RPC call to last before failing the attempt. See replication.work.attempts."),
+      "Amount of time for a single replication RPC call to last before failing"
+          + " the attempt. See replication.work.attempts."),
 
   ;
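
For context, the per-table settings documented above are normally manipulated
through the public client API rather than by editing Property.java. A minimal
sketch (the Connector instance and the table name passed in are assumed here
for illustration; they are not part of this commit):

    import java.util.EnumSet;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;

    public class TablePropertyExample {
      // Sketch only: assumes an already-connected Connector and an existing table.
      static void configure(Connector conn, String table) throws Exception {
        // table.durability: one of none, log, flush, sync (see description above)
        conn.tableOperations().setProperty(table, "table.durability", "flush");

        // table.iterator.<scope>.<name> entries are usually managed indirectly
        // through an IteratorSetting rather than by setting the raw property
        IteratorSetting vers = new IteratorSetting(10, "vers",
            "org.apache.accumulo.core.iterators.VersioningIterator");
        vers.addOption("maxVersions", "3"); // table.iterator.minc.vers.opt.maxVersions
        conn.tableOperations().attachIterator(table, vers, EnumSet.of(IteratorScope.minc));
      }
    }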
 
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java b/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java
index 6f7cfd9..0fc500a 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java
@@ -41,45 +41,56 @@ public enum PropertyType {
   PREFIX(null, Predicates.<String> alwaysFalse(), null),
 
   TIMEDURATION("duration", boundedUnits(0, Long.MAX_VALUE, true, "", "ms", "s", "m", "h", "d"),
-      "A non-negative integer optionally followed by a unit of time (whitespace disallowed), as in 30s.\n"
-          + "If no unit of time is specified, seconds are assumed. Valid units are 'ms', 's', 'm', 'h' for milliseconds, seconds, minutes, and hours.\n"
-          + "Examples of valid durations are '600', '30s', '45m', '30000ms', '3d', and '1h'.\n"
-          + "Examples of invalid durations are '1w', '1h30m', '1s 200ms', 'ms', '', and 'a'.\n"
-          + "Unless otherwise stated, the max value for the duration represented in milliseconds is "
-          + Long.MAX_VALUE),
+      "A non-negative integer optionally followed by a unit of time (whitespace"
+          + " disallowed), as in 30s.\n"
+          + "If no unit of time is specified, seconds are assumed. Valid units"
+          + " are 'ms', 's', 'm', 'h' for milliseconds, seconds," + " minutes, and" + " hours.\n"
+          + "Examples of valid durations are '600', '30s', '45m', '30000ms'," + " '3d', and '1h'.\n"
+          + "Examples of invalid durations are '1w', '1h30m', '1s 200ms', 'ms', '',"
+          + " and 'a'.\nUnless otherwise stated, the max value for the duration"
+          + " represented in milliseconds is " + Long.MAX_VALUE),
 
   MEMORY("memory", boundedUnits(0, Long.MAX_VALUE, false, "", "B", "K", "M", "G"),
-      "A positive integer optionally followed by a unit of memory (whitespace disallowed), as in 2G.\n"
-          + "If no unit is specified, bytes are assumed. Valid units are 'B', 'K', 'M', 'G', for bytes, kilobytes, megabytes, and gigabytes.\n"
+      "A positive integer optionally followed by a unit of memory (whitespace"
+          + " disallowed), as in 2G.\n"
+          + "If no unit is specified, bytes are assumed. Valid units are 'B', 'K',"
+          + " 'M', 'G', for bytes, kilobytes, megabytes, and gigabytes.\n"
           + "Examples of valid memories are '1024', '20B', '100K', '1500M', '2G'.\n"
-          + "Examples of invalid memories are '1M500K', '1M 2K', '1MB', '1.5G', '1,024K', '', and 'a'.\n"
-          + "Unless otherwise stated, the max value for the memory represented in bytes is "
+          + "Examples of invalid memories are '1M500K', '1M 2K', '1MB', '1.5G',"
+          + " '1,024K', '', and 'a'.\n"
+          + "Unless otherwise stated, the max value for the memory represented in" + " bytes is "
           + Long.MAX_VALUE),
 
   HOSTLIST("host list",
       new Matches(
           "[\\w-]+(?:\\.[\\w-]+)*(?:\\:\\d{1,5})?(?:,[\\w-]+(?:\\.[\\w-]+)*(?:\\:\\d{1,5})?)*"),
       "A comma-separated list of hostnames or ip addresses, with optional port numbers.\n"
-          + "Examples of valid host lists are 'localhost:2000,www.example.com,10.10.1.1:500' and 'localhost'.\n"
+          + "Examples of valid host lists are"
+          + " 'localhost:2000,www.example.com,10.10.1.1:500' and 'localhost'.\n"
           + "Examples of invalid host lists are '', ':1000', and 'localhost:80000'"),
 
   @SuppressWarnings("unchecked")
   PORT("port",
       Predicates.or(new Bounds(1024, 65535), in(true, "0"), new PortRange("\\d{4,5}-\\d{4,5}")),
-      "An positive integer in the range 1024-65535 (not already in use or specified elsewhere in the configuration),\n"
-          + "zero to indicate any open ephemeral port, or a range of positive integers specified as M-N"),
+      "An positive integer in the range 1024-65535 (not already in use or"
+          + " specified elsewhere in the configuration),\n"
+          + "zero to indicate any open ephemeral port, or a range of positive"
+          + " integers specified as M-N"),
 
   COUNT("count", new Bounds(0, Integer.MAX_VALUE),
       "A non-negative integer in the range of 0-" + Integer.MAX_VALUE),
 
   FRACTION("fraction/percentage", new FractionPredicate(),
-      "A floating point number that represents either a fraction or, if suffixed with the '%' character, a percentage.\n"
-          + "Examples of valid fractions/percentages are '10', '1000%', '0.05', '5%', '0.2%', '0.0005'.\n"
-          + "Examples of invalid fractions/percentages are '', '10 percent', 'Hulk Hogan'"),
+      "A floating point number that represents either a fraction or, if"
+          + " suffixed with the '%' character, a percentage.\n"
+          + "Examples of valid fractions/percentages are '10', '1000%', '0.05',"
+          + " '5%', '0.2%', '0.0005'.\n"
+          + "Examples of invalid fractions/percentages are '', '10 percent'," + " 'Hulk Hogan'"),
 
   PATH("path", Predicates.<String> alwaysTrue(),
-      "A string that represents a filesystem path, which can be either relative or absolute to some directory. The filesystem depends on the property. The "
-          + "following environment variables will be substituted: "
+      "A string that represents a filesystem path, which can be either relative"
+          + " or absolute to some directory. The filesystem depends on the property."
+          + " The following environment variables will be substituted: "
           + Constants.PATH_PROPERTY_ENV_VARS),
 
   ABSOLUTEPATH("absolute path", new Predicate<String>() {
@@ -87,7 +98,8 @@ public enum PropertyType {
     public boolean apply(final String input) {
       return input == null || input.trim().isEmpty() || new Path(input.trim()).isAbsolute();
     }
-  }, "An absolute filesystem path. The filesystem depends on the property. This is the same as path, but enforces that its root is explicitly specified."),
+  }, "An absolute filesystem path. The filesystem depends on the property."
+      + " This is the same as path, but enforces that its root is explicitly" + " specified."),
 
   CLASSNAME("java class", new Matches("[\\w$.]*"),
       "A fully qualified java class name representing a class on the classpath.\n"
@@ -101,7 +113,8 @@ public enum PropertyType {
       "One of 'none', 'log', 'flush' or 'sync'."),
 
   STRING("string", Predicates.<String> alwaysTrue(),
-      "An arbitrary string of characters whose format is unspecified and interpreted based on the context of the property to which it applies."),
+      "An arbitrary string of characters whose format is unspecified and"
+          + " interpreted based on the context of the property to which it applies."),
 
   BOOLEAN("boolean", in(false, null, "true", "false"),
       "Has a value of either 'true' or 'false' (case-insensitive)"),
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java b/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
index 089f442..14c5183 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
@@ -164,9 +164,8 @@ public class SiteConfiguration extends AccumuloConfiguration {
           }
         }
       } catch (IOException e) {
-        log.warn(
-            "Failed to extract sensitive properties from Hadoop CredentialProvider, falling back to accumulo-site.xml",
-            e);
+        log.warn("Failed to extract sensitive properties from Hadoop"
+            + " CredentialProvider, falling back to accumulo-site.xml", e);
       }
     }
   }
diff --git a/core/src/main/java/org/apache/accumulo/core/data/ConstraintViolationSummary.java b/core/src/main/java/org/apache/accumulo/core/data/ConstraintViolationSummary.java
index 59fe632..e04a8ff 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/ConstraintViolationSummary.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/ConstraintViolationSummary.java
@@ -78,7 +78,8 @@ public class ConstraintViolationSummary {
   @Override
   public String toString() {
     return String.format(
-        "ConstraintViolationSummary(constrainClass:%s, violationCode:%d, violationDescription:%s, numberOfViolatingMutations:%d)",
+        "ConstraintViolationSummary(constrainClass:%s, violationCode:%d,"
+            + " violationDescription:%s, numberOfViolatingMutations:%d)",
         constrainClass, violationCode, violationDescription, numberOfViolatingMutations);
   }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/data/impl/TabletIdImpl.java b/core/src/main/java/org/apache/accumulo/core/data/impl/TabletIdImpl.java
index fb0b99a..81486e0 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/impl/TabletIdImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/impl/TabletIdImpl.java
@@ -28,7 +28,9 @@ public class TabletIdImpl implements TabletId {
   private KeyExtent ke;
 
   @SuppressWarnings("deprecation")
-  public static final Function<org.apache.accumulo.core.data.KeyExtent,TabletId> KE_2_TID_OLD = new Function<org.apache.accumulo.core.data.KeyExtent,TabletId>() {
+  // @formatter:off
+  public static final Function<org.apache.accumulo.core.data.KeyExtent,TabletId> KE_2_TID_OLD =
+    new Function<org.apache.accumulo.core.data.KeyExtent,TabletId>() {
     @Override
     public TabletId apply(org.apache.accumulo.core.data.KeyExtent input) {
       // the following if null check is to appease findbugs... grumble grumble spent a good part of
@@ -40,9 +42,12 @@ public class TabletIdImpl implements TabletId {
       return new TabletIdImpl(input);
     }
   };
+  // @formatter:on
 
   @SuppressWarnings("deprecation")
-  public static final Function<TabletId,org.apache.accumulo.core.data.KeyExtent> TID_2_KE_OLD = new Function<TabletId,org.apache.accumulo.core.data.KeyExtent>() {
+  // @formatter:off
+  public static final Function<TabletId,org.apache.accumulo.core.data.KeyExtent> TID_2_KE_OLD =
+    new Function<TabletId,org.apache.accumulo.core.data.KeyExtent>() {
     @Override
     public org.apache.accumulo.core.data.KeyExtent apply(TabletId input) {
       if (input == null)
@@ -50,8 +55,8 @@ public class TabletIdImpl implements TabletId {
       return new org.apache.accumulo.core.data.KeyExtent(input.getTableId(), input.getEndRow(),
           input.getPrevEndRow());
     }
-
   };
+  // @formatter:on
 
   @Deprecated
   public TabletIdImpl(org.apache.accumulo.core.data.KeyExtent ke) {
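
The // @formatter:off and // @formatter:on pairs used above (and throughout
this commit) fence off declarations that had to be wrapped by hand to fit the
checkstyle line-length limit, so an IDE formatter will not re-join them. A
minimal standalone sketch of the same pattern:

    public class FormatterToggleExample {
      // @formatter:off
      // The manual wrap below is preserved; without the toggle a formatter could
      // re-join the declaration into a single over-long line.
      public static final java.util.Map<String,java.util.List<String>>
        EXAMPLE_OF_A_DECLARATION_TOO_LONG_TO_FIT_ON_ONE_LINE =
          new java.util.HashMap<>();
      // @formatter:on
    }
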
diff --git a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
index 7300d7c..5608d18 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
@@ -137,7 +137,10 @@ public abstract class FileOperations {
    */
   @SuppressWarnings("unchecked")
   public NeedsFile<NeedsRange<OpenScanReaderOperationBuilder>> newScanReaderBuilder() {
-    return (NeedsFile<NeedsRange<OpenScanReaderOperationBuilder>>) (NeedsFile<?>) new OpenScanReaderOperation();
+    // @formatter:off
+    return (NeedsFile<NeedsRange<OpenScanReaderOperationBuilder>>)
+      (NeedsFile<?>) new OpenScanReaderOperation();
+    // @formatter:on
   }
 
   /**
@@ -173,7 +176,10 @@ public abstract class FileOperations {
   /**
    * Options common to all FileOperations.
    */
-  protected static class FileAccessOperation<SubclassType extends FileAccessOperation<SubclassType>> {
+  // @formatter:off
+  protected static class
+    FileAccessOperation<SubclassType extends FileAccessOperation<SubclassType>> {
+  // @formatter:on
     private AccumuloConfiguration tableConfiguration;
 
     private String filename;
diff --git a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/LruBlockCache.java b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/LruBlockCache.java
index e28a7bc..b8a29ac 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/LruBlockCache.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/LruBlockCache.java
@@ -410,7 +410,8 @@ public class LruBlockCache implements BlockCache, HeapSize {
       float memoryMB = ((float) bucketMemory.totalSize()) / ((float) (1024 * 1024));
 
       log.trace(
-          "Block cache LRU eviction completed. Freed {} bytes. Priority Sizes: Single={}MB ({}), Multi={}MB ({}), Memory={}MB ({})",
+          "Block cache LRU eviction completed. Freed {} bytes. Priority Sizes:"
+              + " Single={}MB ({}), Multi={}MB ({}), Memory={}MB ({})",
           bytesFreed, singleMB, bucketSingle.totalSize(), multiMB, bucketMulti.totalSize(),
           memoryMB, bucketMemory.totalSize());
 
@@ -599,8 +600,10 @@ public class LruBlockCache implements BlockCache, HeapSize {
     float freeMB = ((float) freeSize) / ((float) (1024 * 1024));
     float maxMB = ((float) maxSize) / ((float) (1024 * 1024));
     log.debug(
-        "Cache Stats: Sizes: Total={}MB ({}), Free={}MB ({}), Max={}MB ({}), Counts: Blocks={}, Access={}, Hit={}, Miss={}, Evictions={}, Evicted={},"
-            + "Ratios: Hit Ratio={}%, Miss Ratio={}%, Evicted/Run={}, Duplicate Reads={}",
+        "Cache Stats: Sizes: Total={}MB ({}), Free={}MB ({}), Max={}MB"
+            + " ({}), Counts: Blocks={}, Access={}, Hit={}, Miss={}, Evictions={},"
+            + " Evicted={}, Ratios: Hit Ratio={}%, Miss Ratio={}%, Evicted/Run={},"
+            + " Duplicate Reads={}",
         sizeMB, totalSize, freeMB, freeSize, maxMB, maxSize, size(), stats.getRequestCount(),
         stats.getHitCount(), stats.getMissCount(), stats.getEvictionCount(),
         stats.getEvictedCount(), stats.getHitRatio() * 100, stats.getMissRatio() * 100,
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
index 92d3082..64b3501 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -65,8 +65,10 @@ public class CreateEmpty {
     @Parameter(names = {"-c", "--codec"}, description = "the compression codec to use.",
         validateWith = IsSupportedCompressionAlgorithm.class)
     String codec = Compression.COMPRESSION_NONE;
-    @Parameter(description = " <path> { <path> ... } Each path given is a URL. "
-        + "Relative paths are resolved according to the default filesystem defined in your Hadoop configuration, which is usually an HDFS instance.",
+    @Parameter(
+        description = " <path> { <path> ... } Each path given is a URL."
+            + " Relative paths are resolved according to the default filesystem defined in"
+            + " your Hadoop configuration, which is usually an HDFS instance.",
         required = true, validateWith = NamedLikeRFile.class)
     List<String> files = new ArrayList<>();
   }
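
Given the parameters shown above, CreateEmpty is driven from its main method.
A hedged usage sketch (the output path is hypothetical; the file must be
named like an RFile, and relative paths resolve against the default Hadoop
filesystem):

    public class CreateEmptyUsage {
      public static void main(String[] args) throws Exception {
        // Write an empty RFile with no compression codec.
        org.apache.accumulo.core.file.rfile.CreateEmpty.main(
            new String[] {"--codec", "none", "/tmp/empty.rf"});
      }
    }
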
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/KeyShortener.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/KeyShortener.java
index 9ee17ac..590659d 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/KeyShortener.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/KeyShortener.java
@@ -27,8 +27,9 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Bytes;
 
-/*
- * Code to shorten keys that will be placed into RFile indexes. This code attempts to find a key thats between two keys that shorter.
+/**
+ * Code to shorten keys that will be placed into RFile indexes. This code attempts to find a key
+ * that falls between two keys and is shorter.
  */
 public class KeyShortener {
 
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
index bf10e88..14c7018 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
@@ -1385,9 +1385,8 @@ public class RFile {
         ArrayList<ByteSequence> setCF;
 
         if (lcg.columnFamilies == null) {
-          Preconditions.checkState(lcg.isDefaultLG,
-              " Group %s has null families. Only expect default locality group to have null families.",
-              lcg.name);
+          Preconditions.checkState(lcg.isDefaultLG, "Group %s has null families. "
+              + "Only expect default locality group to have null families.", lcg.name);
           setCF = new ArrayList<>();
         } else {
           setCF = new ArrayList<>(lcg.columnFamilies.keySet());
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/BCFile.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/BCFile.java
index bd6120a..778be3b 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/BCFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/BCFile.java
@@ -17,6 +17,8 @@
 
 package org.apache.accumulo.core.file.rfile.bcfile;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 import java.io.ByteArrayOutputStream;
 import java.io.Closeable;
 import java.io.DataInput;
@@ -26,7 +28,6 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -555,8 +556,10 @@ public final class BCFile {
   }
 
   // sha256 of some random data
-  private static final byte[] NO_CPYPTO_KEY = "ce18cf53c4c5077f771249b38033fa14bcb31cca0e5e95a371ee72daa8342ea2"
-      .getBytes(StandardCharsets.UTF_8);
+  // @formatter:off
+  private static final byte[] NO_CPYPTO_KEY =
+    "ce18cf53c4c5077f771249b38033fa14bcb31cca0e5e95a371ee72daa8342ea2".getBytes(UTF_8);
+  // @formatter:on
 
   // This class is used as a place holder in the cache for RFiles that have no crypto....
   private static final BCFileCryptoModuleParameters NO_CRYPTO = new BCFileCryptoModuleParameters() {
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/AggregatingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/AggregatingIterator.java
index e9666d4..e80c24d 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/AggregatingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/AggregatingIterator.java
@@ -43,7 +43,10 @@ import org.slf4j.LoggerFactory;
 public class AggregatingIterator implements SortedKeyValueIterator<Key,Value>, OptionDescriber {
 
   private SortedKeyValueIterator<Key,Value> iterator;
-  private ColumnToClassMapping<org.apache.accumulo.core.iterators.aggregation.Aggregator> aggregators;
+  // @formatter:off
+  private ColumnToClassMapping<org.apache.accumulo.core.iterators.aggregation.Aggregator>
+    aggregators;
+  // @formatter:on
 
   private Key workKey = new Key();
 
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java b/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
index 279917b..be686e1 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
@@ -81,7 +81,10 @@ public abstract class Combiner extends WrappingIterator implements OptionDescrib
 
   protected static final String COLUMNS_OPTION = "columns";
   protected static final String ALL_OPTION = "all";
-  protected static final String REDUCE_ON_FULL_COMPACTION_ONLY_OPTION = "reduceOnFullCompactionOnly";
+  // @formatter:off
+  protected static final String REDUCE_ON_FULL_COMPACTION_ONLY_OPTION =
+    "reduceOnFullCompactionOnly";
+  // @formatter:on
 
   private boolean isMajorCompaction;
   private boolean reduceOnFullCompactionOnly;
@@ -190,9 +193,9 @@ public abstract class Combiner extends WrappingIterator implements OptionDescrib
         loggedMsgCache.get(this.getClass().getName(), new Callable<Boolean>() {
           @Override
           public Boolean call() throws Exception {
-            sawDeleteLog.error(
-                "Combiner of type {} saw a delete during a partial compaction.  This could cause undesired results.  See ACCUMULO-2232.  Will not log subsequent "
-                    + "occurences for at least 1 hour.",
+            sawDeleteLog.error("Combiner of type {} saw a delete during a"
+                + " partial compaction. This could cause undesired results. See"
+                + " ACCUMULO-2232. Will not log subsequent occurences for at least" + " 1 hour.",
                 Combiner.this.getClass().getSimpleName());
             // the value is not used and does not matter
             return Boolean.TRUE;
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java b/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
index 96c1665..8c39fef 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
@@ -100,8 +100,8 @@ public abstract class Filter extends WrappingIterator implements OptionDescriber
   @Override
   public IteratorOptions describeOptions() {
     return new IteratorOptions("filter", "Filter accepts or rejects each Key/Value pair",
-        Collections.singletonMap("negate",
-            "default false keeps k/v that pass accept method, true rejects k/v that pass accept method"),
+        Collections.singletonMap("negate", "default false keeps k/v that pass"
+            + " accept method, true rejects k/v that pass accept method"),
         null);
   }
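
The negate option described above inverts a Filter's accept method. A small
sketch of wiring it up through IteratorSetting (the priority, name, and regex
are illustrative values):

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.Filter;
    import org.apache.accumulo.core.iterators.user.RegExFilter;

    public class NegateExample {
      static IteratorSetting rowsNotMatching() {
        IteratorSetting is = new IteratorSetting(20, "regex", RegExFilter.class);
        RegExFilter.setRegexs(is, "foo.*", null, null, null, false);
        // With negate=true the filter keeps k/v pairs accept() would reject.
        Filter.setNegate(is, true);
        return is;
      }
    }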
 
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorEnvironment.java b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorEnvironment.java
index 73cde2a..39a73b6 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorEnvironment.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorEnvironment.java
@@ -55,7 +55,8 @@ public interface IteratorEnvironment {
    *     SortedKeyValueIterator&lt;Key,Value&gt; source;
    *     SortedKeyValueIterator&lt;Key,Value&gt; sampleIter;
    *     &#64;Override
-   *     void init(SortedKeyValueIterator&lt;Key,Value&gt; source, Map&lt;String,String&gt; options, IteratorEnvironment env) {
+   *     void init(SortedKeyValueIterator&lt;Key,Value&gt; source, Map&lt;String,String&gt; options,
+   *       IteratorEnvironment env) {
    *       IteratorEnvironment sampleEnv = env.cloneWithSamplingEnabled();
    *       //do some sanity checks on sampling config
    *       validateSamplingConfiguration(sampleEnv.getSamplerConfiguration());
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
index a8e8128..ce603d3 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
@@ -193,18 +193,24 @@ public class IteratorUtil {
     Collections.sort(iters, new IterInfoComparator());
   }
 
-  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V> loadIterators(
-      IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
-      AccumuloConfiguration conf, IteratorEnvironment env) throws IOException {
+  // @formatter:off
+  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V>
+    loadIterators(
+  // @formatter:on
+          IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
+          AccumuloConfiguration conf, IteratorEnvironment env) throws IOException {
     List<IterInfo> emptyList = Collections.emptyList();
     Map<String,Map<String,String>> emptyMap = Collections.emptyMap();
     return loadIterators(scope, source, extent, conf, emptyList, emptyMap, env);
   }
 
-  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V> loadIterators(
-      IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
-      AccumuloConfiguration conf, List<IteratorSetting> iterators, IteratorEnvironment env)
-      throws IOException {
+  // @formatter:off
+  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V>
+    loadIterators(
+  // @formatter:on
+          IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
+          AccumuloConfiguration conf, List<IteratorSetting> iterators, IteratorEnvironment env)
+          throws IOException {
 
     List<IterInfo> ssiList = new ArrayList<>();
     Map<String,Map<String,String>> ssio = new HashMap<>();
@@ -217,10 +223,13 @@ public class IteratorUtil {
     return loadIterators(scope, source, extent, conf, ssiList, ssio, env, true);
   }
 
-  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V> loadIterators(
-      IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
-      AccumuloConfiguration conf, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio,
-      IteratorEnvironment env) throws IOException {
+  // @formatter:off
+  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V>
+    loadIterators(
+  // @formatter:on
+          IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
+          AccumuloConfiguration conf, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio,
+          IteratorEnvironment env) throws IOException {
     return loadIterators(scope, source, extent, conf, ssiList, ssio, env, true);
   }
 
@@ -246,10 +255,13 @@ public class IteratorUtil {
     }
   }
 
-  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V> loadIterators(
-      IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
-      AccumuloConfiguration conf, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio,
-      IteratorEnvironment env, boolean useAccumuloClassLoader) throws IOException {
+  // @formatter:off
+  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V>
+    loadIterators(
+  // @formatter:on
+          IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
+          AccumuloConfiguration conf, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio,
+          IteratorEnvironment env, boolean useAccumuloClassLoader) throws IOException {
     List<IterInfo> iters = new ArrayList<>(ssiList);
     Map<String,Map<String,String>> allOptions = new HashMap<>();
     parseIteratorConfiguration(scope, iters, ssio, allOptions, conf);
@@ -257,11 +269,14 @@ public class IteratorUtil {
         conf.get(Property.TABLE_CLASSPATH));
   }
 
-  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V> loadIterators(
-      IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
-      AccumuloConfiguration conf, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio,
-      IteratorEnvironment env, boolean useAccumuloClassLoader, String classLoaderContext)
-      throws IOException {
+  // @formatter:off
+  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V>
+    loadIterators(
+  // @formatter:on
+          IteratorScope scope, SortedKeyValueIterator<K,V> source, KeyExtent extent,
+          AccumuloConfiguration conf, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio,
+          IteratorEnvironment env, boolean useAccumuloClassLoader, String classLoaderContext)
+          throws IOException {
     List<IterInfo> iters = new ArrayList<>(ssiList);
     Map<String,Map<String,String>> allOptions = new HashMap<>();
     parseIteratorConfiguration(scope, iters, ssio, allOptions, conf);
@@ -269,18 +284,24 @@ public class IteratorUtil {
         classLoaderContext);
   }
 
-  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V> loadIterators(
-      SortedKeyValueIterator<K,V> source, Collection<IterInfo> iters,
-      Map<String,Map<String,String>> iterOpts, IteratorEnvironment env,
-      boolean useAccumuloClassLoader, String context) throws IOException {
+  // @formatter:off
+  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V>
+    loadIterators(
+  // @formatter:on
+          SortedKeyValueIterator<K,V> source, Collection<IterInfo> iters,
+          Map<String,Map<String,String>> iterOpts, IteratorEnvironment env,
+          boolean useAccumuloClassLoader, String context) throws IOException {
     return loadIterators(source, iters, iterOpts, env, useAccumuloClassLoader, context, null);
   }
 
-  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V> loadIterators(
-      SortedKeyValueIterator<K,V> source, Collection<IterInfo> iters,
-      Map<String,Map<String,String>> iterOpts, IteratorEnvironment env,
-      boolean useAccumuloClassLoader, String context,
-      Map<String,Class<? extends SortedKeyValueIterator<K,V>>> classCache) throws IOException {
+  // @formatter:off
+  public static <K extends WritableComparable<?>,V extends Writable> SortedKeyValueIterator<K,V>
+    loadIterators(
+  // @formatter:on
+          SortedKeyValueIterator<K,V> source, Collection<IterInfo> iters,
+          Map<String,Map<String,String>> iterOpts, IteratorEnvironment env,
+          boolean useAccumuloClassLoader, String context,
+          Map<String,Class<? extends SortedKeyValueIterator<K,V>>> classCache) throws IOException {
     // wrap the source in a SynchronizedIterator in case any of the additional configured iterators
     // want to use threading
     SortedKeyValueIterator<K,V> prev = new SynchronizedIterator<>(source);
@@ -324,9 +345,13 @@ public class IteratorUtil {
   }
 
   @SuppressWarnings("unchecked")
-  private static <K extends WritableComparable<?>,V extends Writable> Class<? extends SortedKeyValueIterator<K,V>> loadClass(
-      boolean useAccumuloClassLoader, String context, IterInfo iterInfo)
-      throws ClassNotFoundException, IOException {
+  // @formatter:off
+  private static
+  <K extends WritableComparable<?>,V extends Writable> Class<? extends SortedKeyValueIterator<K,V>>
+    loadClass(
+  // @formatter:on
+          boolean useAccumuloClassLoader, String context, IterInfo iterInfo)
+          throws ClassNotFoundException, IOException {
     Class<? extends SortedKeyValueIterator<K,V>> clazz;
     if (useAccumuloClassLoader) {
       if (context != null && !context.equals(""))
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/LongCombiner.java b/core/src/main/java/org/apache/accumulo/core/iterators/LongCombiner.java
index a555f7c..0753b47 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/LongCombiner.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/LongCombiner.java
@@ -104,8 +104,8 @@ public abstract class LongCombiner extends TypedValueCombiner<Long> {
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
     io.setName("longcombiner");
-    io.setDescription(
-        "LongCombiner can interpret Values as Longs in a variety of encodings (variable length, fixed length, or string) before combining");
+    io.setDescription("LongCombiner can interpret Values as Longs in a variety of encodings"
+        + " (variable length, fixed length, or string) before combining");
     io.addNamedOption(TYPE, "<VARLEN|FIXEDLEN|STRING|fullClassName>");
     return io;
   }
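
Since the description mentions the VARLEN, FIXEDLEN, and STRING encodings, a
short sketch of selecting one on a concrete LongCombiner subclass (the
priority and iterator name are illustrative):

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.LongCombiner;
    import org.apache.accumulo.core.iterators.user.SummingCombiner;

    public class EncodingExample {
      static IteratorSetting stringEncodedSum() {
        IteratorSetting is = new IteratorSetting(15, "sum", SummingCombiner.class);
        LongCombiner.setEncodingType(is, LongCombiner.Type.STRING);
        SummingCombiner.setCombineAllColumns(is, true); // combine every column
        return is;
      }
    }
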
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/OptionDescriber.java b/core/src/main/java/org/apache/accumulo/core/iterators/OptionDescriber.java
index 6cadb9e..dc897a5 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/OptionDescriber.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/OptionDescriber.java
@@ -55,8 +55,9 @@ public interface OptionDescriber {
      *          is a list of descriptions of additional options that don't have fixed names (null if
      *          unused). The descriptions are intended to describe a category, and the user will
      *          provide parameter names and values in that category; e.g., the FilteringIterator
-     *          needs a list of Filters intended to be named by their priority numbers, so its<br>
-     *          {@code unnamedOptionDescriptions = Collections.singletonList("<filterPriorityNumber> <ageoff|regex|filterClass>")}
+     *          needs a list of Filters intended to be named by their priority numbers, so it's<br>
+     *          {@code unnamedOptionDescriptions = Collections}<br>
+     *          {@code .singletonList("<filterPriorityNumber> <ageoff|regex|filterClass>")}
      */
     public IteratorOptions(String name, String description, Map<String,String> namedOptions,
         List<String> unnamedOptionDescriptions) {
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/TypedValueCombiner.java b/core/src/main/java/org/apache/accumulo/core/iterators/TypedValueCombiner.java
index f22f8c4..7b5925e 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/TypedValueCombiner.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/TypedValueCombiner.java
@@ -209,8 +209,8 @@ public abstract class TypedValueCombiner<V> extends Combiner {
   @Override
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
-    io.addNamedOption(LOSSY,
-        "if true, failed decodes are ignored. Otherwise combiner will error on failed decodes (default false): <TRUE|FALSE>");
+    io.addNamedOption(LOSSY, "if true, failed decodes are ignored. Otherwise"
+        + " combiner will error on failed decodes (default false): <TRUE|FALSE>");
     return io;
   }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/AgeOffFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/AgeOffFilter.java
index a84cfdc..2e83155 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/AgeOffFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/AgeOffFilter.java
@@ -85,8 +85,8 @@ public class AgeOffFilter extends Filter {
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
     io.addNamedOption(TTL, "time to live (milliseconds)");
-    io.addNamedOption(CURRENT_TIME,
-        "if set, use the given value as the absolute time in milliseconds as the current time of day");
+    io.addNamedOption(CURRENT_TIME, "if set, use the given value as the"
+        + " absolute time in milliseconds as the current time of day");
     io.setName("ageoff");
     io.setDescription(
         "AgeOffFilter removes entries with timestamps more than <ttl> milliseconds old");
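
For context, the TTL and CURRENT_TIME options are normally set via AgeOffFilter's static helpers. A minimal sketch, not part of this commit; setTTL is assumed from the 1.8 client API, and the class name is illustrative:

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.user.AgeOffFilter;

    public class AgeOffSketch {
      // Drop entries older than one hour. CURRENT_TIME is left unset, so the
      // filter compares timestamps against the system clock.
      static IteratorSetting oneHourTtl() {
        IteratorSetting is = new IteratorSetting(20, "ageoff", AgeOffFilter.class);
        AgeOffFilter.setTTL(is, 60L * 60 * 1000);
        return is;
      }
    }
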
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/CfCqSliceOpts.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/CfCqSliceOpts.java
index 30e7107..128bffc 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/CfCqSliceOpts.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/CfCqSliceOpts.java
@@ -27,34 +27,38 @@ import org.apache.hadoop.io.Text;
 
 public class CfCqSliceOpts {
   public static final String OPT_MIN_CF = "minCf";
-  public static final String OPT_MIN_CF_DESC = "UTF-8 encoded string representing minimum column family. "
-      + "Optional parameter. If minCf and minCq are undefined, the column slice will start at the first column "
-      + "of each row. If you want to do an exact match on column families, it's more efficient to leave minCf "
-      + "and maxCf undefined and use the scanner's fetchColumnFamily method.";
+  public static final String OPT_MIN_CF_DESC = "UTF-8 encoded string"
+      + " representing minimum column family. Optional parameter. If minCf and minCq"
+      + " are undefined, the column slice will start at the first column of each row."
+      + " If you want to do an exact match on column families, it's more efficient to"
+      + " leave minCf and maxCf undefined and use the scanner's fetchColumnFamily method.";
 
   public static final String OPT_MIN_CQ = "minCq";
-  public static final String OPT_MIN_CQ_DESC = "UTF-8 encoded string representing minimum column qualifier. "
-      + "Optional parameter. If minCf and minCq are undefined, the column slice will start at the first column "
-      + "of each row.";
+  public static final String OPT_MIN_CQ_DESC = "UTF-8 encoded string"
+      + " representing minimum column qualifier. Optional parameter. If minCf and"
+      + " minCq are undefined, the column slice will start at the first column of each row.";
 
   public static final String OPT_MAX_CF = "maxCf";
-  public static final String OPT_MAX_CF_DESC = "UTF-8 encoded string representing maximum column family. "
-      + "Optional parameter. If minCf and minCq are undefined, the column slice will start at the first column "
-      + "of each row. If you want to do an exact match on column families, it's more efficient to leave minCf "
-      + "and maxCf undefined and use the scanner's fetchColumnFamily method.";
+  public static final String OPT_MAX_CF_DESC = "UTF-8 encoded string"
+      + " representing maximum column family. Optional parameter. If minCf and minCq"
+      + " are undefined, the column slice will start at the first column of each row."
+      + " If you want to do an exact match on column families, it's more efficient to"
+      + " leave minCf and maxCf undefined and use the scanner's fetchColumnFamily method.";
 
   public static final String OPT_MAX_CQ = "maxCq";
-  public static final String OPT_MAX_CQ_DESC = "UTF-8 encoded string representing maximum column qualifier. "
-      + "Optional parameter. If maxCf and MaxCq are undefined, the column slice will end at the last column of "
-      + "each row.";
+  public static final String OPT_MAX_CQ_DESC = "UTF-8 encoded string"
+      + " representing maximum column qualifier. Optional parameter. If maxCf and"
+      + " maxCq are undefined, the column slice will end at the last column of each row.";
 
   public static final String OPT_MIN_INCLUSIVE = "minInclusive";
-  public static final String OPT_MIN_INCLUSIVE_DESC = "UTF-8 encoded string indicating whether to include the "
-      + "minimum column in the slice range. Optional parameter, default is true.";
+  public static final String OPT_MIN_INCLUSIVE_DESC = "UTF-8 encoded string"
+      + " indicating whether to include the minimum column in the slice range."
+      + " Optional parameter, default is true.";
 
   public static final String OPT_MAX_INCLUSIVE = "maxInclusive";
-  public static final String OPT_MAX_INCLUSIVE_DESC = "UTF-8 encoded string indicating whether to include the "
-      + "maximum column in the slice range. Optional parameter, default is true.";
+  public static final String OPT_MAX_INCLUSIVE_DESC = "UTF-8 encoded string"
+      + " indicating whether to include the maximum column in the slice range."
+      + " Optional parameter, default is true.";
 
   Text minCf;
   Text minCq;
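
For context, these OPT_* keys are passed straight to IteratorSetting.addOption when configuring CfCqSliceFilter. A minimal sketch, not part of this commit; the class name and values are illustrative:

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.user.CfCqSliceFilter;

    public class CfSliceSketch {
      // Limit a scan to column families in ["attr", "attrz"), using the
      // option keys documented above.
      static IteratorSetting cfSlice() {
        IteratorSetting is = new IteratorSetting(30, "cfslice", CfCqSliceFilter.class);
        is.addOption("minCf", "attr");
        is.addOption("maxCf", "attrz");
        is.addOption("maxInclusive", "false");
        return is;
      }
    }
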
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/ColumnAgeOffFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/ColumnAgeOffFilter.java
index 1e2485b..7cd34f7 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/ColumnAgeOffFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/ColumnAgeOffFilter.java
@@ -100,8 +100,8 @@ public class ColumnAgeOffFilter extends Filter {
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
     io.setName("colageoff");
-    io.setDescription(
-        "ColumnAgeOffFilter ages off columns at different rates given a time to live in milliseconds for each column");
+    io.setDescription("ColumnAgeOffFilter ages off columns at different rates"
+        + " given a time to live in milliseconds for each column");
     io.addUnnamedOption("<col fam>[:<col qual>] <Long> (escape non-alphanum chars using %<hex>)");
     return io;
   }
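
For context, ColumnAgeOffFilter takes unnamed options in exactly the format shown above: the option name encodes the column and the value is a TTL in milliseconds. A minimal sketch, not part of this commit; names and values are illustrative:

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.user.ColumnAgeOffFilter;

    public class ColumnAgeOffSketch {
      static IteratorSetting perColumnTtls() {
        IteratorSetting is = new IteratorSetting(25, "colageoff", ColumnAgeOffFilter.class);
        is.addOption("scratch", "3600000"); // family "scratch": age off after 1 hour
        is.addOption("logs:debug", "60000"); // logs:debug: age off after 1 minute
        return is;
      }
    }
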
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/ColumnSliceFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/ColumnSliceFilter.java
index bd18fd5..99e5dbd 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/ColumnSliceFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/ColumnSliceFilter.java
@@ -79,8 +79,8 @@ public class ColumnSliceFilter extends Filter {
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
     io.setName("columnSlice");
-    io.setDescription(
-        "The ColumnSliceFilter/Iterator allows you to filter for key/value pairs based on a lexicographic range of column qualifier names");
+    io.setDescription("The ColumnSliceFilter/Iterator allows you to filter for"
+        + " key/value pairs based on a lexicographic range of column qualifier names");
     io.addNamedOption(START_BOUND, "start string in slice");
     io.addNamedOption(END_BOUND, "end string in slice");
     io.addNamedOption(START_INCLUSIVE, "include the start bound in the result set");
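
For context, the bound options are usually populated through ColumnSliceFilter's setSlice helper rather than raw addOption calls. A minimal sketch, not part of this commit; the helper signature is assumed from the 1.8 client API:

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.user.ColumnSliceFilter;

    public class ColumnSliceSketch {
      // Keep only qualifiers lexicographically within ["2018-01", "2018-02").
      static IteratorSetting qualifierSlice() {
        IteratorSetting is = new IteratorSetting(31, "columnSlice", ColumnSliceFilter.class);
        ColumnSliceFilter.setSlice(is, "2018-01", "2018-02");
        return is;
      }
    }
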
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/LargeRowFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/LargeRowFilter.java
index 2592649..6bbcf6c 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/LargeRowFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/LargeRowFilter.java
@@ -256,7 +256,8 @@ public class LargeRowFilter implements SortedKeyValueIterator<Key,Value>, Option
 
   @Override
   public IteratorOptions describeOptions() {
-    String description = "This iterator suppresses rows that exceed a specified number of columns. Once\n"
+    String description = "This iterator suppresses rows that exceed a specified"
+        + " number of columns. Once\n"
         + "a row exceeds the threshold, a marker is emitted and the row is always\n"
         + "suppressed by this iterator after that point in time.\n"
         + " This iterator works in a similar way to the RowDeletingIterator. See its\n"
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/MaxCombiner.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/MaxCombiner.java
index 20632ec..967b224 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/MaxCombiner.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/MaxCombiner.java
@@ -40,8 +40,9 @@ public class MaxCombiner extends LongCombiner {
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
     io.setName("max");
-    io.setDescription(
-        "MaxCombiner interprets Values as Longs and finds their maximum.  A variety of encodings (variable length, fixed length, or string) are available");
+    io.setDescription("MaxCombiner interprets Values as Longs and finds their"
+        + " maximum.  A variety of encodings (variable length, fixed length, or"
+        + " string) are available");
     return io;
   }
 }
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/MinCombiner.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/MinCombiner.java
index 2aa2550..da69f49 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/MinCombiner.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/MinCombiner.java
@@ -40,8 +40,9 @@ public class MinCombiner extends LongCombiner {
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
     io.setName("min");
-    io.setDescription(
-        "MinCombiner interprets Values as Longs and finds their minimum.  A variety of encodings (variable length, fixed length, or string) are available");
+    io.setDescription("MinCombiner interprets Values as Longs and finds their"
+        + " minimum.  A variety of encodings (variable length, fixed length, or"
+        + " string) are available");
     return io;
   }
 }
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
index a4134ea..f65baa6 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
@@ -153,8 +153,8 @@ public class RegExFilter extends Filter {
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
     io.setName("regex");
-    io.setDescription(
-        "The RegExFilter/Iterator allows you to filter for key/value pairs based on regular expressions");
+    io.setDescription("The RegExFilter/Iterator allows you to filter for"
+        + " key/value pairs based on regular expressions");
     io.addNamedOption(RegExFilter.ROW_REGEX, "regular expression on row");
     io.addNamedOption(RegExFilter.COLF_REGEX, "regular expression on column family");
     io.addNamedOption(RegExFilter.COLQ_REGEX, "regular expression on column qualifier");
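
For context, these named options are normally populated through RegExFilter.setRegexs. A minimal sketch, not part of this commit; the patterns are illustrative:

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.user.RegExFilter;

    public class RegExSketch {
      // Keep entries whose row starts with "user" OR whose value mentions
      // "error"; the final argument ORs the individual patterns together.
      static IteratorSetting userOrError() {
        IteratorSetting is = new IteratorSetting(40, "regex", RegExFilter.class);
        RegExFilter.setRegexs(is, "user.*", null, null, ".*error.*", true);
        return is;
      }
    }
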
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/RowEncodingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/RowEncodingIterator.java
index 00a64e9..e4a2825 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/RowEncodingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/RowEncodingIterator.java
@@ -172,8 +172,10 @@ public abstract class RowEncodingIterator
 
   @Override
   public IteratorOptions describeOptions() {
-    String desc = "This iterator encapsulates an entire row of Key/Value pairs into a single Key/Value pair.";
-    String bufferDesc = "Maximum buffer size (in accumulo memory spec) to use for buffering keys before throwing a BufferOverflowException.";
+    String desc = "This iterator encapsulates an entire row of Key/Value pairs"
+        + " into a single Key/Value pair.";
+    String bufferDesc = "Maximum buffer size (in accumulo memory spec) to use"
+        + " for buffering keys before throwing a BufferOverflowException.";
     HashMap<String,String> namedOptions = new HashMap<>();
     namedOptions.put(MAX_BUFFER_SIZE_OPT, bufferDesc);
     return new IteratorOptions(getClass().getSimpleName(), desc, namedOptions, null);
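
For context, WholeRowIterator is the stock RowEncodingIterator subclass, and decoding on the client reverses the encapsulation described above. A minimal sketch, not part of this commit; the class and method names here are illustrative:

    import java.io.IOException;
    import java.util.Map;
    import java.util.SortedMap;

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.user.WholeRowIterator;

    public class WholeRowSketch {
      // Each scanner entry is now one encoded row; unpack it back into its
      // constituent Key/Value pairs on the client side.
      static void scanWholeRows(Scanner scanner) throws IOException {
        scanner.addScanIterator(new IteratorSetting(50, "wholerow", WholeRowIterator.class));
        for (Map.Entry<Key,Value> row : scanner) {
          SortedMap<Key,Value> cells = WholeRowIterator.decodeRow(row.getKey(), row.getValue());
          System.out.println(row.getKey().getRow() + " has " + cells.size() + " cells");
        }
      }
    }
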
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingArrayCombiner.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingArrayCombiner.java
index 7ee018d..35ee47f 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingArrayCombiner.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingArrayCombiner.java
@@ -128,9 +128,9 @@ public class SummingArrayCombiner extends TypedValueCombiner<List<Long>> {
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
     io.setName("sumarray");
-    io.setDescription(
-        "SummingArrayCombiner can interpret Values as arrays of Longs using a variety of encodings "
-            + "(arrays of variable length longs or fixed length longs, or comma-separated strings) before summing element-wise.");
+    io.setDescription("SummingArrayCombiner can interpret Values as arrays of"
+        + " Longs using a variety of encodings (arrays of variable length longs or"
+        + " fixed length longs, or comma-separated strings) before summing element-wise.");
     io.addNamedOption(TYPE, "<VARLEN|FIXEDLEN|STRING|fullClassName>");
     return io;
   }
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingCombiner.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingCombiner.java
index cf0da3c..f3e147e 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingCombiner.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingCombiner.java
@@ -39,8 +39,9 @@ public class SummingCombiner extends LongCombiner {
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
     io.setName("sum");
-    io.setDescription(
-        "SummingCombiner interprets Values as Longs and adds them together.  A variety of encodings (variable length, fixed length, or string) are available");
+    io.setDescription("SummingCombiner interprets Values as Longs and adds them"
+        + " together.  A variety of encodings (variable length, fixed length, or"
+        + " string) are available");
     return io;
   }
 }
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
index 9adf32b..fb5022d 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
@@ -142,12 +142,16 @@ abstract public class TransformingIterator extends WrappingIterator implements O
 
   @Override
   public IteratorOptions describeOptions() {
-    String desc = "This iterator allows ranges of key to be transformed (with the exception of row transformations).";
+    String desc = "This iterator allows ranges of keys to be transformed (with"
+        + " the exception of row transformations).";
     String authDesc = "Comma-separated list of user's scan authorizations.  "
         + "If excluded or empty, then no visibility check is performed on transformed keys.";
-    String bufferDesc = "Maximum buffer size (in accumulo memory spec) to use for buffering keys before throwing a BufferOverflowException.  "
-        + "Users should keep this limit in mind when deciding what to transform.  That is, if transforming the column family for example, then all "
-        + "keys sharing the same row and column family must fit within this limit (along with their associated values)";
+    String bufferDesc = "Maximum buffer size (in accumulo memory spec) to use"
+        + " for buffering keys before throwing a BufferOverflowException. Users"
+        + " should keep this limit in mind when deciding what to transform. That"
+        + " is, if transforming the column family for example, then all keys"
+        + " sharing the same row and column family must fit within this limit"
+        + " (along with their associated values)";
     HashMap<String,String> namedOptions = new HashMap<>();
     namedOptions.put(AUTH_OPT, authDesc);
     namedOptions.put(MAX_BUFFER_SIZE_OPT, bufferDesc);
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/VisibilityFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/VisibilityFilter.java
index fe8adbc..0b741fd 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/VisibilityFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/VisibilityFilter.java
@@ -112,12 +112,14 @@ public class VisibilityFilter extends Filter implements OptionDescriber {
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
     io.setName("visibilityFilter");
-    io.setDescription(
-        "The VisibilityFilter allows you to filter for key/value pairs by a set of authorizations or filter invalid labels from corrupt files.");
+    io.setDescription("The VisibilityFilter allows you to filter for key/value"
+        + " pairs by a set of authorizations or filter invalid labels from corrupt files.");
     io.addNamedOption(FILTER_INVALID_ONLY,
-        "if 'true', the iterator is instructed to ignore the authorizations and only filter invalid visibility labels (default: false)");
+        "if 'true', the iterator is instructed to ignore the authorizations and"
+            + " only filter invalid visibility labels (default: false)");
     io.addNamedOption(AUTHS,
-        "the serialized set of authorizations to filter against (default: empty string, accepts only entries visible by all)");
+        "the serialized set of authorizations to filter against (default: empty"
+            + " string, accepts only entries visible by all)");
     return io;
   }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
index aaf1163..1a96340 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
@@ -246,8 +246,12 @@ public class MetadataSchema {
 
   /**
    * Holds references to files that need replication
-   * <p>
-   * <code>~replhdfs://localhost:8020/accumulo/wal/tserver+port/WAL stat:local_table_id [] -&gt; protobuf</code>
+   *
+   * <pre>
+   * <code>
+   * ~replhdfs://localhost:8020/accumulo/wal/tserver+port/WAL stat:local_table_id [] -&gt; protobuf
+   * </code>
+   * </pre>
    */
   public static class ReplicationSection {
     public static final Text COLF = new Text("stat");
diff --git a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
index 53f8236..6100c67 100644
--- a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
+++ b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
@@ -43,8 +43,13 @@ public class ReplicationSchema {
   /**
   * Portion of a file that must be replicated to the given target: peer and some identifying
    * location on that peer, e.g. remote table ID
-   * <p>
-   * <code>hdfs://localhost:8020/accumulo/wal/tserver+port/WAL work:serialized_ReplicationTarget [] -&gt; Status Protobuf</code>
+   *
+   * <pre>
+   * <code>
+   * hdfs://localhost:8020/accumulo/wal/tserver+port/WAL work:serialized_ReplicationTarget []
+   * -&gt; Status Protobuf
+   * </code>
+   * </pre>
    */
   public static class WorkSection {
     public static final Text NAME = new Text("work");
@@ -85,8 +90,13 @@ public class ReplicationSchema {
 
   /**
    * Holds replication markers tracking status for files
-   * <p>
-   * <code>hdfs://localhost:8020/accumulo/wal/tserver+port/WAL repl:local_table_id [] -&gt; Status Protobuf</code>
+   *
+   * <pre>
+   * <code>
+   * hdfs://localhost:8020/accumulo/wal/tserver+port/WAL repl:local_table_id []
+   *  -&gt; Status Protobuf
+   * </code>
+   * </pre>
    */
   public static class StatusSection {
     public static final Text NAME = new Text("repl");
@@ -138,8 +148,13 @@ public class ReplicationSchema {
    * Holds the order in which files needed for replication were closed. The intent is to be able to
    * guarantee that files which were closed earlier were replicated first and we don't replay data
    * in the wrong order on our peers
-   * <p>
-   * <code>encodedTimeOfClosure\x00hdfs://localhost:8020/accumulo/wal/tserver+port/WAL order:source_table_id [] -&gt; Status Protobuf</code>
+   *
+   * <pre>
+   * <code>
+   * encodedTimeOfClosure\x00hdfs://localhost:8020/accumulo/wal/tserver+port/WAL
+   *   order:source_table_id [] -&gt; Status Protobuf
+   * </code>
+   * </pre>
    */
   public static class OrderSection {
     public static final Text NAME = new Text("order");
diff --git a/core/src/main/java/org/apache/accumulo/core/rpc/FilterTransport.java b/core/src/main/java/org/apache/accumulo/core/rpc/FilterTransport.java
index f8c4147..903ec2d 100644
--- a/core/src/main/java/org/apache/accumulo/core/rpc/FilterTransport.java
+++ b/core/src/main/java/org/apache/accumulo/core/rpc/FilterTransport.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
diff --git a/core/src/main/java/org/apache/accumulo/core/rpc/ThriftUtil.java b/core/src/main/java/org/apache/accumulo/core/rpc/ThriftUtil.java
index 9ae0cf0..bf717b1 100644
--- a/core/src/main/java/org/apache/accumulo/core/rpc/ThriftUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/rpc/ThriftUtil.java
@@ -271,8 +271,11 @@ public class ThriftUtil {
 
           // Wrap the real factory with our own that will set the protocol on the Socket before
           // returning it
-          ProtocolOverridingSSLSocketFactory wrappingSslSockFactory = new ProtocolOverridingSSLSocketFactory(
-              sslSockFactory, new String[] {sslParams.getClientProtocol()});
+          // @formatter:off
+          ProtocolOverridingSSLSocketFactory wrappingSslSockFactory =
+            new ProtocolOverridingSSLSocketFactory(sslSockFactory,
+                new String[] {sslParams.getClientProtocol()});
+          // @formatter:on
 
           // Create the TSocket from that
           transport = createClient(wrappingSslSockFactory, address.getHost(), address.getPort(),
@@ -312,9 +315,8 @@ public class ThriftUtil {
               // The current user has no credentials, let it fail naturally at the RPC layer (no
               // ticket)
               // We know this won't work, but we can't do anything else
-              log.warn(
-                  "The current user is a proxy user but there is no underlying real user (likely that RPCs will fail): {}",
-                  currentUser);
+              log.warn("The current user is a proxy user but there is no"
+                  + " underlying real user (likely that RPCs will fail): {}", currentUser);
               userForRpc = currentUser;
             }
           } else {
@@ -349,8 +351,8 @@ public class ThriftUtil {
           // will attempt to re-login to make the next attempt work.
           // Sadly, we have no way to determine the actual reason we got this TTransportException
           // other than inspecting the exception msg.
-          log.debug(
-              "Caught TTransportException opening SASL transport, checking if re-login is necessary before propagating the exception.");
+          log.debug("Caught TTransportException opening SASL transport,"
+              + " checking if re-login is necessary before propagating the exception.");
           attemptClientReLogin();
 
           throw e;
diff --git a/core/src/main/java/org/apache/accumulo/core/rpc/UGIAssumingTransport.java b/core/src/main/java/org/apache/accumulo/core/rpc/UGIAssumingTransport.java
index b23157b..20f4398 100644
--- a/core/src/main/java/org/apache/accumulo/core/rpc/UGIAssumingTransport.java
+++ b/core/src/main/java/org/apache/accumulo/core/rpc/UGIAssumingTransport.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
diff --git a/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java b/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
index faffdcc..e73d8ee 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
@@ -218,7 +218,7 @@ public class ColumnVisibility {
   /*
    * Walks an expression's AST in order to:
   *  1) roll up expressions with the same operand (`a&(b&c)` becomes `a&b&c`)
-   *  2) sorts labels lexicographically (permutations of `a&b&c` are re-ordered to appear as `a&b&c`)
+   *  2) sort labels lexicographically (permutations of `a&b&c` are re-ordered to appear as `a&b&c`)
   *  3) dedupe labels (`a&b&a` becomes `a&b`)
    */
   // @formatter:on
@@ -540,7 +540,8 @@ public class ColumnVisibility {
    *   .
    *   .
    *   .
-   * ColumnVisibility cv = new ColumnVisibility(quote(&quot;A#C&quot;) + &quot;&amp;&quot; + quote(&quot;FOO&quot;));
+   * String s = quote(&quot;A#C&quot;) + &quot;&amp;&quot; + quote(&quot;FOO&quot;);
+   * ColumnVisibility cv = new ColumnVisibility(s);
    * </pre>
    *
    * @param term
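
The rewrapped javadoc example runs as written with the public quote() helper; a complete sketch, not part of this commit:

    import org.apache.accumulo.core.security.ColumnVisibility;

    public class QuoteSketch {
      public static void main(String[] args) {
        // quote() escapes terms containing non-alphanumeric characters so
        // they can be embedded safely in a visibility expression.
        String expr = ColumnVisibility.quote("A#C") + "&" + ColumnVisibility.quote("FOO");
        System.out.println(new ColumnVisibility(expr)); // e.g. ["A#C"&FOO]
      }
    }
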
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/BlockedOutputStream.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/BlockedOutputStream.java
index 041d518..ac8fa98 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/BlockedOutputStream.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/BlockedOutputStream.java
@@ -21,8 +21,11 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
 
-// Buffers all input in a growing buffer until flush() is called. Then entire buffer is written, with size information, and padding to force the underlying
-// crypto output stream to also fully flush
+/*
+ * Buffers all input in a growing buffer until flush() is called. Then the
+ * entire buffer is written, with size information, and padding to force the
+ * underlying crypto output stream to also fully flush.
+ */
 public class BlockedOutputStream extends OutputStream {
   int blockSize;
   DataOutputStream out;
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java
index c6049bd..809a904 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java
@@ -28,8 +28,6 @@ import org.slf4j.LoggerFactory;
 
 /**
  * This factory module exists to assist other classes in loading crypto modules.
- *
- *
  */
 public class CryptoModuleFactory {
 
@@ -110,13 +108,15 @@ public class CryptoModuleFactory {
 
       } catch (InstantiationException e) {
         log.warn(String.format(
-            "Got instantiation exception %s when instantiating crypto module \"%s\".  No encryption will be used.",
+            "Got instantiation exception %s when"
+                + " instantiating crypto module \"%s\". No encryption will be used.",
             e.getCause().getClass().getName(), cryptoModuleClassname));
         log.warn("InstantiationException", e.getCause());
         return new NullCryptoModule();
       } catch (IllegalAccessException e) {
         log.warn(String.format(
-            "Got illegal access exception when trying to instantiate crypto module \"%s\".  No encryption will be used.",
+            "Got illegal access exception when trying to"
+                + " instantiate crypto module \"%s\". No encryption will be used.",
             cryptoModuleClassname));
         log.warn("IllegalAccessException", e);
         return new NullCryptoModule();
@@ -165,9 +165,8 @@ public class CryptoModuleFactory {
     try {
       keyEncryptionStrategyClazz = AccumuloVFSClassLoader.loadClass(className);
     } catch (ClassNotFoundException e1) {
-      log.warn(String.format(
-          "Could not find configured secret key encryption strategy \"%s\".  No encryption will be used.",
-          className));
+      log.warn(String.format("Could not find configured secret key encryption"
+          + " strategy \"%s\". No encryption will be used.", className));
       return new NullSecretKeyEncryptionStrategy();
     }
 
@@ -183,8 +182,9 @@ public class CryptoModuleFactory {
     }
 
     if (!implementsSecretKeyStrategy) {
-      log.warn(
-          "Configured Accumulo secret key encryption strategy \"%s\" does not implement the SecretKeyEncryptionStrategy interface. No encryption will be used.");
+      log.warn("Configured Accumulo secret key encryption strategy \"{}\" does"
+          + " not implement the SecretKeyEncryptionStrategy interface. No"
+          + " encryption will be used.", className);
       return new NullSecretKeyEncryptionStrategy();
     } else {
       try {
@@ -193,15 +193,15 @@ public class CryptoModuleFactory {
         log.debug("Successfully instantiated secret key encryption strategy " + className);
 
       } catch (InstantiationException e) {
-        log.warn(String.format(
-            "Got instantiation exception %s when instantiating secret key encryption strategy \"%s\".  No encryption will be used.",
-            e.getCause().getClass().getName(), className));
+        log.warn(String.format("Got instantiation exception %s when"
+            + " instantiating secret key encryption strategy \"%s\". No"
+            + " encryption will be used.", e.getCause().getClass().getName(), className));
         log.warn("InstantiationException", e.getCause());
         return new NullSecretKeyEncryptionStrategy();
       } catch (IllegalAccessException e) {
-        log.warn(String.format(
-            "Got illegal access exception when trying to instantiate secret key encryption strategy \"%s\".  No encryption will be used.",
-            className));
+        log.warn(String.format("Got illegal access exception when trying to"
+            + " instantiate secret key encryption strategy \"%s\". No encryption"
+            + " will be used.", className));
         log.warn("IllegalAccessException", e);
         return new NullSecretKeyEncryptionStrategy();
       }
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
index f1be91a..b7ba44f 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
@@ -63,7 +63,8 @@ public class DefaultCryptoModule implements CryptoModule {
     String cipherTransformation = getCipherTransformation(params);
 
     log.trace(String.format(
-        "Using cipher suite \"%s\" with key length %d with RNG \"%s\" and RNG provider \"%s\" and key encryption strategy \"%s\"",
+        "Using cipher suite \"%s\" with key length %d with"
+                + " RNG \"%s\" and RNG provider \"%s\" and key encryption strategy \"%s\"",
         cipherTransformation, params.getKeyLength(), params.getRandomNumberGenerator(),
         params.getRandomNumberGeneratorProvider(), params.getKeyEncryptionStrategyClass()));
 
@@ -81,8 +82,8 @@ public class DefaultCryptoModule implements CryptoModule {
             new SecretKeySpec(params.getPlaintextKey(), params.getAlgorithmName()),
             params.getSecureRandom());
       } catch (InvalidKeyException e) {
-        log.error(
-            "Accumulo encountered an unknown error in generating the secret key object (SecretKeySpec) for an encrypted stream");
+        log.error("Accumulo encountered an unknown error in generating the"
+            + " secret key object (SecretKeySpec) for an encrypted stream");
         throw new RuntimeException(e);
       }
 
@@ -94,12 +95,12 @@ public class DefaultCryptoModule implements CryptoModule {
             new SecretKeySpec(params.getPlaintextKey(), params.getAlgorithmName()),
             new IvParameterSpec(params.getInitializationVector()));
       } catch (InvalidKeyException e) {
-        log.error(
-            "Accumulo encountered an unknown error in generating the secret key object (SecretKeySpec) for an encrypted stream");
+        log.error("Accumulo encountered an unknown error in generating the"
+            + " secret key object (SecretKeySpec) for an encrypted stream");
         throw new RuntimeException(e);
       } catch (InvalidAlgorithmParameterException e) {
-        log.error(
-            "Accumulo encountered an unknown error in setting up the initialization vector for an encrypted stream");
+        log.error("Accumulo encountered an unknown error in setting up the"
+            + " initialization vector for an encrypted stream");
         throw new RuntimeException(e);
       }
     }
@@ -157,8 +158,9 @@ public class DefaultCryptoModule implements CryptoModule {
 
     if (cipherMode == Cipher.ENCRYPT_MODE) {
 
-      StringBuilder errorBuf = new StringBuilder(
-          "The following problems were found with the CryptoModuleParameters object you provided for an encrypt operation:\n");
+      StringBuilder errorBuf = new StringBuilder("The following problems were"
+          + " found with the CryptoModuleParameters object you provided for an"
+          + " encrypt operation:\n");
       boolean allIsWell = true;
 
       allIsWell = validateNotEmpty(params.getAlgorithmName(), allIsWell, errorBuf,
@@ -190,8 +192,9 @@ public class DefaultCryptoModule implements CryptoModule {
       return allIsWell;
 
     } else if (cipherMode == Cipher.DECRYPT_MODE) {
-      StringBuilder errorBuf = new StringBuilder(
-          "The following problems were found with the CryptoModuleParameters object you provided for a decrypt operation:\n");
+      StringBuilder errorBuf = new StringBuilder("The following problems were"
+          + " found with the CryptoModuleParameters object you provided for a"
+          + " decrypt operation:\n");
       boolean allIsWell = true;
 
       allIsWell = validateNotEmpty(params.getPadding(), allIsWell, errorBuf,
@@ -404,11 +407,11 @@ public class DefaultCryptoModule implements CryptoModule {
           params.setBlockStreamSize(0);
       } else {
 
-        log.trace(
-            "Read something off of the encrypted input stream that was not the encryption header marker, so pushing back bytes and returning the given stream");
+        log.trace("Read something off of the encrypted input stream that was"
+            + " not the encryption header marker, so pushing back bytes and"
+            + " returning the given stream");
         // Push these bytes back on to the stream. This method is a bit roundabout but isolates our
-        // code
-        // from having to understand the format that DataOuputStream uses for its bytes.
+        // code from having to understand the format that DataOutputStream uses for its bytes.
         ByteArrayOutputStream tempByteOut = new ByteArrayOutputStream();
         DataOutputStream tempOut = new DataOutputStream(tempByteOut);
         tempOut.writeUTF(marker);
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModuleUtils.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModuleUtils.java
index fdfc971..d32cdf9 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModuleUtils.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModuleUtils.java
@@ -41,14 +41,12 @@ public class DefaultCryptoModuleUtils {
       secureRandom.nextBytes(throwAway);
 
     } catch (NoSuchAlgorithmException e) {
-      log.error(String.format(
-          "Accumulo configuration file specified a secure random generator \"%s\" that was not found by any provider.",
-          secureRNG));
+      log.error(String.format("Accumulo configuration file specified a secure"
+          + " random generator \"%s\" that was not found by any provider.", secureRNG));
       throw new RuntimeException(e);
     } catch (NoSuchProviderException e) {
-      log.error(String.format(
-          "Accumulo configuration file specified a secure random provider \"%s\" that does not exist",
-          secureRNGProvider));
+      log.error(String.format("Accumulo configuration file specified a secure"
+          + " random provider \"%s\" that does not exist", secureRNGProvider));
       throw new RuntimeException(e);
     }
     return secureRandom;
@@ -63,13 +61,13 @@ public class DefaultCryptoModuleUtils {
       try {
         cipher = Cipher.getInstance(cipherSuite);
       } catch (NoSuchAlgorithmException e) {
-        log.error(String.format(
-            "Accumulo configuration file contained a cipher suite \"%s\" that was not recognized by any providers",
-            cipherSuite));
+        log.error(String.format("Accumulo configuration file contained a cipher"
+            + " suite \"%s\" that was not recognized by any providers", cipherSuite));
         throw new RuntimeException(e);
       } catch (NoSuchPaddingException e) {
         log.error(String.format(
-            "Accumulo configuration file contained a cipher, \"%s\" with a padding that was not recognized by any providers",
+            "Accumulo configuration file contained a"
+            "Accumulo configuration file contained a cipher, \"%s\" with a"
+                + " padding that was not recognized by any providers",
         throw new RuntimeException(e);
       }
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/NonCachingSecretKeyEncryptionStrategy.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/NonCachingSecretKeyEncryptionStrategy.java
index 7a4e1cc..bfe207a 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/NonCachingSecretKeyEncryptionStrategy.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/NonCachingSecretKeyEncryptionStrategy.java
@@ -49,8 +49,10 @@ public class NonCachingSecretKeyEncryptionStrategy implements SecretKeyEncryptio
       if (!fs.exists(pathToKey)) {
 
         if (encryptionMode == Cipher.UNWRAP_MODE) {
-          log.error("There was a call to decrypt the session key but no key encryption key exists. "
-              + "Either restore it, reconfigure the conf file to point to it in HDFS, or throw the affected data away and begin again.");
+          log.error("There was a call to decrypt the session key but no key"
+              + " encryption key exists. Either restore it, reconfigure the conf"
+              + " file to point to it in HDFS, or throw the affected data away and"
+              + " begin again.");
           throw new RuntimeException(
               "Could not find key encryption key file in configured location in HDFS ("
                   + pathToKeyName + ")");
diff --git a/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java b/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
index 98ade0d..a42198e 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
@@ -66,8 +66,9 @@ public class CreateToken implements KeywordExecutable {
     public String tokenClassName = PasswordToken.class.getName();
 
     @Parameter(names = {"-f", "--file"},
-        description = "The filename to save the auth token to. Multiple tokens can be stored in the same file,"
-            + " but only the first for each user will be recognized.")
+        description = "The filename to save the auth token to. Multiple tokens"
+            + " can be stored in the same file, but only the first for each user will"
+            + " be recognized.")
     public String tokenFile = null;
   }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/util/Merge.java b/core/src/main/java/org/apache/accumulo/core/util/Merge.java
index 2439ced..4b01131 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/Merge.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/Merge.java
@@ -71,7 +71,8 @@ public class Merge {
         converter = MemoryConverter.class)
     Long goalSize = null;
     @Parameter(names = {"-f", "--force"},
-        description = "merge small tablets even if merging them to larger tablets might cause a split")
+        description = "merge small tablets even if merging them to larger"
+            + " tablets might cause a split")
     boolean force = false;
     @Parameter(names = {"-b", "--begin"}, description = "start tablet",
         converter = TextConverter.class)
diff --git a/core/src/main/java/org/apache/accumulo/core/volume/NonConfiguredVolume.java b/core/src/main/java/org/apache/accumulo/core/volume/NonConfiguredVolume.java
index 19310dc..2d0b4a6 100644
--- a/core/src/main/java/org/apache/accumulo/core/volume/NonConfiguredVolume.java
+++ b/core/src/main/java/org/apache/accumulo/core/volume/NonConfiguredVolume.java
@@ -60,8 +60,8 @@ public class NonConfiguredVolume implements Volume {
 
   @Override
   public boolean isValidPath(Path p) {
-    throw new UnsupportedOperationException(
-        "Cannot determine if path is valid because this Volume isn't configured in accumulo-site.xml");
+    throw new UnsupportedOperationException("Cannot determine if path is valid"
+        + " because this Volume isn't configured in accumulo-site.xml");
   }
 
   @Override
diff --git a/core/src/test/java/org/apache/accumulo/core/client/security/SecurityErrorCodeTest.java b/core/src/test/java/org/apache/accumulo/core/client/security/SecurityErrorCodeTest.java
index e10ce23..5c42876 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/security/SecurityErrorCodeTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/security/SecurityErrorCodeTest.java
@@ -34,9 +34,12 @@ public class SecurityErrorCodeTest {
     for (SecurityErrorCode sec : SecurityErrorCode.values())
       secNames1.add(sec.name());
 
-    for (org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode sec : org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode
+    // @formatter:off
+    for (org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode sec :
+        org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode
         .values())
       secNames2.add(sec.name());
+    // @formatter:on
 
     Assert.assertEquals(secNames1, secNames2);
   }
diff --git a/core/src/test/java/org/apache/accumulo/core/conf/PropertyTypeTest.java b/core/src/test/java/org/apache/accumulo/core/conf/PropertyTypeTest.java
index 97cca59..3dcf0fd 100644
--- a/core/src/test/java/org/apache/accumulo/core/conf/PropertyTypeTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/conf/PropertyTypeTest.java
@@ -55,7 +55,8 @@ public class PropertyTypeTest {
   @Test
   public void testGetFormatDescription() {
     assertEquals(
-        "An arbitrary string of characters whose format is unspecified and interpreted based on the context of the property to which it applies.",
+        "An arbitrary string of characters whose format is unspecified"
+            + " and interpreted based on the context of the property to which it applies.",
         PropertyType.STRING.getFormatDescription());
   }
 
diff --git a/core/src/test/java/org/apache/accumulo/core/data/ConstraintViolationSummaryTest.java b/core/src/test/java/org/apache/accumulo/core/data/ConstraintViolationSummaryTest.java
index 4aa5dfb..d6a6a70 100644
--- a/core/src/test/java/org/apache/accumulo/core/data/ConstraintViolationSummaryTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/data/ConstraintViolationSummaryTest.java
@@ -26,13 +26,14 @@ public class ConstraintViolationSummaryTest {
   public void testToString() {
     ConstraintViolationSummary cvs = new ConstraintViolationSummary("fooClass", (short) 1,
         "fooDescription", 100L);
-    assertEquals(
-        "ConstraintViolationSummary(constrainClass:fooClass, violationCode:1, violationDescription:fooDescription, numberOfViolatingMutations:100)",
-        cvs.toString());
+    assertEquals("ConstraintViolationSummary(constrainClass:fooClass,"
+        + " violationCode:1, violationDescription:fooDescription,"
+        + " numberOfViolatingMutations:100)", cvs.toString());
 
     cvs = new ConstraintViolationSummary(null, (short) 2, null, 101L);
     assertEquals(
-        "ConstraintViolationSummary(constrainClass:null, violationCode:2, violationDescription:null, numberOfViolatingMutations:101)",
+        "ConstraintViolationSummary(constrainClass:null,"
+            + " violationCode:2, violationDescription:null, numberOfViolatingMutations:101)",
         cvs.toString());
   }
 }
diff --git a/core/src/test/java/org/apache/accumulo/core/data/MutationTest.java b/core/src/test/java/org/apache/accumulo/core/data/MutationTest.java
index 6c4b208..5bbd2b5 100644
--- a/core/src/test/java/org/apache/accumulo/core/data/MutationTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/data/MutationTest.java
@@ -488,9 +488,9 @@ public class MutationTest {
       }
       sb.append(" ");
     }
-    assertEquals(
-        "80322031 32333435 36373839 20313233 34353637 38392031 32333435 36373839 20313233 34353637 38392031 32333435 36373839 06000000 00000001 ",
-        sb.toString());
+    assertEquals("80322031 32333435 36373839 20313233 34353637"
+        + " 38392031 32333435 36373839 20313233 34353637"
+        + " 38392031 32333435 36373839 06000000 00000001 ", sb.toString());
 
   }
 
@@ -673,8 +673,8 @@ public class MutationTest {
     try {
       m.put("cf", "cq", "v");
     } catch (IllegalStateException e) {
-      fail(
-          "Calling Mutation#hashCode then Mutation#put should not result in an IllegalStateException.");
+      fail("Calling Mutation#hashCode then Mutation#put should not result in an"
+          + " IllegalStateException.");
     }
   }
 
@@ -687,8 +687,8 @@ public class MutationTest {
       m.put("cf", "cq", "v");
       m2.put("cf", "cq", "v");
     } catch (IllegalStateException e) {
-      fail(
-          "Calling Mutation#equals then Mutation#put should not result in an IllegalStateException.");
+      fail("Calling Mutation#equals then Mutation#put should not result in an"
+          + " IllegalStateException.");
     }
   }
 }
diff --git a/core/src/test/java/org/apache/accumulo/core/iterators/aggregation/conf/AggregatorConfigurationTest.java b/core/src/test/java/org/apache/accumulo/core/iterators/aggregation/conf/AggregatorConfigurationTest.java
index fa3f687..373fcb6 100644
--- a/core/src/test/java/org/apache/accumulo/core/iterators/aggregation/conf/AggregatorConfigurationTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/iterators/aggregation/conf/AggregatorConfigurationTest.java
@@ -47,11 +47,14 @@ public class AggregatorConfigurationTest {
   @SuppressWarnings("deprecation")
   private void runTest(Text colf) {
     String encodedCols;
-    org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig ac3 = new org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig(
-        colf, "com.foo.SuperAgg");
+    // @formatter:off
+    org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig ac3 =
+      new org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig(colf, "com.foo.SuperAgg");
     encodedCols = ac3.encodeColumns();
-    org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig ac4 = org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig
+    org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig ac4 =
+      org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig
         .decodeColumns(encodedCols, "com.foo.SuperAgg");
+    // @formatter:on
 
     assertEquals(colf, ac4.getColumnFamily());
     assertNull(ac4.getColumnQualifier());
@@ -59,11 +62,15 @@ public class AggregatorConfigurationTest {
 
   @SuppressWarnings("deprecation")
   private void runTest(Text colf, Text colq) {
-    org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig ac = new org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig(
-        colf, colq, "com.foo.SuperAgg");
+    // @formatter:off
+    org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig ac =
+      new org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig(colf, colq,
+        "com.foo.SuperAgg");
     String encodedCols = ac.encodeColumns();
-    org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig ac2 = org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig
+    org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig ac2 =
+      org.apache.accumulo.core.iterators.conf.PerColumnIteratorConfig
         .decodeColumns(encodedCols, "com.foo.SuperAgg");
+    // @formatter:on
 
     assertEquals(colf, ac2.getColumnFamily());
     assertEquals(colq, ac2.getColumnQualifier());
diff --git a/core/src/test/java/org/apache/accumulo/core/iterators/user/VisibilityFilterTest.java b/core/src/test/java/org/apache/accumulo/core/iterators/user/VisibilityFilterTest.java
index 583f457..0243791 100644
--- a/core/src/test/java/org/apache/accumulo/core/iterators/user/VisibilityFilterTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/iterators/user/VisibilityFilterTest.java
@@ -88,7 +88,8 @@ public class VisibilityFilterTest {
     int count = 0;
     while (filter.hasTop()) {
       count++;
-      // System.out.println(DefaultFormatter.formatEntry(Collections.singletonMap(filter.getTopKey(),
+      // System.out.println(DefaultFormatter.formatEntry(
+      // Collections.singletonMap(filter.getTopKey(),
       // filter.getTopValue()).entrySet().iterator().next(),
       // false));
       assertEquals(expectedCF, filter.getTopKey().getColumnFamily());
diff --git a/core/src/test/java/org/apache/accumulo/core/security/crypto/CryptoTest.java b/core/src/test/java/org/apache/accumulo/core/security/crypto/CryptoTest.java
index fc4c500..b3045ce 100644
--- a/core/src/test/java/org/apache/accumulo/core/security/crypto/CryptoTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/security/crypto/CryptoTest.java
@@ -59,7 +59,10 @@ public class CryptoTest {
   public static final String CONFIG_FILE_SYSTEM_PROP = "org.apache.accumulo.config.file";
   public static final String CRYPTO_ON_CONF = "crypto-on-accumulo-site.xml";
   public static final String CRYPTO_OFF_CONF = "crypto-off-accumulo-site.xml";
-  public static final String CRYPTO_ON_KEK_OFF_CONF = "crypto-on-no-key-encryption-accumulo-site.xml";
+  // @formatter:off
+  public static final String CRYPTO_ON_KEK_OFF_CONF =
+    "crypto-on-no-key-encryption-accumulo-site.xml";
+  // @formatter:on
 
   @Rule
   public ExpectedException exception = ExpectedException.none();
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
index fcc8f75..3ead254 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
@@ -130,10 +130,10 @@ public class RandomBatchWriter {
     opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
     if ((opts.max - opts.min) < 1L * opts.num) { // right-side multiplied by 1L to convert to long
                                                  // in a way that doesn't trigger FindBugs
-      System.err.println(String.format(
-          "You must specify a min and a max that allow for at least num possible values. "
-              + "For example, you requested %d rows, but a min of %d and a max of %d (exclusive), which only allows for %d rows.",
-          opts.num, opts.min, opts.max, (opts.max - opts.min)));
+      System.err.println(String.format("You must specify a min and a max that"
+          + " allow for at least num possible values. For example, you requested"
+          + " %d rows, but a min of %d and a max of %d (exclusive), which only"
+          + " allows for %d rows.", opts.num, opts.min, opts.max, (opts.max - opts.min)));
       System.exit(1);
     }
     Random r;
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
index 60b957d..01f1f1e 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
@@ -43,7 +43,16 @@ import com.beust.jcommander.Parameter;
 
 /**
  * Takes a table and outputs the specified column to a set of part files on hdfs
- * {@code accumulo accumulo.examples.mapreduce.TableToFile <username> <password> <tablename> <column> <hdfs-output-path>}
+ *
+ * <p>
+ * CLI options (in order):
+ * <ul>
+ * <li>username
+ * <li>password
+ * <li>tablename
+ * <li>column
+ * <li>hdfs-output-path
+ * </ul>
  */
 public class TableToFile extends Configured implements Tool {
 
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/sample/SampleExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/sample/SampleExample.java
index e5709db..5000d7d 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/sample/SampleExample.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/sample/SampleExample.java
@@ -91,8 +91,8 @@ public class SampleExample {
     print(scanner);
     System.out.println();
 
-    System.out.println(
-        "Scanning with sampler configuration.  Data was written before sampler was set on table, scan should fail.");
+    System.out.println("Scanning with sampler configuration. Data was written"
+        + " before sampler was set on table, scan should fail.");
     scanner.setSamplerConfiguration(sc1);
     try {
       print(scanner);
@@ -113,8 +113,8 @@ public class SampleExample {
     bw.addMutation(createMutation("2317", "milk, eggs, bread, parmigiano-reggiano, butter",
         "file://groceries/9/txt"));
     bw.close();
-    System.out.println(
-        "Scanning sample after updating content for docId 2317 (should see content change in sample data) : ");
+    System.out.println("Scanning sample after updating content for docId 2317"
+        + " (should see content change in sample data) : ");
     print(scanner);
     System.out.println();
 
@@ -126,8 +126,8 @@ public class SampleExample {
     conn.tableOperations().compact(opts.getTableName(),
         new CompactionConfig().setCompactionStrategy(NO_SAMPLE_STRATEGY));
 
-    System.out.println(
-        "Scanning with old sampler configuration.  Sample data was created using new configuration with a compaction.  Scan should fail.");
+    System.out.println("Scanning with old sampler configuration. Sample data"
+        + " was created using new configuration with a compaction. Scan should fail.");
     try {
       // try scanning with old sampler configuration
       print(scanner);
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
index 22fdb1e..432610d 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
@@ -50,11 +50,13 @@ public class Query {
     List<String> terms = new ArrayList<>();
 
     @Parameter(names = {"--sample"},
-        description = "Do queries against sample, useful when sample is built using column qualifier")
+        description = "Do queries against sample, useful when sample is built"
+            + " using column qualifier")
     private boolean useSample = false;
 
     @Parameter(names = {"--sampleCutoff"},
-        description = "Use sample data to determine if a query might return a number of documents over the cutoff.  This check is per tablet.")
+        description = "Use sample data to determine if a query might return a"
+            + " number of documents over the cutoff. This check is per tablet.")
     private Integer sampleCutoff = null;
   }
 
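The @Parameter annotations above are JCommander bindings; a sketch of how such an Opts class gets populated from the command line (class name and flags below are illustrative, not the example's actual Opts):

    import com.beust.jcommander.JCommander;
    import com.beust.jcommander.Parameter;

    public class OptsSketch {
      @Parameter(names = {"--sample"}, description = "Query against the sample")
      boolean useSample = false;

      @Parameter(names = {"--sampleCutoff"}, description = "Per-tablet document cutoff")
      Integer sampleCutoff = null;

      public static void main(String[] args) {
        OptsSketch opts = new OptsSketch();
        new JCommander(opts, args); // fills the annotated fields from args
        System.out.println(opts.useSample + " / " + opts.sampleCutoff);
      }
    }
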
diff --git a/fate/src/main/java/org/apache/accumulo/fate/util/AddressUtil.java b/fate/src/main/java/org/apache/accumulo/fate/util/AddressUtil.java
index eb1fe22..2f8c33f 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/util/AddressUtil.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/util/AddressUtil.java
@@ -44,24 +44,23 @@ public class AddressUtil {
     try {
       negativeTtl = Integer.parseInt(Security.getProperty("networkaddress.cache.negative.ttl"));
     } catch (NumberFormatException exception) {
-      log.warn(
-          "Failed to get JVM negative DNS respones cache TTL due to format problem (e.g. this JVM might not have the "
-              + "property). Falling back to default based on Oracle JVM 1.4+ (10s)",
-          exception);
+      log.warn("Failed to get JVM negative DNS respones cache TTL due to format problem "
+          + "(e.g. this JVM might not have the property). "
+          + "Falling back to default based on Oracle JVM 1.4+ (10s)", exception);
     } catch (SecurityException exception) {
-      log.warn(
-          "Failed to get JVM negative DNS response cache TTL due to security manager. Falling back to default based on Oracle JVM 1.4+ (10s)",
-          exception);
+      log.warn("Failed to get JVM negative DNS response cache TTL due to security manager. "
+          + "Falling back to default based on Oracle JVM 1.4+ (10s)", exception);
     }
     if (-1 == negativeTtl) {
       log.error(
-          "JVM negative DNS repsonse cache TTL is set to 'forever' and host lookup failed. TTL can be changed with security property "
+          "JVM negative DNS repsonse cache TTL is set to 'forever' and host lookup failed. "
+              + "TTL can be changed with security property "
               + "'networkaddress.cache.negative.ttl', see java.net.InetAddress.",
           originalException);
       throw new IllegalArgumentException(originalException);
     } else if (0 > negativeTtl) {
-      log.warn(
-          "JVM specified negative DNS response cache TTL was negative (and not 'forever'). Falling back to default based on Oracle JVM 1.4+ (10s)");
+      log.warn("JVM specified negative DNS response cache TTL was negative (and not 'forever'). "
+          + "Falling back to default based on Oracle JVM 1.4+ (10s)");
       negativeTtl = 10;
     }
     return negativeTtl;
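
The branches above cover the three interesting values of the JVM security property networkaddress.cache.negative.ttl: unparsable (fall back to the 10s default), -1 (cache failures forever, which is fatal here), and other negatives (also defaulted). A self-contained sketch of reading the property the same way:

    import java.security.Security;

    static int negativeDnsCacheTtl() {
      try {
        // Returns null when unset; parseInt(null) lands in the catch below.
        int ttl = Integer.parseInt(Security.getProperty("networkaddress.cache.negative.ttl"));
        return ttl; // -1 means the JVM caches DNS failures forever
      } catch (NumberFormatException | SecurityException e) {
        return 10; // documented Oracle JVM 1.4+ default
      }
    }
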
diff --git a/fate/src/test/java/org/apache/accumulo/fate/util/AddressUtilTest.java b/fate/src/test/java/org/apache/accumulo/fate/util/AddressUtilTest.java
index 9c7ff62..1241c36 100644
--- a/fate/src/test/java/org/apache/accumulo/fate/util/AddressUtilTest.java
+++ b/fate/src/test/java/org/apache/accumulo/fate/util/AddressUtilTest.java
@@ -62,20 +62,20 @@ public class AddressUtilTest extends TestCase {
     }
     try {
       if (expectException) {
-        log.info(
-            "AddressUtil is (hopefully) going to spit out an error about DNS lookups. you can ignore it.");
+        log.info("AddressUtil is (hopefully) going to spit out an error about DNS lookups. "
+            + "you can ignore it.");
       }
       int result = AddressUtil.getAddressCacheNegativeTtl(null);
       if (expectException) {
-        fail(
-            "The JVM Security settings cache DNS failures forever. In this case we expect an exception but didn't get one.");
+        fail("The JVM Security settings cache DNS failures forever. "
+            + "In this case we expect an exception but didn't get one.");
       }
       assertEquals("Didn't get the ttl we expected", expectedTtl, result);
     } catch (IllegalArgumentException exception) {
       if (!expectException) {
         log.error("Got an exception when we weren't expecting.", exception);
-        fail(
-            "We only expect to throw an IllegalArgumentException when the JVM caches DNS failures forever.");
+        fail("We only expect to throw an IllegalArgumentException when the JVM "
+            + "caches DNS failures forever.");
       }
     }
   }
@@ -90,8 +90,8 @@ public class AddressUtilTest extends TestCase {
       return;
     }
     try {
-      log.info(
-          "AddressUtil is (hopefully) going to spit out an error about DNS lookups. you can ignore it.");
+      log.info("AddressUtil is (hopefully) going to spit out an error about DNS lookups. "
+          + "you can ignore it.");
       AddressUtil.getAddressCacheNegativeTtl(null);
       fail("The JVM Security settings cache DNS failures forever, this should cause an exception.");
     } catch (IllegalArgumentException exception) {
diff --git a/iterator-test-harness/src/main/java/org/apache/accumulo/iteratortest/testcases/MultipleHasTopCalls.java b/iterator-test-harness/src/main/java/org/apache/accumulo/iteratortest/testcases/MultipleHasTopCalls.java
index 467d552..8974ce3 100644
--- a/iterator-test-harness/src/main/java/org/apache/accumulo/iteratortest/testcases/MultipleHasTopCalls.java
+++ b/iterator-test-harness/src/main/java/org/apache/accumulo/iteratortest/testcases/MultipleHasTopCalls.java
@@ -82,8 +82,8 @@ public class MultipleHasTopCalls extends OutputVerifyingTestCase {
   }
 
   IllegalStateException badStateException(boolean expectedState) {
-    return new IllegalStateException(
-        "Multiple sequential calls to hasTop should not alter the state or return value of the iterator. Expected '"
-            + expectedState + ", but got '" + !expectedState + "'.");
+    return new IllegalStateException("Multiple sequential calls to hasTop"
+        + " should not alter the state or return value of the iterator. Expected '" + expectedState
+        + ", but got '" + !expectedState + "'.");
   }
 }
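
The exception above documents the contract under test: hasTop() must be idempotent. A sketch of the kind of check the harness performs (the iterator parameter is illustrative):

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.SortedKeyValueIterator;

    static void checkHasTopIdempotent(SortedKeyValueIterator<Key,Value> iter) {
      boolean first = iter.hasTop();
      for (int i = 0; i < 5; i++) {
        // Repeated calls must neither advance the iterator nor flip the answer.
        if (iter.hasTop() != first)
          throw new IllegalStateException("hasTop flipped from " + first + " on a repeat call");
      }
    }
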
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
index cdc345a..02c0723 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
@@ -81,8 +81,8 @@ public class MiniAccumuloRunner {
   private static void printProperties() {
     System.out.println("#mini Accumulo cluster runner properties.");
     System.out.println("#");
-    System.out.println(
-        "#uncomment following propeties to use, propeties not set will use default or random value");
+    System.out.println("#uncomment following propeties to use, propeties not"
+        + " set will use default or random value");
     System.out.println();
     System.out.println("#" + INSTANCE_NAME_PROP + "=devTest");
     System.out.println("#" + DIRECTORY_PROP + "=/tmp/mac1");
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImpl.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImpl.java
index b2adba5..afcb9c3 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImpl.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImpl.java
@@ -193,8 +193,8 @@ public class MiniAccumuloConfigImpl {
     }
 
     if (!CredentialProviderFactoryShim.isHadoopCredentialProviderAvailable()) {
-      throw new RuntimeException(
-          "Cannot use CredentialProvider when implementation is not available. Be sure to use >=Hadoop-2.6.0");
+      throw new RuntimeException("Cannot use CredentialProvider when"
+          + " implementation is not available. Be sure to use >=Hadoop-2.6.0");
     }
 
     File keystoreFile = new File(getConfDir(), "credential-provider.jks");
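
The guard above exists because the CredentialProvider API only appeared in Hadoop 2.6.0. A sketch of the reflection-style probe such a shim typically uses (the class name is the Hadoop 2.6+ one; treat the approach as an assumption about the shim's internals):

    static boolean hadoopCredentialProviderAvailable() {
      try {
        Class.forName("org.apache.hadoop.security.alias.CredentialProviderFactory");
        return true; // running on Hadoop >= 2.6.0
      } catch (ClassNotFoundException e) {
        return false; // older Hadoop on the classpath
      }
    }
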
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java b/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
index 49f19a0..89c755d 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
@@ -126,8 +126,8 @@ public class Proxy implements KeywordExecutable {
     String zookeepers = opts.prop.getProperty(ZOOKEEPERS_KEY);
 
     if (!useMini && !useMock && instance == null) {
-      System.err.println(
-          "Properties file must contain one of : useMiniAccumulo=true, useMockInstance=true, or instance=<instance name>");
+      System.err.println("Properties file must contain one of : useMiniAccumulo=true,"
+          + " useMockInstance=true, or instance=<instance name>");
       System.exit(1);
     }
 
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index 074c458..c5aeb9c 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -131,7 +131,8 @@ import com.google.common.cache.RemovalNotification;
 public class ProxyServer implements AccumuloProxy.Iface {
 
   public static final Logger logger = LoggerFactory.getLogger(ProxyServer.class);
-  public static final String RPC_ACCUMULO_PRINCIPAL_MISMATCH_MSG = "RPC principal did not match requested Accumulo principal";
+  public static final String RPC_ACCUMULO_PRINCIPAL_MISMATCH_MSG = "RPC"
+      + " principal did not match requested Accumulo principal";
   protected Instance instance;
 
   protected Class<? extends AuthenticationToken> tokenClass;
@@ -414,8 +415,11 @@ public class ProxyServer implements AccumuloProxy.Iface {
           .setIterators(getIteratorSettings(iterators)).setFlush(flush).setWait(wait);
 
       if (compactionStrategy != null) {
-        org.apache.accumulo.core.client.admin.CompactionStrategyConfig ccc = new org.apache.accumulo.core.client.admin.CompactionStrategyConfig(
+        // @formatter:off
+        org.apache.accumulo.core.client.admin.CompactionStrategyConfig ccc =
+          new org.apache.accumulo.core.client.admin.CompactionStrategyConfig(
             compactionStrategy.getClassName());
+        // @formatter:on
         if (compactionStrategy.options != null)
           ccc.setOptions(compactionStrategy.options);
         compactionConfig.setCompactionStrategy(ccc);
@@ -808,7 +812,10 @@ public class ProxyServer implements AccumuloProxy.Iface {
       List<ActiveScan> activeScans = getConnector(login).instanceOperations()
           .getActiveScans(tserver);
       for (ActiveScan scan : activeScans) {
-        org.apache.accumulo.proxy.thrift.ActiveScan pscan = new org.apache.accumulo.proxy.thrift.ActiveScan();
+        // @formatter:off
+        org.apache.accumulo.proxy.thrift.ActiveScan pscan =
+          new org.apache.accumulo.proxy.thrift.ActiveScan();
+        // @formatter:on
         pscan.client = scan.getClient();
         pscan.user = scan.getUser();
         pscan.table = scan.getTable();
@@ -822,7 +829,10 @@ public class ProxyServer implements AccumuloProxy.Iface {
         pscan.columns = new ArrayList<>();
         if (scan.getColumns() != null) {
           for (Column c : scan.getColumns()) {
-            org.apache.accumulo.proxy.thrift.Column column = new org.apache.accumulo.proxy.thrift.Column();
+            // @formatter:off
+            org.apache.accumulo.proxy.thrift.Column column =
+              new org.apache.accumulo.proxy.thrift.Column();
+            // @formatter:on
             column.setColFamily(c.getColumnFamily());
             column.setColQualifier(c.getColumnQualifier());
             column.setColVisibility(c.getColumnVisibility());
@@ -836,8 +846,11 @@ public class ProxyServer implements AccumuloProxy.Iface {
             String name = parts[0];
             int priority = Integer.parseInt(parts[1]);
             String classname = parts[2];
-            org.apache.accumulo.proxy.thrift.IteratorSetting settings = new org.apache.accumulo.proxy.thrift.IteratorSetting(
+            // @formatter:off
+            org.apache.accumulo.proxy.thrift.IteratorSetting settings =
+              new org.apache.accumulo.proxy.thrift.IteratorSetting(
                 priority, name, classname, scan.getSsio().get(name));
+            // @formatter:on
             pscan.iterators.add(settings);
           }
         }
@@ -866,7 +879,10 @@ public class ProxyServer implements AccumuloProxy.Iface {
       List<ActiveCompaction> active = getConnector(login).instanceOperations()
           .getActiveCompactions(tserver);
       for (ActiveCompaction comp : active) {
-        org.apache.accumulo.proxy.thrift.ActiveCompaction pcomp = new org.apache.accumulo.proxy.thrift.ActiveCompaction();
+        // @formatter:off
+        org.apache.accumulo.proxy.thrift.ActiveCompaction pcomp =
+          new org.apache.accumulo.proxy.thrift.ActiveCompaction();
+        // @formatter:on
         pcomp.age = comp.getAge();
         pcomp.entriesRead = comp.getEntriesRead();
         pcomp.entriesWritten = comp.getEntriesWritten();
@@ -885,9 +901,12 @@ public class ProxyServer implements AccumuloProxy.Iface {
         pcomp.iterators = new ArrayList<>();
         if (comp.getIterators() != null) {
           for (IteratorSetting setting : comp.getIterators()) {
-            org.apache.accumulo.proxy.thrift.IteratorSetting psetting = new org.apache.accumulo.proxy.thrift.IteratorSetting(
+            // @formatter:off
+            org.apache.accumulo.proxy.thrift.IteratorSetting psetting =
+              new org.apache.accumulo.proxy.thrift.IteratorSetting(
                 setting.getPriority(), setting.getName(), setting.getIteratorClass(),
                 setting.getOptions());
+            // @formatter:on
             pcomp.iterators.add(psetting);
           }
         }
@@ -2174,8 +2193,11 @@ public class ProxyServer implements AccumuloProxy.Iface {
         ConditionalMutation cmut = new ConditionalMutation(ByteBufferUtil.toBytes(cu.getKey()));
 
         for (Condition tcond : cu.getValue().conditions) {
-          org.apache.accumulo.core.data.Condition cond = new org.apache.accumulo.core.data.Condition(
+          // @formatter:off
+          org.apache.accumulo.core.data.Condition cond =
+            new org.apache.accumulo.core.data.Condition(
               tcond.column.getColFamily(), tcond.column.getColQualifier());
+          // @formatter:on
 
           if (tcond.getColumn().getColVisibility() != null
               && tcond.getColumn().getColVisibility().length > 0) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
index 2a454c9..2f98fdd 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
@@ -252,8 +252,9 @@ public class Accumulo {
               setting = setting.trim();
               if (bytes > 0 && Integer.parseInt(setting) > 10) {
                 log.warn("System swappiness setting is greater than ten (" + setting
-                    + ") which can cause time-sensitive operations to be delayed. "
-                    + " Accumulo is time sensitive because it needs to maintain distributed lock agreement.");
+                    + ") which can cause time-sensitive operations to"
+                    + " be delayed. Accumulo is time sensitive because it needs to"
+                    + " maintain distributed lock agreement.");
               }
             } finally {
               is.close();
@@ -340,9 +341,10 @@ public class Accumulo {
           new ZooStore<Accumulo>(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZFATE,
               ZooReaderWriter.getInstance()));
       if (!(fate.list().isEmpty())) {
-        throw new AccumuloException(
-            "Aborting upgrade because there are outstanding FATE transactions from a previous Accumulo version. "
-                + "Please see the README document for instructions on what to do under your previous version.");
+        throw new AccumuloException("Aborting upgrade because there are"
+            + " outstanding FATE transactions from a previous Accumulo version."
+            + " Please see the README document for instructions on what to do under"
+            + " your previous version.");
       }
     } catch (Exception exception) {
       log.fatal("Problem verifying Fate readiness", exception);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/GarbageCollectionLogger.java b/server/base/src/main/java/org/apache/accumulo/server/GarbageCollectionLogger.java
index c2560c0..b39a2c0 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/GarbageCollectionLogger.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/GarbageCollectionLogger.java
@@ -100,8 +100,8 @@ public class GarbageCollectionLogger {
     if (lastMemoryCheckTime > 0 && lastMemoryCheckTime < now) {
       final long diff = now - lastMemoryCheckTime;
       if (diff > keepAliveTimeout + 1000) {
-        log.warn(String.format(
-            "GC pause checker not called in a timely fashion. Expected every %.1f seconds but was %.1f seconds since last check",
+        log.warn(String.format("GC pause checker not called in a timely"
+            + " fashion. Expected every %.1f seconds but was %.1f seconds since" + " last check",
             keepAliveTimeout / 1000., diff / 1000.));
       }
       lastMemoryCheckTime = now;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java b/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
index a55a827..1f03fee 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
@@ -110,8 +110,8 @@ public class PreferredVolumeChooser extends RandomVolumeChooser {
     // If there are no preferred volumes left, then warn the user and choose randomly from the
     // instance volumes
     if (filteredOptions.isEmpty()) {
-      log.warn(
-          "Preferred volumes are not instance volumes. Defaulting to randomly choosing from instance volumes");
+      log.warn("Preferred volumes are not instance volumes. Defaulting to"
+          + " randomly choosing from instance volumes");
       return super.choose(env, options);
     }
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
index d3045bf..2557595 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
@@ -227,8 +227,9 @@ public class VolumeManagerImpl implements VolumeManager {
           synchronized (WARNED_ABOUT_SYNCONCLOSE) {
             if (!WARNED_ABOUT_SYNCONCLOSE.contains(entry.getKey())) {
               WARNED_ABOUT_SYNCONCLOSE.add(entry.getKey());
-              log.warn(DFS_DATANODE_SYNCONCLOSE
-                  + " set to false in hdfs-site.xml: data loss is possible on hard system reset or power loss");
+              log.warn(DFS_DATANODE_SYNCONCLOSE + " set to false in"
+                  + " hdfs-site.xml: data loss is possible on hard system reset or"
+                  + " power loss");
             }
           }
         }
@@ -492,8 +493,9 @@ public class VolumeManagerImpl implements VolumeManager {
     final String choice = chooser.choose(env, options);
     if (!(ArrayUtils.contains(options, choice))) {
       log.error("The configured volume chooser, '" + chooser.getClass()
-          + "', or one of its delegates returned a volume not in the set of options provided; "
-          + "will continue by relying on a RandomVolumeChooser. You should investigate and correct the named chooser.");
+          + "', or one of its delegates returned a volume not in the set of"
+          + " options provided; will continue by relying on a RandomVolumeChooser."
+          + " You should investigate and correct the named chooser.");
       return failsafeChooser.choose(env, options);
     }
     return choice;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index be6ca8b..4fab114 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@ -247,8 +247,8 @@ public class Initialize implements KeywordExecutable {
     log.info("Accumulo data dirs are "
         + Arrays.asList(VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance())));
     log.info("Zookeeper server is " + sconf.get(Property.INSTANCE_ZK_HOST));
-    log.info(
-        "Checking if Zookeeper is available. If this hangs, then you need to make sure zookeeper is running");
+    log.info("Checking if Zookeeper is available. If this hangs, then you need"
+        + " to make sure zookeeper is running");
     if (!zookeeperAvailable()) {
       // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j compatibility
       log.error("FATAL Zookeeper needs to be up and running in order to init. Exiting ...");
@@ -259,16 +259,16 @@ public class Initialize implements KeywordExecutable {
       c.beep();
       c.println();
       c.println();
-      c.println(
-          "Warning!!! Your instance secret is still set to the default, this is not secure. We highly recommend you change it.");
+      c.println("Warning!!! Your instance secret is still set to the default,"
+          + " this is not secure. We highly recommend you change it.");
       c.println();
       c.println();
       c.println("You can change the instance secret in accumulo by using:");
       c.println("   bin/accumulo " + org.apache.accumulo.server.util.ChangeSecret.class.getName()
           + " oldPassword newPassword.");
-      c.println(
-          "You will also need to edit your secret in your configuration file by adding the property instance.secret to your conf/accumulo-site.xml. "
-              + "Without this accumulo will not operate correctly");
+      c.println("You will also need to edit your secret in your configuration"
+          + " file by adding the property instance.secret to your"
+          + " conf/accumulo-site.xml. Without this accumulo will not operate" + " correctly");
     }
     try {
       if (isInitialized(fs)) {
@@ -382,8 +382,8 @@ public class Initialize implements KeywordExecutable {
         if (defaultFsUri.equals(fsDefaultName) && defaultFsUri.equals(fsDefaultFS)) {
           log.error("FATAL: Default filesystem value ('fs.defaultFS' or 'fs.default.name') of '"
               + defaultFsUri + "' was found in the Hadoop configuration");
-          log.error(
-              "FATAL: Please ensure that the Hadoop core-site.xml is on the classpath using 'general.classpaths' in accumulo-site.xml");
+          log.error("FATAL: Please ensure that the Hadoop core-site.xml is on"
+              + " the classpath using 'general.classpaths' in accumulo-site.xml");
         }
       }
 
@@ -409,8 +409,8 @@ public class Initialize implements KeywordExecutable {
           // Fail if the site configuration doesn't contain appropriate credentials to login as
           // servers
           if (StringUtils.isBlank(accumuloKeytab) || StringUtils.isBlank(accumuloPrincipal)) {
-            log.error(
-                "FATAL: No Kerberos credentials provided, and Accumulo is not properly configured for server login");
+            log.error("FATAL: No Kerberos credentials provided, and Accumulo is"
+                + " not properly configured for server login");
             return false;
           }
 
@@ -814,7 +814,8 @@ public class Initialize implements KeywordExecutable {
       if (aBasePath.equals(replacementVolume.getFirst()))
         log.error(aBasePath + " is set to be replaced in " + Property.INSTANCE_VOLUMES_REPLACEMENTS
             + " and should not appear in " + Property.INSTANCE_VOLUMES
-            + ". It is highly recommended that this property be removed as data could still be written to this volume.");
+            + ". It is highly recommended that this property be removed as data"
+            + " could still be written to this volume.");
     }
 
     if (ServerConstants.DATA_VERSION != Accumulo.getAccumuloPersistentVersion(
diff --git a/server/base/src/main/java/org/apache/accumulo/server/log/WalStateManager.java b/server/base/src/main/java/org/apache/accumulo/server/log/WalStateManager.java
index 75d129b..22e9ee1 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/log/WalStateManager.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/log/WalStateManager.java
@@ -37,27 +37,33 @@ import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/*
- * This class governs the space in Zookeeper that advertises the status of Write-Ahead Logs
- * in use by tablet servers and the replication machinery.
+/**
+ * This class governs the space in Zookeeper that advertises the status of Write-Ahead Logs in use
+ * by tablet servers and the replication machinery.
  *
- * The Master needs to know the state of the WALs to mark tablets during recovery.
- * The GC needs to know when a log is no longer needed so it can be removed.
- * The replication mechanism needs to know when a log is closed and can be forwarded to the destination table.
+ * <p>
+ * The Master needs to know the state of the WALs to mark tablets during recovery. The GC needs to
+ * know when a log is no longer needed so it can be removed. The replication mechanism needs to know
+ * when a log is closed and can be forwarded to the destination table.
  *
- * The state of the WALs is kept in Zookeeper under /accumulo/<instanceid>/wals.
- * For each server, there is a znode formatted like the TServerInstance.toString(): "host:port[sessionid]".
- * Under the server znode, is a node for each log, using the UUID for the log.
- * In each of the WAL znodes, is the current state of the log, and the full path to the log.
+ * <p>
+ * The state of the WALs is kept in Zookeeper under /accumulo/&lt;instanceid&gt;/wals. For each
+ * server, there is a znode formatted like the TServerInstance.toString(): "host:port[sessionid]".
+ * Under the server znode, is a node for each log, using the UUID for the log. In each of the WAL
+ * znodes, is the current state of the log, and the full path to the log.
  *
- * The state [OPEN, CLOSED, UNREFERENCED] is what the tablet server believes to be the state of the file.
+ * <p>
+ * The state [OPEN, CLOSED, UNREFERENCED] is what the tablet server believes to be the state of the
+ * file.
  *
- * In the event of a recovery, the log is identified as belonging to a dead server.  The master will update
- * the tablets assigned to that server with log references. Once all tablets have been reassigned and the log
- * references are removed, the log will be eligible for deletion.
+ * <p>
+ * In the event of a recovery, the log is identified as belonging to a dead server. The master will
+ * update the tablets assigned to that server with log references. Once all tablets have been
+ * reassigned and the log references are removed, the log will be eligible for deletion.
  *
- * Even when a log is UNREFERENCED by the tablet server, the replication mechanism may still need the log.
- * The GC will defer log removal until replication is finished with it.
+ * <p>
+ * Even when a log is UNREFERENCED by the tablet server, the replication mechanism may still need
+ * the log. The GC will defer log removal until replication is finished with it.
  *
  */
 public class WalStateManager {
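
To make the layout described in this Javadoc concrete, a sketch of how the znode paths compose (all values illustrative; the real code derives them from ZooUtil.getRoot() and TServerInstance.toString()):

    /** Illustrative only: mirrors the layout documented above. */
    static String walZnode(String instanceId, String tserver, String walUuid) {
      // e.g. /accumulo/<instanceid>/wals/host:9997[14a5e0bb6db]/<uuid>
      // The znode's data holds the state (OPEN|CLOSED|UNREFERENCED) plus the log's full path.
      return "/accumulo/" + instanceId + "/wals/" + tserver + "/" + walUuid;
    }
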
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
index 31db7de..b2beaa5 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
@@ -86,24 +86,25 @@ import com.google.common.collect.Multimap;
  */
 public class HostRegexTableLoadBalancer extends TableLoadBalancer implements ConfigurationObserver {
 
+  private static final String PROP_PREFIX = Property.TABLE_ARBITRARY_PROP_PREFIX.getKey();
+
   private static final Logger LOG = LoggerFactory.getLogger(HostRegexTableLoadBalancer.class);
-  public static final String HOST_BALANCER_PREFIX = Property.TABLE_ARBITRARY_PROP_PREFIX.getKey()
-      + "balancer.host.regex.";
-  public static final String HOST_BALANCER_OOB_CHECK_KEY = Property.TABLE_ARBITRARY_PROP_PREFIX
-      .getKey() + "balancer.host.regex.oob.period";
+  public static final String HOST_BALANCER_PREFIX = PROP_PREFIX + "balancer.host.regex.";
+  public static final String HOST_BALANCER_OOB_CHECK_KEY = PROP_PREFIX
+      + "balancer.host.regex.oob.period";
   private static final String HOST_BALANCER_OOB_DEFAULT = "5m";
   @Deprecated
-  public static final String HOST_BALANCER_POOL_RECHECK_KEY = Property.TABLE_ARBITRARY_PROP_PREFIX
-      .getKey() + "balancer.host.regex.pool.check";
-  public static final String HOST_BALANCER_REGEX_USING_IPS_KEY = Property.TABLE_ARBITRARY_PROP_PREFIX
-      .getKey() + "balancer.host.regex.is.ip";
-  public static final String HOST_BALANCER_REGEX_MAX_MIGRATIONS_KEY = Property.TABLE_ARBITRARY_PROP_PREFIX
-      .getKey() + "balancer.host.regex.concurrent.migrations";
+  public static final String HOST_BALANCER_POOL_RECHECK_KEY = PROP_PREFIX
+      + "balancer.host.regex.pool.check";
+  public static final String HOST_BALANCER_REGEX_USING_IPS_KEY = PROP_PREFIX
+      + "balancer.host.regex.is.ip";
+  public static final String HOST_BALANCER_REGEX_MAX_MIGRATIONS_KEY = PROP_PREFIX
+      + "balancer.host.regex.concurrent.migrations";
   private static final int HOST_BALANCER_REGEX_MAX_MIGRATIONS_DEFAULT = 250;
   protected static final String DEFAULT_POOL = "HostTableLoadBalancer.ALL";
   private static final int DEFAULT_OUTSTANDING_MIGRATIONS = 0;
-  public static final String HOST_BALANCER_OUTSTANDING_MIGRATIONS_KEY = Property.TABLE_ARBITRARY_PROP_PREFIX
-      .getKey() + "balancer.host.regex.max.outstanding.migrations";
+  public static final String HOST_BALANCER_OUTSTANDING_MIGRATIONS_KEY = PROP_PREFIX
+      + "balancer.host.regex.max.outstanding.migrations";
 
   protected long oobCheckMillis = AccumuloConfiguration.getTimeInMillis(HOST_BALANCER_OOB_DEFAULT);
 
@@ -130,8 +131,10 @@ public class HostRegexTableLoadBalancer extends TableLoadBalancer implements Con
    *          map of current tservers
    * @return current servers grouped by pool name, if not a match it is put into a default pool.
    */
-  protected synchronized Map<String,SortedMap<TServerInstance,TabletServerStatus>> splitCurrentByRegex(
-      SortedMap<TServerInstance,TabletServerStatus> current) {
+  // @formatter:off
+  protected synchronized Map<String,SortedMap<TServerInstance,TabletServerStatus>>
+    splitCurrentByRegex(SortedMap<TServerInstance,TabletServerStatus> current) {
+  // @formatter:on
     LOG.debug("Performing pool recheck - regrouping tablet servers based on regular expressions");
     Map<String,SortedMap<TServerInstance,TabletServerStatus>> newPools = new HashMap<>();
     for (Entry<TServerInstance,TabletServerStatus> e : current.entrySet()) {
@@ -373,9 +376,8 @@ public class HostRegexTableLoadBalancer extends TableLoadBalancer implements Con
             }
             String tid = tableIdMap.get(table);
             if (null == tid) {
-              LOG.warn(
-                  "Unable to check for out of bounds tablets for table {}, it may have been deleted or renamed.",
-                  table);
+              LOG.warn("Unable to check for out of bounds tablets for table {},"
+                  + " it may have been deleted or renamed.", table);
               continue;
             }
             try {
@@ -400,17 +402,15 @@ public class HostRegexTableLoadBalancer extends TableLoadBalancer implements Con
                     iter.next();
                   }
                   TServerInstance nextTS = iter.next();
-                  LOG.info(
-                      "Tablet {} is currently outside the bounds of the regex, migrating from {} to {}",
-                      ke, e.getKey(), nextTS);
+                  LOG.info("Tablet {} is currently outside the bounds of the"
+                      + " regex, migrating from {} to {}", ke, e.getKey(), nextTS);
                   migrationsOut.add(new TabletMigration(ke, e.getKey(), nextTS));
                   if (migrationsOut.size() >= this.maxTServerMigrations) {
                     break;
                   }
                 } else {
-                  LOG.warn(
-                      "No tablet servers online for pool {}, unable to migrate out of bounds tablets",
-                      poolName);
+                  LOG.warn("No tablet servers online for pool {}, unable to"
+                      + " migrate out of bounds tablets", poolName);
                 }
               }
             } catch (TException e1) {
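
The constants refactored above define the key shapes for this balancer. A hedged sketch of setting them (the key names follow the constants, but the values, the table name, and the choice of instance-level configuration are assumptions; consult the balancer's documentation for where it actually reads them):

    import org.apache.accumulo.core.client.Connector;

    void configureHostRegexBalancer(Connector conn) throws Exception {
      // HOST_BALANCER_PREFIX + <table>: regex of tserver hosts allowed to hold that table
      conn.instanceOperations().setProperty("table.custom.balancer.host.regex.logs", "r01n.*");
      // HOST_BALANCER_OOB_CHECK_KEY: how often to look for out-of-bounds tablets (default 5m)
      conn.instanceOperations().setProperty("table.custom.balancer.host.regex.oob.period", "10m");
    }
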
diff --git a/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java b/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java
index 6263a9d..f73e511 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java
@@ -187,8 +187,8 @@ public class MetricsConfiguration {
       }
     } else {
       if (!alreadyWarned)
-        log.warn(
-            "ACCUMULO_CONF_DIR variable not found in environment. Metrics collection will be disabled.");
+        log.warn("ACCUMULO_CONF_DIR variable not found in environment. Metrics"
+            + " collection will be disabled.");
       alreadyWarned = true;
       notFound = true;
       return;
@@ -215,8 +215,7 @@ public class MetricsConfiguration {
   public static void main(String[] args) throws Exception {
     MetricsConfiguration mc = new MetricsConfiguration("master");
     while (true) {
-      System.out.println(
-          "------------------------------------------------------------------------------------------------");
+      System.out.println("-----------------------------------------------------------------------");
       long t1 = System.currentTimeMillis();
       System.out.println(mc.isEnabled() + " took: " + (System.currentTimeMillis() - t1));
       Thread.sleep(1000);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/rpc/TCredentialsUpdatingInvocationHandler.java b/server/base/src/main/java/org/apache/accumulo/server/rpc/TCredentialsUpdatingInvocationHandler.java
index 653eb8f..1e5f177 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/rpc/TCredentialsUpdatingInvocationHandler.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/rpc/TCredentialsUpdatingInvocationHandler.java
@@ -84,8 +84,8 @@ public class TCredentialsUpdatingInvocationHandler<I> implements InvocationHandl
     if (null == tcreds) {
       // Not all calls require authentication (e.g. closeMultiScan). We need to let these pass
       // through.
-      log.trace(
-          "Did not find a TCredentials object in the first two positions of the argument list, not updating principal");
+      log.trace("Did not find a TCredentials object in the first two positions"
+          + " of the argument list, not updating principal");
       return;
     }
 
@@ -99,9 +99,8 @@ public class TCredentialsUpdatingInvocationHandler<I> implements InvocationHandl
     if (SaslMechanism.DIGEST_MD5 == UGIAssumingProcessor.rpcMechanism()
         && DelegationTokenImpl.class.isAssignableFrom(tokenClass)) {
       if (!principal.equals(tcreds.principal)) {
-        log.warn(
-            "{} issued RPC with delegation token over DIGEST-MD5 as the Accumulo principal {}. Disallowing RPC",
-            principal, tcreds.principal);
+        log.warn("{} issued RPC with delegation token over DIGEST-MD5 as the"
+            + "Accumulo principal {}. Disallowing RPC", principal, tcreds.principal);
         throw new ThriftSecurityException("RPC principal did not match provided Accumulo principal",
             SecurityErrorCode.BAD_CREDENTIALS);
       }
@@ -138,8 +137,8 @@ public class TCredentialsUpdatingInvocationHandler<I> implements InvocationHandl
       }
       String clientAddr = TServerUtils.clientAddress.get();
       if (!usersWithHosts.getHosts().contains(clientAddr)) {
-        final String msg = "Principal in credentials object allowed mismatched Kerberos principals, but not on "
-            + clientAddr;
+        final String msg = "Principal in credentials object allowed mismatched"
+            + " Kerberos principals, but not on " + clientAddr;
         log.warn(msg);
         throw new ThriftSecurityException(msg, SecurityErrorCode.BAD_CREDENTIALS);
       }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java b/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
index 48fe407..f2a4bd9 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
@@ -457,12 +457,12 @@ public class TServerUtils {
     // this host, fail quickly and inform them to update
     // their configuration.
     if (!hostname.equals(fqdn)) {
-      log.error(
-          "Expected hostname of '{}' but got '{}'. Ensure the entries in the Accumulo hosts files (e.g. masters, slaves) are the FQDN for each host when using SASL.",
-          fqdn, hostname);
+      log.error("Expected hostname of '{}' but got '{}'. Ensure the entries in"
+          + " the Accumulo hosts files (e.g. masters, slaves) are the FQDN for each"
+          + " host when using SASL.", fqdn, hostname);
       transport.close();
-      throw new RuntimeException(
-          "SASL requires that the address the thrift server listens on is the same as the FQDN for this host");
+      throw new RuntimeException("SASL requires that the address the thrift"
+          + " server listens on is the same as the FQDN for this host");
     }
 
     final UserGroupInformation serverUser;
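
The check above insists the configured hostname equal the machine's FQDN before SASL is enabled. A sketch of the usual resolution path it is being compared against:

    import java.net.InetAddress;

    static void requireFqdn(String configuredHostname) throws Exception {
      String fqdn = InetAddress.getLocalHost().getCanonicalHostName();
      if (!configuredHostname.equals(fqdn)) {
        throw new RuntimeException("SASL requires the configured hostname (" + configuredHostname
            + ") to be this host's FQDN (" + fqdn + ")");
      }
    }
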
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java b/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
index 52dc4f9..321dd93 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
@@ -135,7 +135,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_SCAN_AUDIT_TEMPLATE = "action: scan; targetTable: %s; authorizations: %s; range: %s; columns: %s; iterators: %s; iteratorOptions: %s;";
+  public static final String CAN_SCAN_AUDIT_TEMPLATE = "action: scan;"
+      + " targetTable: %s; authorizations: %s; range: %s; columns: %s;"
+      + " iterators: %s; iteratorOptions: %s;";
   private static final int MAX_ELEMENTS_TO_LOG = 10;
 
   private static List<String> truncate(Collection<?> list) {
@@ -177,7 +179,9 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_SCAN_BATCH_AUDIT_TEMPLATE = "action: scan; targetTable: %s; authorizations: %s; range: %s; columns: %s; iterators: %s; iteratorOptions: %s;";
+  public static final String CAN_SCAN_BATCH_AUDIT_TEMPLATE = "action: scan;"
+      + " targetTable: %s; authorizations: %s; range: %s; columns: %s;"
+      + " iterators: %s; iteratorOptions: %s;";
 
   @Override
   public boolean canScan(TCredentials credentials, String tableId, String namespaceId,
@@ -213,7 +217,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CHANGE_AUTHORIZATIONS_AUDIT_TEMPLATE = "action: changeAuthorizations; targetUser: %s; authorizations: %s";
+  public static final String CHANGE_AUTHORIZATIONS_AUDIT_TEMPLATE = "action:"
+      + " changeAuthorizations; targetUser: %s; authorizations: %s";
 
   @Override
   public void changeAuthorizations(TCredentials credentials, String user,
@@ -227,7 +232,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CHANGE_PASSWORD_AUDIT_TEMPLATE = "action: changePassword; targetUser: %s;";
+  public static final String CHANGE_PASSWORD_AUDIT_TEMPLATE = "action:"
+      + " changePassword; targetUser: %s;";
 
   @Override
   public void changePassword(TCredentials credentials, Credentials newInfo)
@@ -241,7 +247,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CREATE_USER_AUDIT_TEMPLATE = "action: createUser; targetUser: %s; Authorizations: %s;";
+  public static final String CREATE_USER_AUDIT_TEMPLATE = "action: createUser;"
+      + " targetUser: %s; Authorizations: %s;";
 
   @Override
   public void createUser(TCredentials credentials, Credentials newUser,
@@ -255,7 +262,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_CREATE_TABLE_AUDIT_TEMPLATE = "action: createTable; targetTable: %s;";
+  public static final String CAN_CREATE_TABLE_AUDIT_TEMPLATE = "action:"
+      + " createTable; targetTable: %s;";
 
   @Override
   public boolean canCreateTable(TCredentials c, String tableName, String namespaceId)
@@ -270,7 +278,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_DELETE_TABLE_AUDIT_TEMPLATE = "action: deleteTable; targetTable: %s;";
+  public static final String CAN_DELETE_TABLE_AUDIT_TEMPLATE = "action:"
+      + " deleteTable; targetTable: %s;";
 
   @Override
   public boolean canDeleteTable(TCredentials c, String tableId, String namespaceId)
@@ -286,7 +295,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_RENAME_TABLE_AUDIT_TEMPLATE = "action: renameTable; targetTable: %s; newTableName: %s;";
+  public static final String CAN_RENAME_TABLE_AUDIT_TEMPLATE = "action:"
+      + " renameTable; targetTable: %s; newTableName: %s;";
 
   @Override
   public boolean canRenameTable(TCredentials c, String tableId, String oldTableName,
@@ -301,7 +311,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_SPLIT_TABLE_AUDIT_TEMPLATE = "action: splitTable; targetTable: %s; targetNamespace: %s;";
+  public static final String CAN_SPLIT_TABLE_AUDIT_TEMPLATE = "action:"
+      + " splitTable; targetTable: %s; targetNamespace: %s;";
 
   @Override
   public boolean canSplitTablet(TCredentials credentials, String table, String namespaceId)
@@ -316,7 +327,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_PERFORM_SYSTEM_ACTION_AUDIT_TEMPLATE = "action: performSystemAction; principal: %s;";
+  public static final String CAN_PERFORM_SYSTEM_ACTION_AUDIT_TEMPLATE = "action:"
+      + " performSystemAction; principal: %s;";
 
   @Override
   public boolean canPerformSystemActions(TCredentials credentials) throws ThriftSecurityException {
@@ -331,7 +343,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_FLUSH_TABLE_AUDIT_TEMPLATE = "action: flushTable; targetTable: %s; targetNamespace: %s;";
+  public static final String CAN_FLUSH_TABLE_AUDIT_TEMPLATE = "action:"
+      + " flushTable; targetTable: %s; targetNamespace: %s;";
 
   @Override
   public boolean canFlush(TCredentials c, String tableId, String namespaceId)
@@ -346,7 +359,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_ALTER_TABLE_AUDIT_TEMPLATE = "action: alterTable; targetTable: %s; targetNamespace: %s;";
+  public static final String CAN_ALTER_TABLE_AUDIT_TEMPLATE = "action:"
+      + " alterTable; targetTable: %s; targetNamespace: %s;";
 
   @Override
   public boolean canAlterTable(TCredentials c, String tableId, String namespaceId)
@@ -361,7 +375,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_CLONE_TABLE_AUDIT_TEMPLATE = "action: cloneTable; targetTable: %s; newTableName: %s";
+  public static final String CAN_CLONE_TABLE_AUDIT_TEMPLATE = "action:"
+      + " cloneTable; targetTable: %s; newTableName: %s";
 
   @Override
   public boolean canCloneTable(TCredentials c, String tableId, String tableName,
@@ -378,7 +393,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_DELETE_RANGE_AUDIT_TEMPLATE = "action: deleteData; targetTable: %s; startRange: %s; endRange: %s;";
+  public static final String CAN_DELETE_RANGE_AUDIT_TEMPLATE = "action:"
+      + " deleteData; targetTable: %s; startRange: %s; endRange: %s;";
 
   @Override
   public boolean canDeleteRange(TCredentials c, String tableId, String tableName, Text startRow,
@@ -395,7 +411,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_BULK_IMPORT_AUDIT_TEMPLATE = "action: bulkImport; targetTable: %s; dataDir: %s; failDir: %s;";
+  public static final String CAN_BULK_IMPORT_AUDIT_TEMPLATE = "action:"
+      + " bulkImport; targetTable: %s; dataDir: %s; failDir: %s;";
 
   @Override
   public boolean canBulkImport(TCredentials c, String tableId, String tableName, String dir,
@@ -410,7 +427,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_COMPACT_TABLE_AUDIT_TEMPLATE = "action: compactTable; targetTable: %s; targetNamespace: %s;";
+  public static final String CAN_COMPACT_TABLE_AUDIT_TEMPLATE = "action:"
+      + " compactTable; targetTable: %s; targetNamespace: %s;";
 
   @Override
   public boolean canCompact(TCredentials c, String tableId, String namespaceId)
@@ -425,7 +443,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_CHANGE_AUTHORIZATIONS_AUDIT_TEMPLATE = "action: changeAuthorizations; targetUser: %s;";
+  public static final String CAN_CHANGE_AUTHORIZATIONS_AUDIT_TEMPLATE = "action:"
+      + " changeAuthorizations; targetUser: %s;";
 
   @Override
   public boolean canChangeAuthorizations(TCredentials c, String user)
@@ -440,7 +459,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_CHANGE_PASSWORD_AUDIT_TEMPLATE = "action: changePassword; targetUser: %s;";
+  public static final String CAN_CHANGE_PASSWORD_AUDIT_TEMPLATE = "action:"
+      + " changePassword; targetUser: %s;";
 
   @Override
   public boolean canChangePassword(TCredentials c, String user) throws ThriftSecurityException {
@@ -483,7 +503,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_GRANT_SYSTEM_AUDIT_TEMPLATE = "action: grantSystem; targetUser: %s; targetPermission: %s;";
+  public static final String CAN_GRANT_SYSTEM_AUDIT_TEMPLATE = "action:"
+      + " grantSystem; targetUser: %s; targetPermission: %s;";
 
   @Override
   public boolean canGrantSystem(TCredentials c, String user, SystemPermission sysPerm)
@@ -500,7 +521,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_GRANT_TABLE_AUDIT_TEMPLATE = "action: grantTable; targetUser: %s; targetTable: %s; targetNamespace: %s;";
+  public static final String CAN_GRANT_TABLE_AUDIT_TEMPLATE = "action:"
+      + " grantTable; targetUser: %s; targetTable: %s; targetNamespace: %s;";
 
   @Override
   public boolean canGrantTable(TCredentials c, String user, String table, String namespaceId)
@@ -515,7 +537,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_REVOKE_SYSTEM_AUDIT_TEMPLATE = "action: revokeSystem; targetUser: %s;, targetPermission: %s;";
+  public static final String CAN_REVOKE_SYSTEM_AUDIT_TEMPLATE = "action:"
+      + " revokeSystem; targetUser: %s;, targetPermission: %s;";
 
   @Override
   public boolean canRevokeSystem(TCredentials c, String user, SystemPermission sysPerm)
@@ -530,7 +553,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_REVOKE_TABLE_AUDIT_TEMPLATE = "action: revokeTable; targetUser: %s; targetTable %s; targetNamespace: %s;";
+  public static final String CAN_REVOKE_TABLE_AUDIT_TEMPLATE = "action:"
+      + " revokeTable; targetUser: %s; targetTable %s; targetNamespace: %s;";
 
   @Override
   public boolean canRevokeTable(TCredentials c, String user, String table, String namespaceId)
@@ -545,7 +569,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_IMPORT_AUDIT_TEMPLATE = "action: import; targetTable: %s; dataDir: %s;";
+  public static final String CAN_IMPORT_AUDIT_TEMPLATE = "action: import;"
+      + " targetTable: %s; dataDir: %s;";
 
   @Override
   public boolean canImport(TCredentials credentials, String tableName, String importDir,
@@ -561,7 +586,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_EXPORT_AUDIT_TEMPLATE = "action: export; targetTable: %s; dataDir: %s;";
+  public static final String CAN_EXPORT_AUDIT_TEMPLATE = "action: export;"
+      + " targetTable: %s; dataDir: %s;";
 
   @Override
   public boolean canExport(TCredentials credentials, String tableId, String tableName,
@@ -590,7 +616,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String GRANT_SYSTEM_PERMISSION_AUDIT_TEMPLATE = "action: grantSystemPermission; permission: %s; targetUser: %s;";
+  public static final String GRANT_SYSTEM_PERMISSION_AUDIT_TEMPLATE = "action:"
+      + " grantSystemPermission; permission: %s; targetUser: %s;";
 
   @Override
   public void grantSystemPermission(TCredentials credentials, String user,
@@ -604,7 +631,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String GRANT_TABLE_PERMISSION_AUDIT_TEMPLATE = "action: grantTablePermission; permission: %s; targetTable: %s; targetUser: %s;";
+  public static final String GRANT_TABLE_PERMISSION_AUDIT_TEMPLATE = "action:"
+      + " grantTablePermission; permission: %s; targetTable: %s; targetUser: %s;";
 
   @Override
   public void grantTablePermission(TCredentials credentials, String user, String tableId,
@@ -619,7 +647,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String REVOKE_SYSTEM_PERMISSION_AUDIT_TEMPLATE = "action: revokeSystemPermission; permission: %s; targetUser: %s;";
+  public static final String REVOKE_SYSTEM_PERMISSION_AUDIT_TEMPLATE = "action:"
+      + " revokeSystemPermission; permission: %s; targetUser: %s;";
 
   @Override
   public void revokeSystemPermission(TCredentials credentials, String user,
@@ -634,7 +663,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String REVOKE_TABLE_PERMISSION_AUDIT_TEMPLATE = "action: revokeTablePermission; permission: %s; targetTable: %s; targetUser: %s;";
+  public static final String REVOKE_TABLE_PERMISSION_AUDIT_TEMPLATE = "action:"
+      + " revokeTablePermission; permission: %s; targetTable: %s; targetUser: %s;";
 
   @Override
   public void revokeTablePermission(TCredentials credentials, String user, String tableId,
@@ -649,7 +679,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String HAS_SYSTEM_PERMISSION_AUDIT_TEMPLATE = "action: hasSystemPermission; permission: %s; targetUser: %s;";
+  public static final String HAS_SYSTEM_PERMISSION_AUDIT_TEMPLATE = "action:"
+      + " hasSystemPermission; permission: %s; targetUser: %s;";
 
   @Override
   public boolean hasSystemPermission(TCredentials credentials, String user,
@@ -664,7 +695,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_ONLINE_OFFLINE_TABLE_AUDIT_TEMPLATE = "action: %s; targetTable: %s;";
+  public static final String CAN_ONLINE_OFFLINE_TABLE_AUDIT_TEMPLATE = "action:"
+      + " %s; targetTable: %s;";
 
   @Override
   public boolean canOnlineOfflineTable(TCredentials credentials, String tableId, FateOperation op,
@@ -687,7 +719,8 @@ public class AuditedSecurityOperation extends SecurityOperation {
     }
   }
 
-  public static final String CAN_MERGE_TABLE_AUDIT_TEMPLATE = "action: mergeTable; targetTable: %s; targetNamespace: %s;";
+  public static final String CAN_MERGE_TABLE_AUDIT_TEMPLATE = "action:"
+      + " mergeTable; targetTable: %s; targetNamespace: %s;";
 
   @Override
   public boolean canMerge(TCredentials c, String tableId, String namespaceId)
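
All of these templates are consumed the same way: the %s slots are filled via String.format and the result goes to the audit log. A small sketch against one of the templates above (argument values are made up):

    static String mergeAudit(String tableId, String namespaceId) {
      // e.g. mergeAudit("2b", "ns1") ->
      //   "action: mergeTable; targetTable: 2b; targetNamespace: ns1;"
      return String.format(AuditedSecurityOperation.CAN_MERGE_TABLE_AUDIT_TEMPLATE,
          tableId, namespaceId);
    }
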
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java b/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
index 032081a..d9d599c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
@@ -128,7 +128,8 @@ public class SecurityOperation {
         || !authenticator.validSecurityHandlers(authorizor, pm)
         || !permHandle.validSecurityHandlers(authent, author))
       throw new RuntimeException(authorizor + ", " + authenticator + ", and " + pm
-          + " do not play nice with eachother. Please choose authentication and authorization mechanisms that are compatible with one another.");
+          + " do not play nice with eachother. Please choose authentication and"
+          + " authorization mechanisms that are compatible with one another.");
 
     isKerberos = KerberosAuthenticator.class.isAssignableFrom(authenticator.getClass());
   }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/UserImpersonation.java b/server/base/src/main/java/org/apache/accumulo/server/security/UserImpersonation.java
index 95ea076..93a3205 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/UserImpersonation.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/UserImpersonation.java
@@ -36,9 +36,13 @@ import org.slf4j.LoggerFactory;
  * users capable of impersonating another user, the users which may be impersonated and the hosts in
  * which the impersonator may issue requests from.
  *
- * <code>INSTANCE_RPC_SASL_PROXYUSERS=rpc_user={allowed_accumulo_users=[...], allowed_client_hosts=[...]</code>
- * <code>INSTANCE_RPC_SASL_ALLOWED_USER_IMPERSONATION=rpc_user:user,user,user;...</code>
- * <code>INSTANCE_RPC_SASL_ALLOWED_HOST_IMPERSONATION=host,host:host...</code>
+ * <pre>
+ * <code>
+ * INSTANCE_RPC_SASL_PROXYUSERS=rpc_user={allowed_accumulo_users=[...], allowed_client_hosts=[...]
+ * INSTANCE_RPC_SASL_ALLOWED_USER_IMPERSONATION=rpc_user:user,user,user;...
+ * INSTANCE_RPC_SASL_ALLOWED_HOST_IMPERSONATION=host,host:host...
+ * </code>
+ * </pre>
  *
  * @see Property#INSTANCE_RPC_SASL_ALLOWED_USER_IMPERSONATION
  * @see Property#INSTANCE_RPC_SASL_ALLOWED_HOST_IMPERSONATION
@@ -216,8 +220,8 @@ public class UserImpersonation {
     }
 
     if (userConfigs.length != hostConfigs.length) {
-      String msg = String.format(
-          "Should have equal number of user and host impersonation elements in configuration. Got %d and %d elements, respectively.",
+      String msg = String.format("Should have equal number of user and host"
+          + " impersonation elements in configuration. Got %d and %d elements," + " respectively.",
           userConfigs.length, hostConfigs.length);
       throw new IllegalArgumentException(msg);
     }
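
The length check above relies on the two impersonation properties being parallel lists. A sketch of that invariant under the delimiters shown in the Javadoc (';' between rules, ':' between an rpc user and its user list; example values are made up):

    static void checkParallel(String userProp, String hostProp) {
      // userProp: "rpc_user:user1,user2;other_rpc:user3"   hostProp: "host1,host2;host3"
      String[] userConfigs = userProp.split(";");
      String[] hostConfigs = hostProp.split(";");
      if (userConfigs.length != hostConfigs.length)
        throw new IllegalArgumentException(String.format("Should have equal number of user and"
            + " host impersonation elements in configuration. Got %d and %d elements,"
            + " respectively.", userConfigs.length, hostConfigs.length));
    }
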
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthenticator.java b/server/base/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthenticator.java
index 8ae1cca..3685030 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthenticator.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthenticator.java
@@ -174,9 +174,8 @@ public final class ZKAuthenticator implements Authenticator {
         throw new AccumuloSecurityException(principal, SecurityErrorCode.DEFAULT_SECURITY_ERROR, e);
       }
     } else
-      throw new AccumuloSecurityException(principal, SecurityErrorCode.USER_DOESNT_EXIST); // user
-                                                                                           // doesn't
-                                                                                           // exist
+      // user doesn't exist
+      throw new AccumuloSecurityException(principal, SecurityErrorCode.USER_DOESNT_EXIST);
   }
 
   @Override
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
index 0c68c35..6128324 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
@@ -241,8 +241,10 @@ public class FileUtil {
         return .5;
       }
 
-      List<SortedKeyValueIterator<Key,Value>> iters = new ArrayList<SortedKeyValueIterator<Key,Value>>(
-          readers);
+      // @formatter:off
+      List<SortedKeyValueIterator<Key,Value>> iters =
+        new ArrayList<SortedKeyValueIterator<Key,Value>>(readers);
+      // @formatter:on
       MultiIterator mmfi = new MultiIterator(iters, true);
 
       // skip the prevendrow
@@ -324,9 +326,9 @@ public class FileUtil {
 
       if (numKeys == 0) {
         if (useIndex) {
-          log.warn(
-              "Failed to find mid point using indexes, falling back to data files which is slower. No entries between "
-                  + prevEndRow + " and " + endRow + " for " + mapFiles);
+          log.warn("Failed to find mid point using indexes, falling back to"
+              + " data files which is slower. No entries between " + prevEndRow + " and " + endRow
+              + " for " + mapFiles);
           // need to pass original map files, not possibly reduced indexes
           return findMidPoint(fs, acuConf, prevEndRow, endRow, origMapFiles, minSplit, false);
         }
@@ -334,8 +336,10 @@ public class FileUtil {
             + endRow + " for " + mapFiles);
       }
 
-      List<SortedKeyValueIterator<Key,Value>> iters = new ArrayList<SortedKeyValueIterator<Key,Value>>(
-          readers);
+      // @formatter:off
+      List<SortedKeyValueIterator<Key,Value>> iters =
+        new ArrayList<SortedKeyValueIterator<Key,Value>>(readers);
+      // @formatter:on
       MultiIterator mmfi = new MultiIterator(iters, true);
 
       // skip the prevendrow
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/SendLogToChainsaw.java b/server/base/src/main/java/org/apache/accumulo/server/util/SendLogToChainsaw.java
index 2c9b2c4..67fe7be 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/SendLogToChainsaw.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/SendLogToChainsaw.java
@@ -56,9 +56,9 @@ import com.beust.jcommander.Parameter;
 
 public class SendLogToChainsaw extends XMLLayout {
 
-  private static Pattern logPattern = Pattern.compile(
-      "^(\\d\\d)\\s(\\d\\d):(\\d\\d):(\\d\\d),(\\d\\d\\d)\\s\\[(.*)\\]\\s(TRACE|DEBUG|INFO|WARN|FATAL|ERROR)\\s*?:(.*)$",
-      Pattern.UNIX_LINES);
+  private static Pattern logPattern = Pattern
+      .compile("^(\\d\\d)\\s(\\d\\d):(\\d\\d):(\\d\\d),(\\d\\d\\d)\\s\\[(.*)\\]\\s"
+          + "(TRACE|DEBUG|INFO|WARN|FATAL|ERROR)\\s*?:(.*)$", Pattern.UNIX_LINES);
 
   private File[] logFiles = null;
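
To see what the rewrapped pattern captures, a self-contained sketch (the log line is a made-up
sample in the dd HH:mm:ss,SSS [thread] LEVEL: message shape the regex expects):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class LogPatternDemo {
      // Same regex as SendLogToChainsaw, reassembled after the line wrap
      private static final Pattern LOG = Pattern.compile(
          "^(\\d\\d)\\s(\\d\\d):(\\d\\d):(\\d\\d),(\\d\\d\\d)\\s\\[(.*)\\]\\s"
              + "(TRACE|DEBUG|INFO|WARN|FATAL|ERROR)\\s*?:(.*)$", Pattern.UNIX_LINES);

      public static void main(String[] args) {
        Matcher m = LOG.matcher("06 09:53:04,123 [main] INFO : server started");
        if (m.matches()) {
          // group(6) is the thread, group(7) the level, group(8) the message
          System.out.println(m.group(7) + " ->" + m.group(8));
        }
      }
    }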
 
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/FileSystemMonitorTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/FileSystemMonitorTest.java
index 9bffbb1..dac2673 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/FileSystemMonitorTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/FileSystemMonitorTest.java
@@ -46,7 +46,8 @@ public class FileSystemMonitorTest {
         "devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0",
         "tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0",
         "tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0",
-        "cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0",
+        "cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,"
+            + "release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0",
         "pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0",
         "cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0",
         "cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpuacct,cpu 0 0",
@@ -62,7 +63,8 @@ public class FileSystemMonitorTest {
         "/dev/vda2 /ignoreme reiserfs rw 0 0",
         "rpc_pipefs /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0",
         "selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0",
-        "systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=32,pgrp=1,timeout=300,minproto=5,maxproto=5,direct 0 0",
+        "systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=32,pgrp=1,"
+            + "timeout=300,minproto=5,maxproto=5,direct 0 0",
         "debugfs /sys/kernel/debug debugfs rw,relatime 0 0",
         "mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0",
         "hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0",
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
index e01e22c..0aeaaea 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
@@ -273,8 +273,8 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
             .substring(MetadataSchema.DeletesSection.getRowPrefix().length());
         result.add(cand);
         if (almostOutOfMemory(Runtime.getRuntime())) {
-          log.info(
-              "List of delete candidates has exceeded the memory threshold. Attempting to delete what has been gathered so far.");
+          log.info("List of delete candidates has exceeded the memory"
+              + " threshold. Attempting to delete what has been gathered so far.");
           return true;
         }
       }
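
For context, almostOutOfMemory amounts to comparing used heap against the JVM maximum; a
hypothetical sketch using only java.lang.Runtime (the 0.75 threshold is an assumed value, not the
actual Accumulo constant):

    // Assumed threshold; the real implementation may use a different ratio.
    static boolean almostOutOfMemory(Runtime runtime) {
      long used = runtime.totalMemory() - runtime.freeMemory();
      return used > 0.75 * runtime.maxMemory();
    }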
diff --git a/server/master/src/main/java/org/apache/accumulo/master/Master.java b/server/master/src/main/java/org/apache/accumulo/master/Master.java
index 1f6871e..9414b98 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/Master.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/Master.java
@@ -234,14 +234,14 @@ public class Master extends AccumuloServerContext
   static final boolean O = false;
   // @formatter:off
   static final boolean transitionOK[][] = {
-      //                              INITIAL HAVE_LOCK SAFE_MODE NORMAL UNLOAD_META UNLOAD_ROOT STOP
-      /* INITIAL */                   {X,     X,        O,        O,      O,         O,          X},
-      /* HAVE_LOCK */                 {O,     X,        X,        X,      O,         O,          X},
-      /* SAFE_MODE */                 {O,     O,        X,        X,      X,         O,          X},
-      /* NORMAL */                    {O,     O,        X,        X,      X,         O,          X},
-      /* UNLOAD_METADATA_TABLETS */   {O,     O,        X,        X,      X,         X,          X},
-      /* UNLOAD_ROOT_TABLET */        {O,     O,        O,        X,      X,         X,          X},
-      /* STOP */                      {O,     O,        O,        O,      O,         X,          X}};
+      //                            INITIAL HAVE_LOCK SAFE_MODE NORMAL UNLOAD_META UNLOAD_ROOT STOP
+      /* INITIAL */                 {X,     X,        O,        O,      O,         O,          X},
+      /* HAVE_LOCK */               {O,     X,        X,        X,      O,         O,          X},
+      /* SAFE_MODE */               {O,     O,        X,        X,      X,         O,          X},
+      /* NORMAL */                  {O,     O,        X,        X,      X,         O,          X},
+      /* UNLOAD_METADATA_TABLETS */ {O,     O,        X,        X,      X,         X,          X},
+      /* UNLOAD_ROOT_TABLET */      {O,     O,        O,        X,      X,         X,          X},
+      /* STOP */                    {O,     O,        O,        O,      O,         X,          X}};
   //@formatter:on
   synchronized void setMasterState(MasterState newState) {
     if (state.equals(newState))
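
The realigned matrix is indexed by MasterState ordinals, so a transition check reduces to a single
lookup; a sketch under that assumption (the exception and its message are illustrative, not the
actual Master behavior):

    // Row = current state, column = proposed state, in declaration order.
    void checkTransition(MasterState current, MasterState next) {
      if (!transitionOK[current.ordinal()][next.ordinal()]) {
        throw new IllegalStateException("Illegal transition " + current + " -> " + next);
      }
    }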
@@ -327,8 +327,9 @@ public class Master extends AccumuloServerContext
       // the upgrade.
       // Change to Guava's Verify once we use Guava 17.
       if (null != fate) {
-        throw new IllegalStateException(
-            "Access to Fate should not have been initialized prior to the Master transitioning to active. Please save all logs and file a bug.");
+        throw new IllegalStateException("Access to Fate should not have been"
+            + " initialized prior to the Master transitioning to active. Please"
+            + " save all logs and file a bug.");
       }
       Accumulo.abortIfFateTransactions();
       try {
@@ -493,12 +494,14 @@ public class Master extends AccumuloServerContext
         // Fate still hasn't been started.
         // Change both to use Guava's Verify once we use Guava 17.
         if (!haveUpgradedZooKeeper) {
-          throw new IllegalStateException(
-              "We should only attempt to upgrade Accumulo's metadata table if we've already upgraded ZooKeeper. Please save all logs and file a bug.");
+          throw new IllegalStateException("We should only attempt to upgrade"
+              + " Accumulo's metadata table if we've already upgraded ZooKeeper."
+              + " Please save all logs and file a bug.");
         }
         if (null != fate) {
-          throw new IllegalStateException(
-              "Access to Fate should not have been initialized prior to the Master finishing upgrades. Please save all logs and file a bug.");
+          throw new IllegalStateException("Access to Fate should not have been"
+              + " initialized prior to the Master finishing upgrades. Please save"
+              + " all logs and file a bug.");
         }
         Runnable upgradeTask = new Runnable() {
           int version = accumuloPersistentVersion;
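
Once the Guava 17 upgrade mentioned in these comments happens, the hand-rolled
IllegalStateException guards could collapse to one-liners; a sketch of the fate guard with
com.google.common.base.Verify (Guava 17+):

    import static com.google.common.base.Verify.verify;

    // Throws VerifyException with the given message when the condition is false.
    verify(fate == null, "Access to Fate should not have been initialized prior"
        + " to the Master finishing upgrades. Please save all logs and file a bug.");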
@@ -1044,9 +1047,8 @@ public class Master extends AccumuloServerContext
               }
           }
         } catch (Throwable t) {
-          log.error(
-              "Error occurred reading / switching master goal state. Will continue with attempt to update status",
-              t);
+          log.error("Error occurred reading / switching master goal state. Will"
+              + " continue with attempt to update status", t);
         }
 
         try {
@@ -1352,9 +1354,12 @@ public class Master extends AccumuloServerContext
 
     // Start the replication coordinator which assigns tservers to service replication requests
     MasterReplicationCoordinator impl = new MasterReplicationCoordinator(this);
-    ReplicationCoordinator.Processor<ReplicationCoordinator.Iface> replicationCoordinatorProcessor = new ReplicationCoordinator.Processor<>(
-        RpcWrapper.service(impl,
-            new ReplicationCoordinator.Processor<ReplicationCoordinator.Iface>(impl)));
+    // @formatter:off
+    ReplicationCoordinator.Processor<ReplicationCoordinator.Iface> replicationCoordinatorProcessor =
+      new ReplicationCoordinator.Processor<>(
+    // @formatter:on
+            RpcWrapper.service(impl,
+                new ReplicationCoordinator.Processor<ReplicationCoordinator.Iface>(impl)));
     ServerAddress replAddress = TServerUtils.startServer(this, hostname,
         Property.MASTER_REPLICATION_COORDINATOR_PORT, replicationCoordinatorProcessor,
         "Master Replication Coordinator", "Replication Coordinator", null,
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssigner.java b/server/master/src/main/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssigner.java
index 3d942c6..d6f31e7 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssigner.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssigner.java
@@ -168,9 +168,8 @@ public abstract class DistributedWorkQueueWorkAssigner implements WorkAssigner {
       // continuing
       // to add more work entries
       if (getQueueSize() > maxQueueSize) {
-        log.warn(
-            "Queued replication work exceeds configured maximum ({}), sleeping to allow work to occur",
-            maxQueueSize);
+        log.warn("Queued replication work exceeds configured maximum ({}),"
+            + " sleeping to allow work to occur", maxQueueSize);
         return;
       }
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/FinishedWorkUpdater.java b/server/master/src/main/java/org/apache/accumulo/master/replication/FinishedWorkUpdater.java
index 3dba90b..4495aae 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/FinishedWorkUpdater.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/FinishedWorkUpdater.java
@@ -163,9 +163,8 @@ public class FinishedWorkUpdater implements Runnable {
           try {
             replBw.addMutation(replMutation);
           } catch (MutationsRejectedException e) {
-            log.error(
-                "Error writing mutations to update replication Status messages in StatusSection, will retry",
-                e);
+            log.error("Error writing mutations to update replication Status"
+                + " messages in StatusSection, will retry", e);
             return;
           }
         }
@@ -178,9 +177,8 @@ public class FinishedWorkUpdater implements Runnable {
       try {
         replBw.close();
       } catch (MutationsRejectedException e) {
-        log.error(
-            "Error writing mutations to update replication Status messages in StatusSection, will retry",
-            e);
+        log.error("Error writing mutations to update replication Status"
+            + " messages in StatusSection, will retry", e);
       }
     }
   }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/RemoveCompleteReplicationRecords.java b/server/master/src/main/java/org/apache/accumulo/master/replication/RemoveCompleteReplicationRecords.java
index d72decd..aa6cc4d 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/RemoveCompleteReplicationRecords.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/RemoveCompleteReplicationRecords.java
@@ -71,12 +71,11 @@ public class RemoveCompleteReplicationRecords implements Runnable {
       bw = ReplicationTable.getBatchWriter(conn);
 
       if (bs == null || bw == null)
-        throw new AssertionError(
-            "Inconceivable; an exception should have been thrown, but 'bs' or 'bw' was null instead");
+        throw new AssertionError("Inconceivable; an exception should have been"
+            + " thrown, but 'bs' or 'bw' was null instead");
     } catch (ReplicationTableOfflineException e) {
-      log.debug(
-          "Not attempting to remove complete replication records as the table ({}) isn't yet online",
-          ReplicationTable.NAME);
+      log.debug("Not attempting to remove complete replication records as the"
+          + " table ({}) isn't yet online", ReplicationTable.NAME);
       return;
     }
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java b/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java
index 2691fca..c4c2ea4 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java
@@ -73,8 +73,9 @@ public class FateAdmin {
       System.exit(1);
     }
 
-    System.err.printf(
-        "This tool has been deprecated%nFATE administration now available within 'accumulo shell'%n$ fate fail <txid>... | delete <txid>... | print [<txid>...]%n%n");
+    System.err.printf("This tool has been deprecated%nFATE administration now"
+        + " available within 'accumulo shell'%n$ fate fail <txid>... | delete"
+        + " <txid>... | print [<txid>...]%n%n");
 
     AdminUtil<Master> admin = new AdminUtil<>();
 
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/BasicServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/BasicServlet.java
index 48d0129..af90b6e 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/BasicServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/BasicServlet.java
@@ -146,12 +146,12 @@ abstract public class BasicServlet extends HttpServlet {
     sb.append("<link rel='stylesheet' type='text/css' href='/web/screen.css' media='screen' />\n");
     sb.append("<script src='/web/functions.js' type='text/javascript'></script>\n");
 
-    sb.append(
-        "<!--[if lte IE 8]><script language=\"javascript\" type=\"text/javascript\" src=\"/web/flot/excanvas.js\"></script><![endif]-->\n");
-    sb.append(
-        "<script language=\"javascript\" type=\"text/javascript\" src=\"/web/flot/jquery.js\"></script>\n");
-    sb.append(
-        "<script language=\"javascript\" type=\"text/javascript\" src=\"/web/flot/jquery.flot.js\"></script>\n");
+    sb.append("<!--[if lte IE 8]><script language=\"javascript\" type=\"text/javascript\""
+        + " src=\"/web/flot/excanvas.js\"></script><![endif]-->\n");
+    sb.append("<script language=\"javascript\" type=\"text/javascript\""
+        + " src=\"/web/flot/jquery.js\"></script>\n");
+    sb.append("<script language=\"javascript\" type=\"text/javascript\""
+        + " src=\"/web/flot/jquery.flot.js\"></script>\n");
 
     sb.append("</head>\n");
 
@@ -203,9 +203,8 @@ abstract public class BasicServlet extends HttpServlet {
           + ")</a></span></span><br />\n");
     int numProblems = Monitor.getProblemSummary().entrySet().size();
     if (numProblems > 0)
-      sb.append(
-          "<span class='error'><a href='/problems'>Table&nbsp;Problems&nbsp;<span class='smalltext'>("
-              + numProblems + ")</a></span></span><br />\n");
+      sb.append("<span class='error'><a href='/problems'>Table&nbsp;Problems&nbsp;"
+          + "<span class='smalltext'>(" + numProblems + ")</a></span></span><br />\n");
     sb.append("<hr />\n");
     sb.append("<a href='/xml'>XML</a><br />\n");
     sb.append("<a href='/json'>JSON</a><hr />\n");
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/MasterServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/MasterServlet.java
index 58ec6be..08adce4 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/MasterServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/MasterServlet.java
@@ -163,7 +163,8 @@ public class MasterServlet extends BasicServlet {
               + " Note that deleted records are \"inserted\" and will make the ingest "
               + "rate increase in the near-term.");
       masterStatus.addSortableColumn("Entries<br />Read", new NumberType<Long>(),
-          "The total number of Key/Value pairs read on the server side.  Not all may be returned because of filtering.");
+          "The total number of Key/Value pairs read on the server side. Not"
+              + " all may be returned because of filtering.");
       masterStatus.addSortableColumn("Entries<br />Returned", new NumberType<Long>(),
           "The total number of Key/Value pairs returned as a result of scans.");
       masterStatus.addSortableColumn("Hold&nbsp;Time", new DurationType(0l, 0l),
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ProblemServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ProblemServlet.java
index 0f3eb7f..2075c38 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ProblemServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ProblemServlet.java
@@ -192,7 +192,8 @@ public class ProblemServlet extends BasicServlet {
         return "-";
       ProblemReport p = (ProblemReport) obj;
       return String.format(
-          "<a href='/op?table=%s&action=clearProblem&redir=%s&resource=%s&ptype=%s'>clear this problem</a>",
+          "<a href='/op?table=%s&action=clearProblem&redir=%s"
+              + "&resource=%s&ptype=%s'>clear this problem</a>",
           encode(p.getTableName()), currentPage, encode(p.getResource()),
           encode(p.getProblemType().name()));
     }
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ShellServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ShellServlet.java
index 4f140e5..13b0759 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ShellServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/ShellServlet.java
@@ -113,8 +113,8 @@ public class ShellServlet extends BasicServlet {
     sb.append("<div id='shell'>\n");
     sb.append("<pre id='shellResponse'>").append(shellThread.getOutput()).append("</pre>\n");
     sb.append("<form><span id='shellPrompt'>").append(shellThread.getPrompt());
-    sb.append(
-        "</span><input type='text' name='cmd' id='cmd' onkeydown='return handleKeyDown(event.keyCode);'>\n");
+    sb.append("</span><input type='text' name='cmd' id='cmd' "
+        + "onkeydown='return handleKeyDown(event.keyCode);'>\n");
     sb.append("</form>\n</div>\n");
     sb.append("<script type='text/javascript'>\n");
     sb.append("var url = '").append(req.getRequestURL().toString()).append("';\n");
@@ -148,8 +148,8 @@ public class ShellServlet extends BasicServlet {
     sb.append("\n");
     sb.append("function submitCmd(cmd) {\n");
     sb.append("  if (cmd=='history') {\n");
-    sb.append(
-        "    document.getElementById('shellResponse').innerHTML += document.getElementById('shellPrompt').innerHTML+cmd+'\\n';\n");
+    sb.append("    document.getElementById('shellResponse').innerHTML += "
+        + "document.getElementById('shellPrompt').innerHTML+cmd+'\\n';\n");
     sb.append("    document.getElementById('shellResponse').innerHTML += history.join('\\n');\n");
     sb.append("    return\n");
     sb.append("  }\n");
@@ -174,15 +174,15 @@ public class ShellServlet extends BasicServlet {
         "    document.getElementById('shellResponse').innerHTML += text.substring(0,index+1);\n");
     sb.append("    document.getElementById('shellPrompt').innerHTML = text.substring(index+1);\n");
     sb.append("    document.getElementById('cmd').value = '';\n");
-    sb.append(
-        "    document.getElementById('shell').scrollTop = document.getElementById('cmd').offsetTop;\n");
+    sb.append("    document.getElementById('shell').scrollTop ="
+        + " document.getElementById('cmd').offsetTop;\n");
     sb.append("  } else {\n");
     sb.append("    window.location = url;\n");
     sb.append("  }\n");
     sb.append("}\n");
     sb.append("</script>\n");
-    sb.append(
-        "<script type='text/javascript'>window.onload = function() { document.getElementById('cmd').select(); }</script>\n");
+    sb.append("<script type='text/javascript'>window.onload = "
+        + "function() { document.getElementById('cmd').select(); }</script>\n");
   }
 
   @Override
@@ -232,8 +232,8 @@ public class ShellServlet extends BasicServlet {
   }
 
   private String authenticationForm(String requestURI, String csrfToken) {
-    return "<div id='login'><form method=POST action='" + requestURI + "'>"
-        + "<table><tr><td>Mock:&nbsp</td><td><input type='checkbox' name='mock' value='mock'></td></tr>"
+    return "<div id='login'><form method=POST action='" + requestURI + "'><table>"
+        + "<tr><td>Mock:&nbsp</td><td><input type='checkbox' name='mock' value='mock'></td></tr>"
         + "<tr><td>Username:&nbsp;</td><td><input type='text' name='user'></td></tr>"
         + "<tr><td>Password:&nbsp;</td><td><input type='password' name='pass'></td><td>"
         + "<input type='hidden' name='" + CSRF_KEY + "' value='" + csrfToken
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
index f5098f9..e443e04 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
@@ -104,8 +104,8 @@ public class TServersServlet extends BasicServlet {
         tservers.addAll(Monitor.getMmi().tServerInfo);
 
       Table tServerList = new Table("tservers", "Tablet&nbsp;Servers");
-      tServerList.setSubCaption(
-          "Click on the <span style='color: #0000ff;'>server address</span> to view detailed performance statistics for that server.");
+      tServerList.setSubCaption("Click on the <span style='color: #0000ff;'>server address</span>"
+          + " to view detailed performance statistics for that server.");
 
       doTserverList(req, sb, tservers, null, tServerList);
       return;
@@ -374,17 +374,22 @@ public class TServersServlet extends BasicServlet {
     tServerList.addSortableColumn("Running<br />Scans", new CompactionsType("scans"),
         "The number of scans running and queued on this tablet server.");
     tServerList.addSortableColumn("Minor<br />Compactions", new CompactionsType("minor"),
-        "The number of minor compactions running and (queued waiting for resources). Minor compactions are the operations where entries are flushed from memory to disk.");
+        "The number of minor compactions running and (queued waiting for"
+            + " resources). Minor compactions are the operations where entries are"
+            + " flushed from memory to disk.");
     tServerList.addSortableColumn("Major<br />Compactions", new CompactionsType("major"),
-        "The number of major compactions running and (queued waiting for resources). "
-            + "Major compactions are the operations where many smaller files are grouped into a larger file, eliminating duplicates and cleaning up deletes.");
+        "The number of major compactions running and (queued waiting for"
+            + " resources). Major compactions are the operations where many smaller"
+            + " files are grouped into a larger file, eliminating duplicates and"
+            + " cleaning up deletes.");
     tServerList.addSortableColumn("Index Cache<br />Hit Rate", new PercentageType(),
         "The recent index cache hit rate.");
     tServerList.addSortableColumn("Data Cache<br />Hit Rate", new PercentageType(),
         "The recent data cache hit rate.");
     tServerList.addSortableColumn("OS&nbsp;Load",
         new NumberType<>(0., guessHighLoad * 1., 0., guessHighLoad * 3.),
-        "The Unix one minute load average. The average number of processes in the run queue over a one minute interval.");
+        "The Unix one minute load average. The average number of processes in"
+            + " the run queue over a one minute interval.");
 
     log.debug("tableId: " + tableId);
     for (TabletServerStatus status : tservers) {
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TablesServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TablesServlet.java
index 6c72b10..5bcc9f9 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TablesServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TablesServlet.java
@@ -98,13 +98,17 @@ public class TablesServlet extends BasicServlet {
     tableList.addSortableColumn("Ingest", new NumberType<Long>(),
         "The number of Key/Value pairs inserted.  Note that deletes are 'inserted'.");
     tableList.addSortableColumn("Entries<br />Read", new NumberType<Long>(),
-        "The number of Key/Value pairs read on the server side.  Not all key values read may be returned to client because of filtering.");
+        "The number of Key/Value pairs read on the server side. Not all key"
+            + " values read may be returned to client because of filtering.");
     tableList.addSortableColumn("Entries<br />Returned", new NumberType<Long>(),
-        "The number of Key/Value pairs returned to clients during queries.  This is <b>not</b> the number of scans.");
+        "The number of Key/Value pairs returned to clients during queries."
+            + " This is <b>not</b> the number of scans.");
     tableList.addSortableColumn("Hold&nbsp;Time", new DurationType(0l, 0l),
-        "The amount of time that ingest operations are suspended while waiting for data to be written to disk.");
+        "The amount of time that ingest operations are suspended while waiting"
+            + " for data to be written to disk.");
     tableList.addSortableColumn("Running<br />Scans", new CompactionsType("scans"),
-        "Information about the scans threads.  Shows how many threads are running and how much work is queued for the threads.");
+        "Information about the scans threads. Shows how many threads are"
+            + " running and how much work is queued for the threads.");
     tableList.addSortableColumn("Minor<br />Compactions", new CompactionsType("minor"),
         "Flushing memory to disk is called a \"minor compaction.\" "
             + "Multiple tablets can be minor compacted simultaneously, but " + ""
@@ -113,9 +117,10 @@ public class TablesServlet extends BasicServlet {
             + "indicated using parentheses. So <tt> 2 (3)</tt> indicates there are "
             + "two compactions running and three queued waiting for resources.");
     tableList.addSortableColumn("Major<br />Compactions", new CompactionsType("major"),
-        "Gathering up many small files and rewriting them as one larger file is called a 'Major Compaction'. "
-            + "Major Compactions are performed as a consequence of new files created from Minor Compactions and Bulk Load operations.  "
-            + "They reduce the number of files used during queries.");
+        "Gathering up many small files and rewriting them as one larger file is"
+            + " called a 'Major Compaction'. Major Compactions are performed as a"
+            + " consequence of new files created from Minor Compactions and Bulk Load"
+            + " operations. They reduce the number of files used during queries.");
     SortedMap<String,TableInfo> tableStats = new TreeMap<>();
 
     if (Monitor.getMmi() != null && Monitor.getMmi().tableMap != null)
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/VisServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/VisServlet.java
index 626aa3e..75209c6 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/VisServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/VisServlet.java
@@ -171,21 +171,19 @@ public class VisServlet extends BasicServlet {
     sb.append("<div class='left'>\n");
     sb.append("<div id='parameters' class='nowrap'>\n");
     // shape select box
-    sb.append(
-        "<span class='viscontrol'>Shape: <select id='shape' onchange='setShape(this)'><option>Circles</option><option")
-        .append(!cfg.useCircles ? " selected='true'" : "")
+    sb.append("<span class='viscontrol'>Shape: <select id='shape' onchange='setShape(this)'>"
+        + "<option>Circles</option><option").append(!cfg.useCircles ? " selected='true'" : "")
         .append(">Squares</option></select></span>\n");
     // size select box
-    sb.append(
-        "&nbsp;&nbsp<span class='viscontrol'>Size: <select id='size' onchange='setSize(this)'><option")
-        .append(cfg.spacing == 10 ? " selected='true'" : "").append(">10</option><option")
-        .append(cfg.spacing == 20 ? " selected='true'" : "").append(">20</option><option")
-        .append(cfg.spacing == 40 ? " selected='true'" : "").append(">40</option><option")
-        .append(cfg.spacing == 80 ? " selected='true'" : "")
+    sb.append("&nbsp;&nbsp<span class='viscontrol'>Size: <select id='size' "
+        + "onchange='setSize(this)'><option").append(cfg.spacing == 10 ? " selected='true'" : "")
+        .append(">10</option><option").append(cfg.spacing == 20 ? " selected='true'" : "")
+        .append(">20</option><option").append(cfg.spacing == 40 ? " selected='true'" : "")
+        .append(">40</option><option").append(cfg.spacing == 80 ? " selected='true'" : "")
         .append(">80</option></select></span>\n");
     // motion select box
-    sb.append(
-        "&nbsp;&nbsp<span class='viscontrol'>Motion: <select id='motion' onchange='setMotion(this)'>");
+    sb.append("&nbsp;&nbsp<span class='viscontrol'>"
+        + "Motion: <select id='motion' onchange='setMotion(this)'>");
     sb.append("<option selected='true'></option>");
     addOptions(sb, null);
     sb.append("</select></span>\n");
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/Basic.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/Basic.java
index e6be664..30d7943 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/Basic.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/Basic.java
@@ -162,8 +162,8 @@ abstract class Basic extends BasicServlet {
           conn.securityOperations().getUserAuthorizations(principal));
       return scanner;
     } catch (AccumuloSecurityException ex) {
-      sb.append(
-          "<h2>Unable to read trace table: check trace username and password configuration.</h2>\n");
+      sb.append("<h2>Unable to read trace table: check trace username "
+          + "and password configuration.</h2>\n");
       return null;
     } catch (TableNotFoundException ex) {
       return new NullScanner();
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/ShowTrace.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/ShowTrace.java
index 992bedd..8970dd0 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/ShowTrace.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/ShowTrace.java
@@ -126,8 +126,8 @@ public class ShowTrace extends Basic {
     sb.append(
         String.format("<span class='table-caption'>Trace %s started at<br>%s</span></caption>", id,
             dateString(start)));
-    sb.append(
-        "<tr><th>Time</th><th>Start</th><th>Service@Location</th><th>Name</th><th>Addl Data</th></tr>");
+    sb.append("<tr><th>Time</th><th>Start</th><th>Service@Location</th>"
+        + "<th>Name</th><th>Addl Data</th></tr>");
 
     final long finalStart = start;
     Set<Long> visited = tree.visit(new SpanTreeVisitor() {
@@ -139,8 +139,8 @@ public class ShowTrace extends Basic {
     });
     tree.nodes.keySet().removeAll(visited);
     if (!tree.nodes.isEmpty()) {
-      sb.append(
-          "<tr><td colspan=10>The following spans are not rooted (probably due to a parent span of length 0ms):<td></tr>\n");
+      sb.append("<tr><td colspan=10>The following spans are not rooted"
+          + " (probably due to a parent span of length 0ms):<td></tr>\n");
       for (RemoteSpan span : TraceDump.sortByStart(tree.nodes.values())) {
         appendRow(sb, 0, span, finalStart);
       }
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/Summary.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/Summary.java
index 9cf07eb..a034d3f 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/Summary.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/trace/Summary.java
@@ -189,7 +189,9 @@ public class Summary extends Basic {
     trace.addSortableColumn("max", new DurationType(), "Longest span duration");
     trace.addSortableColumn("avg", new DurationType(), "Average span duration");
     trace.addSortableColumn("Histogram", new HistogramType(),
-        "Counts of spans of different duration. Columns start at milliseconds, and each column is ten times longer: tens of milliseconds, seconds, tens of seconds, etc.");
+        "Counts of spans of different duration. Columns start at milliseconds,"
+            + " and each column is ten times longer: tens of milliseconds, seconds,"
+            + " tens of seconds, etc.");
 
     for (Entry<String,Stats> entry : summary.entrySet()) {
       Stats stat = entry.getValue();
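
The rewrapped tooltip describes decade-wide buckets, i.e. a span's column is roughly
floor(log10(duration in milliseconds)); a hypothetical sketch of that bucketing (not the actual
HistogramType code):

    // 0 -> 1-9 ms, 1 -> 10-99 ms, 2 -> 100-999 ms, and so on.
    static int bucket(long durationMillis) {
      return durationMillis < 1 ? 0 : (int) Math.floor(Math.log10(durationMillis));
    }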
diff --git a/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java b/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java
index e2e5cfe..7c3d4e7 100644
--- a/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java
+++ b/server/tracer/src/main/java/org/apache/accumulo/tracer/AsyncSpanReceiver.java
@@ -173,7 +173,8 @@ public abstract class AsyncSpanReceiver<SpanKey,Destination> implements SpanRece
         if (now - lastNotificationOfDroppedSpans > 60 * 1000) {
           log.warn("Tracing spans are being dropped because there are already " + maxQueueSize
               + " spans queued for delivery.\n"
-              + "This does not affect performance, security or data integrity, but distributed tracing information is being lost.");
+              + "This does not affect performance, security or data integrity,"
+              + " but distributed tracing information is being lost.");
           lastNotificationOfDroppedSpans = now;
         }
         return;
diff --git a/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java b/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java
index 099e3e5..c32c59d 100644
--- a/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java
+++ b/server/tracer/src/main/java/org/apache/accumulo/tracer/TraceServer.java
@@ -172,9 +172,8 @@ public class TraceServer implements Watcher {
         if (timeMutation != null)
           writer.addMutation(timeMutation);
       } catch (MutationsRejectedException exception) {
-        log.warn(
-            "Unable to write mutation to table; discarding span. set log level to DEBUG for span information and stacktrace. cause: "
-                + exception);
+        log.warn("Unable to write mutation to table; discarding span. set log"
+            + " level to DEBUG for span information and stacktrace. cause: " + exception);
         if (log.isDebugEnabled()) {
           log.debug("discarded span due to rejection of mutation: " + spanMutation, exception);
         }
@@ -183,9 +182,8 @@ public class TraceServer implements Watcher {
          * mutation to a writer that has been closed since we retrieved it
          */
       } catch (RuntimeException exception) {
-        log.warn(
-            "Unable to write mutation to table; discarding span. set log level to DEBUG for stacktrace. cause: "
-                + exception);
+        log.warn("Unable to write mutation to table; discarding span. set log"
+            + " level to DEBUG for stacktrace. cause: " + exception);
         log.debug("unable to write mutation to table due to exception.", exception);
       }
     }
@@ -319,16 +317,14 @@ public class TraceServer implements Watcher {
         }
       }
     } catch (MutationsRejectedException exception) {
-      log.warn(
-          "Problem flushing traces, resetting writer. Set log level to DEBUG to see stacktrace. cause: "
-              + exception);
+      log.warn("Problem flushing traces, resetting writer. Set log level to"
+          + " DEBUG to see stacktrace. cause: " + exception);
       log.debug("flushing traces failed due to exception", exception);
       resetWriter();
       /* XXX e.g. if the writer was closed between when we grabbed it and when we called flush. */
     } catch (RuntimeException exception) {
-      log.warn(
-          "Problem flushing traces, resetting writer. Set log level to DEBUG to see stacktrace. cause: "
-              + exception);
+      log.warn("Problem flushing traces, resetting writer. Set log level to"
+          + " DEBUG to see stacktrace. cause: " + exception);
       log.debug("flushing traces failed due to exception", exception);
       resetWriter();
     }
@@ -340,9 +336,8 @@ public class TraceServer implements Watcher {
       writer = connector.createBatchWriter(tableName,
           new BatchWriterConfig().setMaxLatency(BATCH_WRITER_MAX_LATENCY, TimeUnit.SECONDS));
     } catch (Exception ex) {
-      log.warn(
-          "Unable to create a batch writer, will retry. Set log level to DEBUG to see stacktrace. cause: "
-              + ex);
+      log.warn("Unable to create a batch writer, will retry. Set log level to"
+          + " DEBUG to see stacktrace. cause: " + ex);
       log.debug("batch writer creation failed with exception.", ex);
     } finally {
       /* Trade in the new writer (even if null) for the one we need to close. */
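
The retry path above recreates the writer with only a latency bound; the same public
BatchWriterConfig API chains other limits in the same style, e.g. (values are illustrative, not
recommendations; BatchWriter and BatchWriterConfig come from org.apache.accumulo.core.client,
TimeUnit from java.util.concurrent):

    BatchWriterConfig config = new BatchWriterConfig()
        .setMaxLatency(30, TimeUnit.SECONDS) // flush buffered mutations at least every 30s
        .setMaxMemory(10 * 1024 * 1024)      // buffer at most 10 MB before writing
        .setMaxWriteThreads(4);              // send to tservers with up to 4 threads
    BatchWriter writer = connector.createBatchWriter(tableName, config);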
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
index 1eb6f4e..173e953 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
@@ -110,8 +110,10 @@ public class InMemoryMap {
   public static final String TYPE_LOCALITY_GROUP_MAP = "LocalityGroupMap";
   public static final String TYPE_LOCALITY_GROUP_MAP_NATIVE = "LocalityGroupMap with native";
 
-  private AtomicReference<Pair<SamplerConfigurationImpl,Sampler>> samplerRef = new AtomicReference<>(
-      null);
+  // @formatter:off
+  private AtomicReference<Pair<SamplerConfigurationImpl,Sampler>> samplerRef =
+    new AtomicReference<>(null);
+  // @formatter:on
 
   private AccumuloConfiguration config;
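
The reformatted samplerRef above is the usual lazy-publication idiom: whichever thread initializes
first wins via compareAndSet, and everyone else reuses the installed pair. A generic,
self-contained sketch of the pattern (names are hypothetical):

    import java.util.concurrent.atomic.AtomicReference;

    public class LazyRef<T> {
      private final AtomicReference<T> ref = new AtomicReference<>(null);

      // Install candidate only if nothing is set yet; always return the winner.
      T getOrInit(T candidate) {
        if (ref.get() == null) {
          ref.compareAndSet(null, candidate);
        }
        return ref.get();
      }
    }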
 
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
index 9e18f68..472b414 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
@@ -149,6 +149,7 @@ import org.apache.accumulo.core.util.ServerServices.Service;
 import org.apache.accumulo.core.util.SimpleThreadPool;
 import org.apache.accumulo.core.util.ratelimit.RateLimiter;
 import org.apache.accumulo.core.util.ratelimit.SharedRateLimiterFactory;
+import org.apache.accumulo.core.util.ratelimit.SharedRateLimiterFactory.RateProvider;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.util.LoggingRunnable;
 import org.apache.accumulo.fate.util.Retry;
@@ -811,7 +812,8 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
 
       if (log.isDebugEnabled()) {
         log.debug(String.format(
-            "MultiScanSess %s %,d entries in %.2f secs (lookup_time:%.2f secs tablets:%,d ranges:%,d) ",
+            "MultiScanSess %s %,d entries in %.2f secs"
+                + " (lookup_time:%.2f secs tablets:%,d ranges:%,d) ",
             TServerUtils.clientAddress.get(), session.numEntries, (t2 - session.startTime) / 1000.0,
             session.totalLookupTime / 1000.0, session.numTablets, session.numRanges));
       }
@@ -1036,9 +1038,8 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
             } catch (FSError ex) { // happens when DFS is localFS
               log.warn("logging mutations failed, retrying");
             } catch (Throwable t) {
-              log.error(
-                  "Unknown exception logging mutations, counts for mutations in flight not decremented!",
-                  t);
+              log.error("Unknown exception logging mutations, counts"
+                  + " for mutations in flight not decremented!", t);
               throw new RuntimeException(t);
             }
           }
@@ -1360,9 +1361,8 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
           } catch (FSError ex) { // happens when DFS is localFS
             log.warn("logging mutations failed, retrying");
           } catch (Throwable t) {
-            log.error(
-                "Unknown exception logging mutations, counts for mutations in flight not decremented!",
-                t);
+            log.error("Unknown exception logging mutations, counts for"
+                + " mutations in flight not decremented!", t);
             throw new RuntimeException(t);
           }
         }
@@ -1606,9 +1606,8 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
         log.warn("Got " + request + " message from unauthenticatable user: " + e.getUser());
         if (getCredentials().getToken().getClass().getName()
             .equals(credentials.getTokenClassName())) {
-          log.error(
-              "Got message from a service with a mismatched configuration. Please ensure a compatible configuration.",
-              e);
+          log.error("Got message from a service with a mismatched configuration."
+              + " Please ensure a compatible configuration.", e);
         }
         throw e;
       }
@@ -1952,9 +1951,9 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
     public void removeLogs(TInfo tinfo, TCredentials credentials, List<String> filenames)
         throws TException {
       log.warn("Garbage collector is attempting to remove logs through the tablet server");
-      log.warn(
-          "This is probably because your file Garbage Collector is an older version than your tablet servers.\n"
-              + "Restart your file Garbage Collector.");
+      log.warn("This is probably because your file"
+          + " Garbage Collector is an older version than your tablet servers.\n"
+          + "Restart your file Garbage Collector.");
     }
   }
 
@@ -2530,8 +2529,10 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
         new ReplicationServicer.Processor<ReplicationServicer.Iface>(handler));
     ReplicationServicer.Iface repl = TCredentialsUpdatingWrapper.service(rpcProxy,
         handler.getClass(), getConfiguration());
-    ReplicationServicer.Processor<ReplicationServicer.Iface> processor = new ReplicationServicer.Processor<>(
-        repl);
+    // @formatter:off
+    ReplicationServicer.Processor<ReplicationServicer.Iface> processor =
+      new ReplicationServicer.Processor<>(repl);
+    // @formatter:on
     AccumuloConfiguration conf = getServerConfigurationFactory().getConfiguration();
     Property maxMessageSizeProperty = (conf.get(Property.TSERV_MAX_MESSAGE_SIZE) != null
         ? Property.TSERV_MAX_MESSAGE_SIZE
@@ -2573,8 +2574,8 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
         zoo.putPersistentData(zPath, new byte[] {}, NodeExistsPolicy.SKIP);
       } catch (KeeperException e) {
         if (KeeperException.Code.NOAUTH == e.code()) {
-          log.error(
-              "Failed to write to ZooKeeper. Ensure that accumulo-site.xml, specifically instance.secret, is consistent.");
+          log.error("Failed to write to ZooKeeper. Ensure that"
+              + " accumulo-site.xml, specifically instance.secret, is consistent.");
         }
         throw e;
       }
@@ -2666,9 +2667,8 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
         // out here? AUTH_FAILURE?
         // If we get the error, do we just put it on a timer and retry the exists(String, Watcher)
         // call?
-        log.error(
-            "Failed to perform initial check for authentication tokens in ZooKeeper. Delegation token authentication will be unavailable.",
-            e);
+        log.error("Failed to perform initial check for authentication tokens in"
+            + " ZooKeeper. Delegation token authentication will be unavailable.", e);
       }
     }
 
@@ -3392,7 +3392,7 @@ public class TabletServer extends AccumuloServerContext implements Runnable {
 
   private static final String MAJC_READ_LIMITER_KEY = "tserv_majc_read";
   private static final String MAJC_WRITE_LIMITER_KEY = "tserv_majc_write";
-  private final SharedRateLimiterFactory.RateProvider rateProvider = new SharedRateLimiterFactory.RateProvider() {
+  private final RateProvider rateProvider = new RateProvider() {
     @Override
     public long getDesiredRate() {
       return getConfiguration().getMemoryInBytes(Property.TSERV_MAJC_THROUGHPUT);
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
index 6d3cbd3..ff8202e 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
@@ -176,12 +176,15 @@ public class TabletServerResourceManager {
       // Still check block cache sizes when using native maps.
       if (dCacheSize + iCacheSize + totalQueueSize > runtime.maxMemory()) {
         throw new IllegalArgumentException(String.format(
-            "Block cache sizes %,d and mutation queue size %,d is too large for this JVM configuration %,d",
+            "Block cache sizes %,d"
+                + " and mutation queue size %,d is too large for this JVM configuration %,d",
             dCacheSize + iCacheSize, totalQueueSize, runtime.maxMemory()));
       }
     } else if (maxMemory + dCacheSize + iCacheSize + totalQueueSize > runtime.maxMemory()) {
       throw new IllegalArgumentException(String.format(
-          "Maximum tablet server map memory %,d block cache sizes %,d and mutation queue size %,d is too large for this JVM configuration %,d",
+          "Maximum tablet server"
+              + " map memory %,d block cache sizes %,d and mutation queue size %,d is"
+              + " too large for this JVM configuration %,d",
           maxMemory, dCacheSize + iCacheSize, totalQueueSize, runtime.maxMemory()));
     }
     runtime.gc();
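
Both rewrapped messages encode the same budget check: the block caches, the mutation queue, and
(for non-native maps) the in-memory map must sum to less than Runtime.maxMemory(). A standalone
sketch of the arithmetic with illustrative sizes:

    long maxMemory = 1L << 30;       // in-memory map budget (non-native maps only)
    long dCacheSize = 128L << 20;    // data block cache
    long iCacheSize = 64L << 20;     // index block cache
    long totalQueueSize = 50L << 20; // mutation queue
    long jvmMax = Runtime.getRuntime().maxMemory();
    if (maxMemory + dCacheSize + iCacheSize + totalQueueSize > jvmMax) {
      throw new IllegalArgumentException("Configured memory exceeds JVM maximum");
    }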
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
index ff70d78..6aa4964 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
@@ -402,8 +402,11 @@ public class DfsLogger implements Comparable<DfsLogger> {
 
             // The DefaultCryptoModule will want to read the parameters from the underlying file, so
             // we will put the file back to that spot.
-            org.apache.accumulo.core.security.crypto.CryptoModule cryptoModule = org.apache.accumulo.core.security.crypto.CryptoModuleFactory
+            // @formatter:off
+            org.apache.accumulo.core.security.crypto.CryptoModule cryptoModule =
+              org.apache.accumulo.core.security.crypto.CryptoModuleFactory
                 .getCryptoModule(DefaultCryptoModule.class.getName());
+            // @formatter:on
 
             CryptoModuleParameters params = CryptoModuleFactory
                 .createParamsObjectFromAccumuloConfiguration(conf);
@@ -428,8 +431,8 @@ public class DfsLogger implements Comparable<DfsLogger> {
 
       }
     } catch (EOFException e) {
-      log.warn(
-          "Got EOFException trying to read WAL header information, assuming the rest of the file has no data.");
+      log.warn("Got EOFException trying to read WAL header information,"
+          + " assuming the rest of the file has no data.");
       // A TabletServer might have died before the (complete) header was written
       throw new LogHeaderIncompleteException(e);
     }
@@ -475,8 +478,11 @@ public class DfsLogger implements Comparable<DfsLogger> {
       flush = logFile.getClass().getMethod("hflush");
 
       // Initialize the crypto operations.
-      org.apache.accumulo.core.security.crypto.CryptoModule cryptoModule = org.apache.accumulo.core.security.crypto.CryptoModuleFactory
+      // @formatter:off
+      org.apache.accumulo.core.security.crypto.CryptoModule cryptoModule =
+        org.apache.accumulo.core.security.crypto.CryptoModuleFactory
           .getCryptoModule(conf.getConfiguration().get(Property.CRYPTO_MODULE_CLASS));
+      // @formatter:on
 
       // Initialize the log file with a header and the crypto params used to set up this log file.
       logFile.write(LOG_FILE_HEADER_V3.getBytes(UTF_8));
@@ -600,8 +606,8 @@ public class DfsLogger implements Comparable<DfsLogger> {
     try {
       write(key, EMPTY);
     } catch (IllegalArgumentException e) {
-      log.error(
-          "Signature of sync method changed. Accumulo is likely incompatible with this version of Hadoop.");
+      log.error("Signature of sync method changed. Accumulo is likely"
+          + " incompatible with this version of Hadoop.");
       throw new RuntimeException(e);
     }
   }
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/SortedLogRecovery.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/SortedLogRecovery.java
index e7c369d..a850d2f 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/SortedLogRecovery.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/SortedLogRecovery.java
@@ -125,8 +125,8 @@ public class SortedLogRecovery {
     }
 
     if (lastStartToFinish.compactionStatus == Status.LOOKING_FOR_FINISH)
-      throw new RuntimeException(
-          "COMPACTION_FINISH (without preceding COMPACTION_START) not followed by successful minor compaction");
+      throw new RuntimeException("COMPACTION_FINISH (without preceding"
+          + " COMPACTION_START) not followed by successful minor compaction");
 
     for (int i = 0; i < recoveryLogs.size(); i++) {
       Path logfile = recoveryLogs.get(i);
@@ -170,8 +170,8 @@ public class SortedLogRecovery {
 
     if (key.tserverSession.compareTo(lastStartToFinish.tserverSession) != 0) {
       if (lastStartToFinish.compactionStatus == Status.LOOKING_FOR_FINISH)
-        throw new RuntimeException(
-            "COMPACTION_FINISH (without preceding COMPACTION_START) is not followed by a successful minor compaction.");
+        throw new RuntimeException("COMPACTION_FINISH (without preceding"
+            + " COMPACTION_START) is not followed by a successful minor compaction.");
       lastStartToFinish.update(key.tserverSession);
     }
     KeyExtent alternative = extent;
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/metrics/TabletServerUpdateMetrics.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/metrics/TabletServerUpdateMetrics.java
index 284cf66..e0c5cb2 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/metrics/TabletServerUpdateMetrics.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/metrics/TabletServerUpdateMetrics.java
@@ -34,9 +34,8 @@ public class TabletServerUpdateMetrics extends AbstractMetricsImpl
     super();
     reset();
     try {
-      OBJECT_NAME = new ObjectName(
-          "accumulo.server.metrics:service=TServerInfo,name=TabletServerUpdateMetricsMBean,instance="
-              + Thread.currentThread().getName());
+      OBJECT_NAME = new ObjectName("accumulo.server.metrics:service=TServerInfo,"
+          + "name=TabletServerUpdateMetricsMBean,instance=" + Thread.currentThread().getName());
     } catch (Exception e) {
       log.error("Exception setting MBean object name", e);
     }
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
index e98baae..c2505ff 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
@@ -280,9 +280,8 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
 
         if (null == peerTserverStr) {
           // Something went wrong, and we didn't get a valid tserver from the remote for some reason
-          log.warn(
-              "Did not receive tserver from master at {}, cannot proceed with replication. Will retry.",
-              target);
+          log.warn("Did not receive tserver from master at {}, cannot proceed"
+              + " with replication. Will retry.", target);
           continue;
         }
 
@@ -322,9 +321,8 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
         }
       }
 
-      log.info(
-          "No progress was made after {} attempts to replicate {}, returning so file can be re-queued",
-          numAttempts, p);
+      log.info("No progress was made after {} attempts to replicate {},"
+          + " returning so file can be re-queued", numAttempts, p);
 
       // We made no status, punt on it for now, and let it re-queue itself for work
       return status;
@@ -481,7 +479,8 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
             }
           } catch (TableNotFoundException e) {
             log.error(
-                "Tried to update status in replication table for {} as {}, but the table did not exist",
+                "Tried to update status in replication table for {} as"
+                    + " {}, but the table did not exist",
                 p, ProtobufUtil.toString(currentStatus), e);
             throw new RuntimeException("Replication table did not exist, will retry", e);
           } finally {
@@ -508,9 +507,8 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
         }
       }
     } catch (LogHeaderIncompleteException e) {
-      log.warn(
-          "Could not read header from {}, assuming that there is no data present in the WAL, therefore replication is complete",
-          p);
+      log.warn("Could not read header from {}, assuming that there is no data"
+          + " present in the WAL, therefore replication is complete", p);
       Status newStatus;
       // Bump up the begin to the (infinite) end, trying to be accurate
       if (status.getInfiniteEnd()) {
@@ -797,8 +795,8 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
           }
           break;
         default:
-          log.trace(
-              "Ignorning WAL entry which doesn't contain mutations, should not have received such entries");
+          log.trace("Ignorning WAL entry which doesn't contain mutations,"
+              + " should not have received such entries");
           break;
       }
     }
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
index 7ce6bb9..5662c63 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
@@ -242,7 +242,8 @@ public class Compactor implements Callable<CompactionStats> {
       }
 
       log.debug(String.format(
-          "Compaction %s %,d read | %,d written | %,6d entries/sec | %,6.3f secs | %,12d bytes | %9.3f byte/sec",
+          "Compaction %s %,d read | %,d written | %,6d entries/sec"
+              + " | %,6.3f secs | %,12d bytes | %9.3f byte/sec",
           extent, majCStats.getEntriesRead(), majCStats.getEntriesWritten(),
           (int) (majCStats.getEntriesRead() / ((t2 - t1) / 1000.0)), (t2 - t1) / 1000.0,
           mfwTmp.getLength(), mfwTmp.getLength() / ((t2 - t1) / 1000.0)));
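
Wrapping a format string, as in the hunk above, is safe so long as the break
never lands inside a "%" conversion. A small illustrative sketch (names and
values hypothetical):

    public class FormatWrapDemo {
      public static void main(String[] args) {
        // The break falls between conversions, never inside one like "%,6.3f".
        System.out.println(String.format(
            "Compaction %s %,d read | %,d written"
                + " | %,6.3f secs",
            "demo-extent", 1000000, 999999, 1.234));
      }
    }
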
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
index c189ec2..af822ef 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
@@ -501,9 +501,8 @@ class DatafileManager {
     long splitSize = tablet.getTableConfiguration()
         .getMemoryInBytes(Property.TABLE_SPLIT_THRESHOLD);
     if (dfv.getSize() > splitSize) {
-      log.debug(String.format(
-          "Minor Compaction wrote out file larger than split threshold.  split threshold = %,d  file size = %,d",
-          splitSize, dfv.getSize()));
+      log.debug(String.format("Minor Compaction wrote out file larger than split threshold."
+          + " split threshold = %,d  file size = %,d", splitSize, dfv.getSize()));
     }
   }
 
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
index 411a948..5018c97 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
@@ -632,9 +632,9 @@ public class Tablet implements TabletCommitter {
           }
           if (!results.isEmpty()
               && yieldPosition.compareTo(results.get(results.size() - 1).getKey()) <= 0) {
-            throw new IOException(
-                "Underlying iterator yielded to a position that does not follow the last key returned: "
-                    + yieldPosition + " <= " + results.get(results.size() - 1).getKey());
+            throw new IOException("Underlying iterator yielded to a position"
+                + " that does not follow the last key returned: " + yieldPosition + " <= "
+                + results.get(results.size() - 1).getKey());
           }
           addUnfinishedRange(lookupResult, range, yieldPosition, false);
 
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletCommitter.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletCommitter.java
index 934ce20..c90f1e1 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletCommitter.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletCommitter.java
@@ -25,8 +25,9 @@ import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.tserver.InMemoryMap;
 import org.apache.accumulo.tserver.log.DfsLogger;
 
-/*
- * A partial interface of Tablet to allow for testing of CommitSession without needing a real Tablet.
+/**
+ * A partial interface of Tablet to allow for testing of CommitSession without needing a real
+ * Tablet.
  */
 public interface TabletCommitter {
 
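The change from "/*" to "/**" above is more than style: only comments that open
with "/**" are processed by the javadoc tool. A minimal sketch with
hypothetical type names:

    /**
     * Picked up by the javadoc tool and published in the generated API docs.
     */
    interface DocumentedExample {}

    /*
     * A plain block comment; the javadoc tool ignores it entirely.
     */
    interface UndocumentedExample {}
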
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/LargestFirstMemoryManagerTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/LargestFirstMemoryManagerTest.java
index 067ade2..aba2280 100644
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/LargestFirstMemoryManagerTest.java
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/LargestFirstMemoryManagerTest.java
@@ -203,8 +203,10 @@ public class LargestFirstMemoryManagerTest {
         return !deletedTableId.equals(tableId);
       }
     };
-    LargestFirstMemoryManagerWithExistenceCheck mgr = new LargestFirstMemoryManagerWithExistenceCheck(
-        existenceCheck);
+    // @formatter:off
+    LargestFirstMemoryManagerWithExistenceCheck mgr =
+      new LargestFirstMemoryManagerWithExistenceCheck(existenceCheck);
+    // @formatter:on
     ServerConfiguration config = new ServerConfiguration() {
       ServerConfigurationFactory delegate = new ServerConfigurationFactory(inst);
 
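The "// @formatter:off" and "// @formatter:on" tags used above ask the
formatter (assuming the Eclipse-style formatter in Accumulo's build has on/off
tags enabled) to leave the enclosed lines exactly as written. A hypothetical
sketch of the pattern:

    public class FormatterTagDemo {
      public static void main(String[] args) {
        // The tags fence off a manual break the formatter would otherwise
        // rejoin or re-wrap past the configured line length.
        // @formatter:off
        java.util.concurrent.atomic.AtomicReference<String> aDeliberatelyLongVariableName =
          new java.util.concurrent.atomic.AtomicReference<>("ok");
        // @formatter:on
        System.out.println(aDeliberatelyLongVariableName.get());
      }
    }
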
diff --git a/shell/src/main/java/org/apache/accumulo/shell/Shell.java b/shell/src/main/java/org/apache/accumulo/shell/Shell.java
index 0cc7283..31cd5d4 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/Shell.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/Shell.java
@@ -350,9 +350,10 @@ public class Shell extends ShellOptions implements KeywordExecutable {
 
       if (hasToken) { // implied hasTokenOptions
         // Fully qualified name so we don't shadow java.util.Properties
-        org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Properties props;
-        // and line wrap it because the package name is so long
-        props = new org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Properties();
+        // @formatter:off
+        org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Properties props =
+          new org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Properties();
+        // @formatter:on
 
         if (!loginOptions.isEmpty()) {
           props.putAllStrings(loginOptions);
@@ -1180,8 +1181,9 @@ public class Shell extends ShellOptions implements KeywordExecutable {
 
   public void checkTableState() {
     if (getTableName().isEmpty())
-      throw new IllegalStateException(
-          "Not in a table context. Please use 'table <tableName>' to switch to a table, or use '-t' to specify a table if option is available.");
+      throw new IllegalStateException("Not in a table context. Please use"
+          + " 'table <tableName>' to switch to a table, or use '-t' to specify a"
+          + " table if option is available.");
   }
 
   private final void printConstraintViolationException(ConstraintViolationException cve) {
diff --git a/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java b/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
index 746bbce..2fab019 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
@@ -120,8 +120,9 @@ public class ShellOptionsJC {
 
   // Note: Don't use "password = true" because then it will prompt even if we have a token
   @Parameter(names = {"-p", "--password"},
-      description = "password (can be specified as 'pass:<password>', 'file:<local file containing the password>', "
-          + "'env:<variable containing the pass>', or stdin)",
+      description = "password (can be specified as 'pass:<password>',"
+          + " 'file:<local file containing the password>', 'env:<variable containing"
+          + " the pass>', or stdin)",
       converter = PasswordConverter.class)
   private String password;
 
@@ -178,8 +179,9 @@ public class ShellOptionsJC {
   private boolean hdfsZooInstance;
 
   @Parameter(names = {"-z", "--zooKeeperInstance"},
-      description = "use a zookeeper instance with the given instance name and list of zoo hosts. "
-          + "Syntax: -z <zoo-instance-name> <zoo-hosts>. Where <zoo-hosts> is a comma separated list of zookeeper servers.",
+      description = "use a zookeeper instance with the given instance name and"
+          + " list of zoo hosts. Syntax: -z <zoo-instance-name> <zoo-hosts>. Where"
+          + " <zoo-hosts> is a comma separated list of zookeeper servers.",
       arity = 2)
   private List<String> zooKeeperInstance = new ArrayList<>();
 
@@ -189,9 +191,11 @@ public class ShellOptionsJC {
   @Parameter(names = "--sasl", description = "use SASL to connect to Accumulo (Kerberos)")
   private boolean useSasl = false;
 
-  @Parameter(names = "--config-file", description = "read the given client config file. "
-      + "If omitted, the path searched can be specified with $ACCUMULO_CLIENT_CONF_PATH, "
-      + "which defaults to ~/.accumulo/config:$ACCUMULO_CONF_DIR/client.conf:/etc/accumulo/client.conf")
+  @Parameter(names = "--config-file",
+      description = "read the given client"
+          + " config file. If omitted, the path searched can be specified with"
+          + " $ACCUMULO_CLIENT_CONF_PATH, which defaults to"
+          + " ~/.accumulo/config:$ACCUMULO_CONF_DIR/client.conf:/etc/accumulo/client.conf")
   private String clientConfigFile = null;
 
   @Parameter(names = {"-zi", "--zooKeeperInstanceName"},
@@ -200,8 +204,8 @@ public class ShellOptionsJC {
   private String zooKeeperInstanceName;
 
   @Parameter(names = {"-zh", "--zooKeeperHosts"},
-      description = "use a zookeeper instance with the given comma separated list of zookeeper servers. "
-          + "This parameter is used in conjunction with -zi.")
+      description = "use a zookeeper instance with the given comma separated"
+          + " list of zookeeper servers. This parameter is used in conjunction with -zi.")
   private String zooKeeperHosts;
 
   @Parameter(names = "--auth-timeout",
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/ActiveScanIterator.java b/shell/src/main/java/org/apache/accumulo/shell/commands/ActiveScanIterator.java
index 233934c..80cf400 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/ActiveScanIterator.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/ActiveScanIterator.java
@@ -67,7 +67,8 @@ class ActiveScanIterator implements Iterator<String> {
     this.tsIter = tservers.iterator();
 
     final String header = String.format(
-        " %-21s| %-21s| %-9s| %-9s| %-7s| %-6s| %-8s| %-8s| %-10s| %-20s| %-10s| %-10s | %-20s | %s",
+        " %-21s| %-21s| %-9s| %-9s| %-7s| %-6s|"
+            + " %-8s| %-8s| %-10s| %-20s| %-10s| %-10s | %-20s | %s",
         "TABLET SERVER", "CLIENT", "AGE", "LAST", "STATE", "TYPE", "USER", "TABLE", "COLUMNS",
         "AUTHORIZATIONS", "TABLET", "SCAN ID", "ITERATORS", "ITERATOR OPTIONS");
 
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/CloneTableCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/CloneTableCommand.java
index 9fcd29f..46c0d66 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/CloneTableCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/CloneTableCommand.java
@@ -90,8 +90,8 @@ public class CloneTableCommand extends Command {
   @Override
   public Options getOptions() {
     final Options o = new Options();
-    setPropsOption = new Option("s", "set", true,
-        "set initial properties before the table comes online. Expects <prop>=<value>{,<prop>=<value>}");
+    setPropsOption = new Option("s", "set", true, "set initial properties"
+        + " before the table comes online. Expects <prop>=<value>{,<prop>=<value>}");
     o.addOption(setPropsOption);
     excludePropsOption = new Option("e", "exclude", true,
         "exclude properties that should not be copied from source table. Expects <prop>{,<prop>}");
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/CompactCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/CompactCommand.java
index 86c184e..da782fa 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/CompactCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/CompactCommand.java
@@ -47,9 +47,12 @@ public class CompactCommand extends TableOperation {
 
   @Override
   public String description() {
-    return "Initiates a major compaction on tablets within the specified range that have one or more files.  If no file selection options are specified, then "
-        + "all files will be compacted.  Options that configure output settings are only applied to this compaction and not later compactions.  If multiple "
-        + "concurrent user initiated compactions specify iterators or a compaction strategy, then all but one will fail to start.";
+    return "Initiates a major compaction on tablets within the specified range"
+        + " that have one or more files. If no file selection options are"
+        + " specified, then all files will be compacted. Options that configure"
+        + " output settings are only applied to this compaction and not later"
+        + " compactions. If multiple concurrent user initiated compactions specify"
+        + " iterators or a compaction strategy, then all but one will fail to" + " start.";
   }
 
   @Override
@@ -204,37 +207,48 @@ public class CompactCommand extends TableOperation {
     opts.addOption(cancelOpt);
 
     enoSampleOption = new Option(null, "sf-no-sample", false,
-        "Select files that have no sample data or sample data that differes from the table configuration.");
+        "Select files that have no sample data or sample data that differes"
+            + " from the table configuration.");
     opts.addOption(enoSampleOption);
     enameOption = newLAO("sf-ename",
-        "Select files using regular expression to match file names. Only matches against last part of path.");
+        "Select files using regular expression to match file names. Only"
+            + " matches against last part of path.");
     opts.addOption(enameOption);
     epathOption = newLAO("sf-epath",
-        "Select files using regular expression to match file paths to compact. Matches against full path.");
+        "Select files using regular expression to match file paths to compact."
+            + " Matches against full path.");
     opts.addOption(epathOption);
     sizeLtOption = newLAO("sf-lt-esize",
-        "Selects files less than specified size.  Uses the estimated size of file in metadata table.  Can use K,M, and G suffixes");
+        "Selects files less than specified size.  Uses the estimated size of"
+            + " file in metadata table. Can use K,M, and G suffixes");
     opts.addOption(sizeLtOption);
     sizeGtOption = newLAO("sf-gt-esize",
-        "Selects files greater than specified size.  Uses the estimated size of file in metadata table.  Can use K,M, and G suffixes");
+        "Selects files greater than specified size. Uses the estimated size of"
+            + " file in metadata table. Can use K,M, and G suffixes");
     opts.addOption(sizeGtOption);
     minFilesOption = newLAO("min-files",
-        "Only compacts if at least the specified number of files are selected.  When no file selection criteria are given, all files are selected.");
+        "Only compacts if at least the specified number of files are selected."
+            + " When no file selection criteria are given, all files are selected.");
     opts.addOption(minFilesOption);
     outBlockSizeOpt = newLAO("out-data-bs",
-        "Rfile data block size to use for compaction output file.  Can use K,M, and G suffixes. Uses table settings if not specified.");
+        "Rfile data block size to use for compaction output file. Can use K,M,"
+            + " and G suffixes. Uses table settings if not specified.");
     opts.addOption(outBlockSizeOpt);
     outHdfsBlockSizeOpt = newLAO("out-hdfs-bs",
-        "HDFS block size to use for compaction output file.  Can use K,M, and G suffixes. Uses table settings if not specified.");
+        "HDFS block size to use for compaction output file. Can use K,M, and G"
+            + " suffixes. Uses table settings if not specified.");
     opts.addOption(outHdfsBlockSizeOpt);
     outIndexBlockSizeOpt = newLAO("out-index-bs",
-        "Rfile index block size to use for compaction output file.  Can use K,M, and G suffixes. Uses table settings if not specified.");
+        "Rfile index block size to use for compaction output file. Can use"
+            + " K,M, and G suffixes. Uses table settings if not specified.");
     opts.addOption(outIndexBlockSizeOpt);
     outCompressionOpt = newLAO("out-compress",
-        "Compression to use for compaction output file. Either snappy, gz, lzo, or none. Uses table settings if not specified.");
+        "Compression to use for compaction output file. Either snappy, gz, lzo,"
+            + " or none. Uses table settings if not specified.");
     opts.addOption(outCompressionOpt);
     outReplication = newLAO("out-replication",
-        "HDFS replication to use for compaction output file. Uses table settings if not specified.");
+        "HDFS replication to use for compaction output file. Uses table"
+            + " settings if not specified.");
     opts.addOption(outReplication);
 
     return opts;
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/CreateTableCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/CreateTableCommand.java
index ae2818b..12f5682 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/CreateTableCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/CreateTableCommand.java
@@ -183,7 +183,8 @@ public class CreateTableCommand extends Command {
     createTableNoDefaultIters = new Option("ndi", "no-default-iterators", false,
         "prevent creation of the normal default iterator set");
     createTableOptEVC = new Option("evc", "enable-visibility-constraint", false,
-        "prevent users from writing data they cannot read.  When enabling this, consider disabling bulk import and alter table.");
+        "prevent users from writing data they cannot read. When enabling this,"
+            + " consider disabling bulk import and alter table.");
     createTableOptFormatter = new Option("f", "formatter", true, "default formatter to set");
     createTableOptInitProp = new Option("prop", "init-properties", true,
         "user defined initial properties");
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/DUCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/DUCommand.java
index 76e9060..fc64f9c 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/DUCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/DUCommand.java
@@ -94,8 +94,9 @@ public class DUCommand extends Command {
 
   @Override
   public String description() {
-    return "prints how much space, in bytes, is used by files referenced by a table.  "
-        + "When multiple tables are specified it prints how much space, in bytes, is used by files shared between tables, if any.";
+    return "prints how much space, in bytes, is used by files referenced by a"
+        + " table. When multiple tables are specified it prints how much space, in"
+        + " bytes, is used by files shared between tables, if any.";
   }
 
   @Override
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/DeleteCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/DeleteCommand.java
index b3d8a47..2ce9803 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/DeleteCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/DeleteCommand.java
@@ -100,7 +100,8 @@ public class DeleteCommand extends Command {
     o.addOption(timestampOpt);
 
     timeoutOption = new Option(null, "timeout", true,
-        "time before insert should fail if no data is written. If no unit is given assumes seconds.  Units d,h,m,s,and ms are supported.  e.g. 30s or 100ms");
+        "time before insert should fail if no data is written. If no unit is"
+            + " given assumes seconds. Units d,h,m,s,and ms are supported. e.g. 30s" + " or 100ms");
     timeoutOption.setArgName("timeout");
     o.addOption(timeoutOption);
 
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/DeleteRowsCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/DeleteRowsCommand.java
index c84db75..e9e1655 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/DeleteRowsCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/DeleteRowsCommand.java
@@ -43,7 +43,8 @@ public class DeleteRowsCommand extends Command {
 
   @Override
   public String description() {
-    return "deletes a range of rows in a table.  Note that rows matching the start row ARE NOT deleted, but rows matching the end row ARE deleted.";
+    return "deletes a range of rows in a table. Note that rows matching the"
+        + " start row ARE NOT deleted, but rows matching the end row ARE deleted.";
   }
 
   @Override
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/DeleteScanIterCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/DeleteScanIterCommand.java
index d391c6b..5414cad 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/DeleteScanIterCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/DeleteScanIterCommand.java
@@ -77,7 +77,8 @@ public class DeleteScanIterCommand extends Command {
 
   @Override
   public String description() {
-    return "deletes a table-specific scan iterator so it is no longer used during this shell session";
+    return "deletes a table-specific scan iterator so it is no longer used"
+        + " during this shell session";
   }
 
   @Override
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/EGrepCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/EGrepCommand.java
index 2a9b656..57638fc 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/EGrepCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/EGrepCommand.java
@@ -43,8 +43,9 @@ public class EGrepCommand extends GrepCommand {
 
   @Override
   public String description() {
-    return "searches each row, column family, column qualifier and value, in parallel, on the server side "
-        + "(using a java Matcher, so put .* before and after your term if you're not matching the whole element)";
+    return "searches each row, column family, column qualifier and value, in"
+        + " parallel, on the server side (using a java Matcher, so put .* before"
+        + " and after your term if you're not matching the whole element)";
   }
 
   @Override
@@ -56,7 +57,8 @@ public class EGrepCommand extends GrepCommand {
   public Options getOptions() {
     final Options opts = super.getOptions();
     matchSubstringOption = new Option("g", "global", false,
-        "forces the use of the find() expression matcher, causing substring matches to return true");
+        "forces the use of the find() expression matcher, causing substring"
+            + " matches to return true");
     opts.addOption(matchSubstringOption);
     return opts;
   }
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/FateCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/FateCommand.java
index a9cd37d..b8919dd 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/FateCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/FateCommand.java
@@ -252,7 +252,8 @@ public class FateCommand extends Command {
     secretOption.setOptionalArg(false);
     o.addOption(secretOption);
     statusOption = new Option("t", "status-type", true,
-        "filter 'print' on the transaction status type(s) {NEW, IN_PROGRESS, FAILED_IN_PROGRESS, FAILED, SUCCESSFUL}");
+        "filter 'print' on the transaction status type(s) {NEW, IN_PROGRESS,"
+            + " FAILED_IN_PROGRESS, FAILED, SUCCESSFUL}");
     statusOption.setArgs(Option.UNLIMITED_VALUES);
     statusOption.setOptionalArg(false);
     o.addOption(statusOption);
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/GrepCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/GrepCommand.java
index b1d1f6f..d2ccad1 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/GrepCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/GrepCommand.java
@@ -98,7 +98,8 @@ public class GrepCommand extends ScanCommand {
 
   @Override
   public String description() {
-    return "searches each row, column family, column qualifier and value in a table for a substring (not a regular expression), in parallel, on the server side";
+    return "searches each row, column family, column qualifier and value in a"
+        + " table for a substring (not a regular expression), in parallel, on the" + " server side";
   }
 
   @Override
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/HiddenCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/HiddenCommand.java
index e54b15f..5126113 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/HiddenCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/HiddenCommand.java
@@ -42,12 +42,14 @@ public class HiddenCommand extends Command {
     if (rand.nextInt(10) == 0) {
       shellState.getReader().beep();
       shellState.getReader().println();
-      shellState.getReader().println(new String(Base64.decodeBase64(
-          ("ICAgICAgIC4tLS4KICAgICAgLyAvXCBcCiAgICAgKCAvLS1cICkKICAgICAuPl8gIF88LgogICAgLyB8ICd8ICcgXAog"
-              + "ICAvICB8Xy58Xy4gIFwKICAvIC98ICAgICAgfFwgXAogfCB8IHwgfFwvfCB8IHwgfAogfF98IHwgfCAgfCB8IHxffAogICAgIC8gIF9fICBcCiAgICAvICAv"
-              + "ICBcICBcCiAgIC8gIC8gICAgXCAgXF8KIHwvICAvICAgICAgXCB8IHwKIHxfXy8gICAgICAgIFx8X3wK")
-                  .getBytes(UTF_8)),
-          UTF_8));
+      shellState.getReader()
+          .println(new String(Base64
+              .decodeBase64(("ICAgICAgIC4tLS4KICAgICAgLyAvXCBcCiAgICAgKCAvLS1cICkKICAgICAuPl8gIF88"
+                  + "LgogICAgLyB8ICd8ICcgXAogICAvICB8Xy58Xy4gIFwKICAvIC98ICAgICAgfFwgXAog"
+                  + "fCB8IHwgfFwvfCB8IHwgfAogfF98IHwgfCAgfCB8IHxffAogICAgIC8gIF9fICBcCiAg"
+                  + "ICAvICAvICBcICBcCiAgIC8gIC8gICAgXCAgXF8KIHwvICAvICAgICAgXCB8IHwKIHxf"
+                  + "Xy8gICAgICAgIFx8X3wK").getBytes(UTF_8)),
+              UTF_8));
     } else {
       throw new ShellCommandException(ErrorCode.UNRECOGNIZED_COMMAND, getName());
     }
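
Splitting the long Base64 literal above cannot corrupt the payload, since the
chunks are folded back into one constant before decoding runs. An illustrative
sketch using java.util.Base64 rather than the commons-codec class in the patch:

    import static java.nio.charset.StandardCharsets.UTF_8;
    import java.util.Base64;

    public class SplitBase64Demo {
      public static void main(String[] args) {
        // The two chunks join into one constant before decode() sees them.
        String encoded = "SGVsbG8s" + "IEFjY3VtdWxvIQ==";
        System.out.println(new String(Base64.getDecoder().decode(encoded), UTF_8));
      }
    }
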
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/ImportDirectoryCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/ImportDirectoryCommand.java
index 99e125b..ec1576b 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/ImportDirectoryCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/ImportDirectoryCommand.java
@@ -29,7 +29,8 @@ public class ImportDirectoryCommand extends Command {
 
   @Override
   public String description() {
-    return "bulk imports an entire directory of data files to the current table.  The boolean argument determines if accumulo sets the time.";
+    return "bulk imports an entire directory of data files to the current"
+        + " table. The boolean argument determines if accumulo sets the time.";
   }
 
   @Override
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/InsertCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/InsertCommand.java
index c58f7d0..21de8bc 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/InsertCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/InsertCommand.java
@@ -160,7 +160,8 @@ public class InsertCommand extends Command {
     o.addOption(timestampOpt);
 
     timeoutOption = new Option(null, "timeout", true,
-        "time before insert should fail if no data is written. If no unit is given assumes seconds.  Units d,h,m,s,and ms are supported.  e.g. 30s or 100ms");
+        "time before insert should fail if no data is written. If no unit is"
+            + " given assumes seconds. Units d,h,m,s,and ms are supported. e.g. 30s" + " or 100ms");
     timeoutOption.setArgName("timeout");
     o.addOption(timeoutOption);
 
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/ListCompactionsCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/ListCompactionsCommand.java
index 6229d68..3c264f6 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/ListCompactionsCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/ListCompactionsCommand.java
@@ -32,7 +32,9 @@ public class ListCompactionsCommand extends Command {
 
   @Override
   public String description() {
-    return "lists what compactions are currently running in accumulo. See the accumulo.core.client.admin.ActiveCompaciton javadoc for more information about columns.";
+    return "lists what compactions are currently running in accumulo. See the"
+        + " accumulo.core.client.admin.ActiveCompaciton javadoc for more information"
+        + " about columns.";
   }
 
   @Override
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/ListScansCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/ListScansCommand.java
index 36a0bdc..54e57d0 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/ListScansCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/ListScansCommand.java
@@ -32,7 +32,8 @@ public class ListScansCommand extends Command {
 
   @Override
   public String description() {
-    return "lists what scans are currently running in accumulo. See the accumulo.core.client.admin.ActiveScan javadoc for more information about columns.";
+    return "lists what scans are currently running in accumulo. See the"
+        + " accumulo.core.client.admin.ActiveScan javadoc for more information" + " about columns.";
   }
 
   @Override
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/MergeCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/MergeCommand.java
index 53a0920..ffa9fdf 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/MergeCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/MergeCommand.java
@@ -101,7 +101,8 @@ public class MergeCommand extends Command {
     forceOpt = new Option("f", "force", false,
         "merge small tablets to large tablets, even if it goes over the given size");
     allOpt = new Option("", "all", false,
-        "allow an entire table to be merged into one tablet without prompting the user for confirmation");
+        "allow an entire table to be merged into one tablet without prompting"
+            + " the user for confirmation");
     o.addOption(OptUtil.startRowOpt());
     o.addOption(OptUtil.endRowOpt());
     o.addOption(OptUtil.tableOpt("table to be merged"));
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/QuotedStringTokenizer.java b/shell/src/main/java/org/apache/accumulo/shell/commands/QuotedStringTokenizer.java
index 09bfbfc..117dea1 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/QuotedStringTokenizer.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/QuotedStringTokenizer.java
@@ -74,9 +74,8 @@ public class QuotedStringTokenizer implements Iterable<String> {
         } else if (ch == ' ' || ch == '\'' || ch == '"' || ch == '\\') {
           token[tokenLength++] = inputBytes[i];
         } else {
-          throw new BadArgumentException(
-              "can only escape single quotes, double quotes, the space character, the backslash, and hex input",
-              input, i);
+          throw new BadArgumentException("can only escape single quotes, double"
+              + " quotes, the space character, the backslash, and hex input", input, i);
         }
       } else if (hexChars != null) {
         // in a hex escape sequence
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/ScanCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/ScanCommand.java
index 75769e8..5298bbf 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/ScanCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/ScanCommand.java
@@ -342,7 +342,8 @@ public class ScanCommand extends Command {
     formatterInterpeterOpt = new Option("fi", "fmt-interpreter", true,
         "fully qualified name of a class that is a formatter and interpreter");
     timeoutOption = new Option(null, "timeout", true,
-        "time before scan should fail if no data is returned. If no unit is given assumes seconds.  Units d,h,m,s,and ms are supported.  e.g. 30s or 100ms");
+        "time before scan should fail if no data is returned. If no unit is"
+            + " given assumes seconds. Units d,h,m,s,and ms are supported. e.g. 30s" + " or 100ms");
     outputFileOpt = new Option("o", "output", true, "local file to write the scan output to");
     sampleOpt = new Option(null, "sample", false, "Show sample");
     contextOpt = new Option("cc", "context", true, "name of the classloader context");
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/SetIterCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/SetIterCommand.java
index d1603de..42a8418 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/SetIterCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/SetIterCommand.java
@@ -154,9 +154,11 @@ public class SetIterCommand extends Command {
     ScanCommand.ensureTserversCanLoadIterator(shellState, tableName, classname);
 
     final String aggregatorClass = options.get("aggregatorClass");
+    // @formatter:off
     @SuppressWarnings("deprecation")
-    String deprecatedAggregatorClassName = org.apache.accumulo.core.iterators.aggregation.Aggregator.class
-        .getName();
+    String deprecatedAggregatorClassName =
+      org.apache.accumulo.core.iterators.aggregation.Aggregator.class.getName();
+    // @formatter:on
     if (aggregatorClass != null && !shellState.getConnector().tableOperations()
         .testClassLoad(tableName, aggregatorClass, deprecatedAggregatorClassName)) {
       throw new ShellCommandException(ErrorCode.INITIALIZATION_FAILURE,
@@ -203,9 +205,11 @@ public class SetIterCommand extends Command {
     }
 
     final String aggregatorClass = options.get("aggregatorClass");
+    // @formatter:off
     @SuppressWarnings("deprecation")
-    String deprecatedAggregatorClassName = org.apache.accumulo.core.iterators.aggregation.Aggregator.class
-        .getName();
+    String deprecatedAggregatorClassName =
+      org.apache.accumulo.core.iterators.aggregation.Aggregator.class.getName();
+    // @formatter:on
     if (aggregatorClass != null && !shellState.getConnector().namespaceOperations()
         .testClassLoad(namespace, aggregatorClass, deprecatedAggregatorClassName)) {
       throw new ShellCommandException(ErrorCode.INITIALIZATION_FAILURE,
@@ -346,8 +350,8 @@ public class SetIterCommand extends Command {
       } while (!iterOptions.validateOptions(options));
     } else {
       reader.flush();
-      reader.println(
-          "The iterator class does not implement OptionDescriber. Consider this for better iterator configuration using this setiter command.");
+      reader.println("The iterator class does not implement OptionDescriber."
+          + " Consider this for better iterator configuration using this setiter" + " command.");
       iteratorName = reader.readLine("Name for iterator (enter to skip): ");
       if (null == iteratorName) {
         reader.println();
diff --git a/shell/src/test/java/org/apache/accumulo/shell/ShellTest.java b/shell/src/test/java/org/apache/accumulo/shell/ShellTest.java
index 33d208b..93cd707 100644
--- a/shell/src/test/java/org/apache/accumulo/shell/ShellTest.java
+++ b/shell/src/test/java/org/apache/accumulo/shell/ShellTest.java
@@ -396,7 +396,8 @@ public class ShellTest {
 
     input.set("\n\n");
     exec(
-        "setiter -scan -class org.apache.accumulo.core.iterators.ColumnFamilyCounter -p 30 -name foo",
+        "setiter -scan"
+            + " -class org.apache.accumulo.core.iterators.ColumnFamilyCounter -p 30 -name foo",
         true);
 
     input.set("bar\nname value\n");
diff --git a/shell/src/test/java/org/apache/accumulo/shell/commands/SetIterCommandTest.java b/shell/src/test/java/org/apache/accumulo/shell/commands/SetIterCommandTest.java
index ea79935..91324c2 100644
--- a/shell/src/test/java/org/apache/accumulo/shell/commands/SetIterCommandTest.java
+++ b/shell/src/test/java/org/apache/accumulo/shell/commands/SetIterCommandTest.java
@@ -121,7 +121,8 @@ public class SetIterCommandTest {
     EasyMock.replay(conn, cli, shellState, reader, tableOperations);
 
     cmd.execute(
-        "setiter -all -p 21 -t foo -class org.apache.accumulo.core.iterators.user.ColumnAgeOffFilter",
+        "setiter -all -p 21 -t foo"
+            + " -class org.apache.accumulo.core.iterators.user.ColumnAgeOffFilter",
         cli, shellState);
 
     EasyMock.verify(conn, cli, shellState, reader, tableOperations);
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
index 30c4be0..63a7f30 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
@@ -52,17 +52,18 @@ import org.slf4j.LoggerFactory;
  * SystemClassLoader that loads JVM classes
  *       ^
  *       |
- * AccumuloClassLoader loads jars from locations in general.classpaths. Usually the URLs for HADOOP_HOME, ZOOKEEPER_HOME, ACCUMULO_HOME and their associated directories
+ * AccumuloClassLoader loads jars from locations in general.classpaths.
+ * Usually the URLs for HADOOP_HOME, ZOOKEEPER_HOME, ACCUMULO_HOME and their associated directories
  *       ^
  *       |
- * VFSClassLoader that loads jars from locations in general.vfs.classpaths.  Can be used to load accumulo jar from HDFS
+ * VFSClassLoader that loads jars from locations in general.vfs.classpaths.
+ * Can be used to load accumulo jar from HDFS
  *       ^
  *       |
- * AccumuloReloadingVFSClassLoader That loads jars from locations in general.dynamic.classpaths.  Used to load jar dynamically.
- *
+ * AccumuloReloadingVFSClassLoader that loads jars from locations in general.dynamic.classpaths.
+ * Used to load jars dynamically.
  * </pre>
  *
- *
  */
 public class AccumuloVFSClassLoader {
 
@@ -352,12 +353,12 @@ public class AccumuloVFSClassLoader {
                 + ": Accumulo Classloader (loads everything defined by general.classpaths)";
             break;
           case 4:
-            classLoaderDescription = level
-                + ": Accumulo Dynamic Classloader (loads everything defined by general.dynamic.classpaths)";
+            classLoaderDescription = level + ": Accumulo Dynamic Classloader "
+                + "(loads everything defined by general.dynamic.classpaths)";
             break;
           default:
-            classLoaderDescription = level
-                + ": Mystery Classloader (someone probably added a classloader and didn't update the switch statement in "
+            classLoaderDescription = level + ": Mystery Classloader ("
+                + "someone probably added a classloader and didn't update the switch statement in "
                 + AccumuloVFSClassLoader.class.getName() + ")";
             break;
         }
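
The javadoc above describes a parent-delegating classloader chain. A rough
sketch (loader names are hypothetical stand-ins, not the real Accumulo
loaders) that builds and prints such a chain:

    import java.net.URL;
    import java.net.URLClassLoader;

    public class LoaderChainDemo {
      public static void main(String[] args) {
        // Each loader delegates to its parent before attempting a load itself.
        ClassLoader system = ClassLoader.getSystemClassLoader();
        ClassLoader accumuloLike = new URLClassLoader(new URL[0], system);
        ClassLoader dynamicLike = new URLClassLoader(new URL[0], accumuloLike);
        for (ClassLoader cl = dynamicLike; cl != null; cl = cl.getParent()) {
          System.out.println(cl);
        }
      }
    }
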
diff --git a/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
index f7f00a8..a5a2b5a 100644
--- a/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
+++ b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
@@ -134,7 +134,10 @@ public abstract class AccumuloClusterHarness extends AccumuloITBase
         }
         break;
       case STANDALONE:
-        StandaloneAccumuloClusterConfiguration conf = (StandaloneAccumuloClusterConfiguration) clusterConf;
+        // @formatter:off
+        StandaloneAccumuloClusterConfiguration conf =
+          (StandaloneAccumuloClusterConfiguration) clusterConf;
+        // @formatter:on
         ClientConfiguration clientConf = conf.getClientConf();
         StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(
             conf.getInstance(), clientConf, conf.getTmpDirectory(), conf.getUsers(),
diff --git a/test/src/main/java/org/apache/accumulo/harness/MiniClusterHarness.java b/test/src/main/java/org/apache/accumulo/harness/MiniClusterHarness.java
index f44adcf..7ddf114 100644
--- a/test/src/main/java/org/apache/accumulo/harness/MiniClusterHarness.java
+++ b/test/src/main/java/org/apache/accumulo/harness/MiniClusterHarness.java
@@ -54,10 +54,11 @@ public class MiniClusterHarness {
 
   private static final AtomicLong COUNTER = new AtomicLong(0);
 
-  public static final String USE_SSL_FOR_IT_OPTION = "org.apache.accumulo.test.functional.useSslForIT",
-      USE_CRED_PROVIDER_FOR_IT_OPTION = "org.apache.accumulo.test.functional.useCredProviderForIT",
-      USE_KERBEROS_FOR_IT_OPTION = "org.apache.accumulo.test.functional.useKrbForIT",
-      TRUE = Boolean.toString(true);
+  private static final String PROP_PREFIX = "org.apache.accumulo.test.functional.";
+  public static final String USE_SSL_FOR_IT_OPTION = PROP_PREFIX + "useSslForIT";
+  public static final String USE_CRED_PROVIDER_FOR_IT_OPTION = PROP_PREFIX + "useCredProviderForIT";
+  public static final String USE_KERBEROS_FOR_IT_OPTION = PROP_PREFIX + "useKrbForIT";
+  public static final String TRUE = Boolean.toString(true);
 
   // TODO These are defined in MiniKdc >= 2.6.0. Can be removed when minimum Hadoop dependency is
   // increased to that.
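
The hunk above replaces one comma-chained declaration with a constant per line
and an extracted shared prefix. A minimal sketch of the pattern (names
hypothetical):

    public class PrefixConstantsDemo {
      // One private prefix keeps each public constant under the line limit
      // and spells the shared package path exactly once.
      private static final String PROP_PREFIX = "org.example.test.";
      public static final String USE_SSL = PROP_PREFIX + "useSsl";
      public static final String USE_KERBEROS = PROP_PREFIX + "useKrb";

      public static void main(String[] args) {
        System.out.println(USE_SSL + " / " + USE_KERBEROS);
      }
    }
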
diff --git a/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java b/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
index f2821ef..233d7e7 100644
--- a/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
@@ -1325,9 +1325,8 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
       // Conditional-update to a table we only have read on should fail
       try {
         Status status = cw1.write(cm1).getStatus();
-        Assert.fail(
-            "Expected exception writing conditional mutation to table the user doesn't have write access to, Got status: "
-                + status);
+        Assert.fail("Expected exception writing conditional mutation to table"
+            + " the user doesn't have write access to, Got status: " + status);
       } catch (AccumuloSecurityException ase) {
 
       }
@@ -1335,9 +1334,8 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
       // Conditional-update to a table we only have writer on should fail
       try {
         Status status = cw2.write(cm1).getStatus();
-        Assert.fail(
-            "Expected exception writing conditional mutation to table the user doesn't have read access to. Got status: "
-                + status);
+        Assert.fail("Expected exception writing conditional mutation to table"
+            + " the user doesn't have read access to. Got status: " + status);
       } catch (AccumuloSecurityException ase) {
 
       }
@@ -1561,8 +1559,9 @@ public class ConditionalWriterIT extends AccumuloClusterHarness {
       log.info("Trace output:" + traceOutput);
       if (traceCount > 0) {
         int lastPos = 0;
-        for (String part : "traceTest, startScan,startConditionalUpdate,conditionalUpdate,Check conditions,apply conditional mutations"
-            .split(",")) {
+        String[] parts = ("traceTest, startScan,startConditionalUpdate,conditionalUpdate"
+            + ",Check conditions,apply conditional mutations").split(",");
+        for (String part : parts) {
           log.info("Looking in trace output for '" + part + "'");
           int pos = traceOutput.indexOf(part);
           if (-1 == pos) {
diff --git a/test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java b/test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java
index f674779..82b8ae9 100644
--- a/test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java
@@ -95,10 +95,8 @@ public class InMemoryMapIT {
     if (!NativeMap.isLoaded()) {
       fail("Missing the native library from " + nativeMapLocation.getAbsolutePath()
           + "\nYou need to build the libaccumulo binary first. "
-          + "\nTry running 'mvn clean install -Dit.test=InMemoryMapIT -Dtest=foo -DfailIfNoTests=false -Dfindbugs.skip -Dcheckstyle.skip'");
-      // afterwards, you can run the following
-      // mvn clean verify -Dit.test=InMemoryMapIT -Dtest=foo -DfailIfNoTests=false -Dfindbugs.skip
-      // -Dcheckstyle.skip -pl :accumulo-test
+          + "\nTry running 'mvn clean verify -Dit.test=InMemoryMapIT -Dtest=foo"
+          + " -DfailIfNoTests=false -Dfindbugs.skip -Dcheckstyle.skip'");
     }
     log.debug("Native map loaded");
 
diff --git a/test/src/main/java/org/apache/accumulo/test/MetaSplitIT.java b/test/src/main/java/org/apache/accumulo/test/MetaSplitIT.java
index 2f31add..3a247ed 100644
--- a/test/src/main/java/org/apache/accumulo/test/MetaSplitIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/MetaSplitIT.java
@@ -56,8 +56,8 @@ public class MetaSplitIT extends AccumuloClusterHarness {
       Collection<Text> splits = conn.tableOperations().listSplits(MetadataTable.NAME);
       // We expect a single split
       if (!splits.equals(Arrays.asList(new Text("~")))) {
-        log.info(
-            "Existing splits on metadata table. Saving them, and applying single original split of '~'");
+        log.info("Existing splits on metadata table. Saving them, and applying"
+            + " single original split of '~'");
         metadataSplits = splits;
         conn.tableOperations().merge(MetadataTable.NAME, null, null);
         conn.tableOperations().addSplits(MetadataTable.NAME,
diff --git a/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java b/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
index 6f9d084..a5ab431 100644
--- a/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
@@ -86,8 +86,9 @@ import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-// Testing default namespace configuration with inheritance requires altering the system state and restoring it back to normal
-// Punt on this for now and just let it use a minicluster.
+// Testing default namespace configuration with inheritance requires altering
+// the system state and restoring it back to normal. Punt on this for now and
+// just let it use a minicluster.
 @Category(MiniClusterOnlyTests.class)
 public class NamespacesIT extends AccumuloClusterHarness {
 
diff --git a/test/src/main/java/org/apache/accumulo/test/NativeMapPerformanceTest.java b/test/src/main/java/org/apache/accumulo/test/NativeMapPerformanceTest.java
index 1b666d8..e1b8757 100644
--- a/test/src/main/java/org/apache/accumulo/test/NativeMapPerformanceTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/NativeMapPerformanceTest.java
@@ -177,7 +177,8 @@ public class NativeMapPerformanceTest {
     sleepUninterruptibly(3, TimeUnit.SECONDS);
 
     System.out.printf(
-        "mapType:%10s   put rate:%,6.2f  scan rate:%,6.2f  get rate:%,6.2f  delete time : %6.2f  mem : %,d%n",
+        "mapType:%10s   put rate:%,6.2f  scan rate:%,6.2f  get"
+            + " rate:%,6.2f  delete time : %6.2f  mem : %,d%n",
         "" + mapType, (numRows * numCols) / ((tpe - tps) / 1000.0), (size) / ((tie - tis) / 1000.0),
         numLookups / ((tge - tgs) / 1000.0), (tde - tds) / 1000.0, memUsed);
 
diff --git a/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java b/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java
index 9aef95d..f6c1a77 100644
--- a/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java
@@ -77,7 +77,10 @@ public class ShellConfigIT extends AccumuloClusterHarness {
         clientConfFile = mac.getConfig().getClientConfFile();
         break;
       case STANDALONE:
-        StandaloneAccumuloClusterConfiguration standaloneConf = (StandaloneAccumuloClusterConfiguration) getClusterConfiguration();
+        // @formatter:off
+        StandaloneAccumuloClusterConfiguration standaloneConf =
+          (StandaloneAccumuloClusterConfiguration) getClusterConfiguration();
+        // @formatter:on
         clientConfFile = standaloneConf.getClientConfFile();
         break;
       default:
diff --git a/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java b/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
index e3f92b7..591e23d 100644
--- a/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
@@ -801,9 +801,8 @@ public class ShellServerIT extends SharedMiniClusterBase {
   @Test
   public void classpath() throws Exception {
     // classpath
-    ts.exec("classpath", true,
-        "Level 2: Java Classloader (loads everything defined by java classpath) URL classpath items are",
-        true);
+    ts.exec("classpath", true, "Level 2: Java Classloader (loads everything"
+        + " defined by java classpath) URL classpath items are", true);
   }
 
   @Test
@@ -1010,9 +1009,9 @@ public class ShellServerIT extends SharedMiniClusterBase {
     assertEquals(3, countFiles(cloneId));
 
     String clone2 = table + "_clone_2";
-    ts.exec(
-        "clonetable -s table.sampler.opt.hasher=murmur3_32,table.sampler.opt.modulus=7,table.sampler="
-            + RowSampler.class.getName() + " " + clone + " " + clone2);
+    ts.exec("clonetable -s"
+        + " table.sampler.opt.hasher=murmur3_32,table.sampler.opt.modulus=7,table.sampler="
+        + RowSampler.class.getName() + " " + clone + " " + clone2);
     String clone2Id = getTableId(clone2);
 
     assertEquals(3, countFiles(clone2Id));
@@ -1060,9 +1059,9 @@ public class ShellServerIT extends SharedMiniClusterBase {
     ts.exec("insert 3900 doc uril file://final_project.txt");
 
     String clone1 = table + "_clone_1";
-    ts.exec(
-        "clonetable -s table.sampler.opt.hasher=murmur3_32,table.sampler.opt.modulus=3,table.sampler="
-            + RowSampler.class.getName() + " " + table + " " + clone1);
+    ts.exec("clonetable -s"
+        + " table.sampler.opt.hasher=murmur3_32,table.sampler.opt.modulus=3,table.sampler="
+        + RowSampler.class.getName() + " " + table + " " + clone1);
 
     ts.exec("compact -t " + clone1 + " -w --sf-no-sample");
 
@@ -1074,9 +1073,9 @@ public class ShellServerIT extends SharedMiniClusterBase {
 
     // create table where table sample config differs from whats in file
     String clone2 = table + "_clone_2";
-    ts.exec(
-        "clonetable -s table.sampler.opt.hasher=murmur3_32,table.sampler.opt.modulus=2,table.sampler="
-            + RowSampler.class.getName() + " " + clone1 + " " + clone2);
+    ts.exec("clonetable -s"
+        + " table.sampler.opt.hasher=murmur3_32,table.sampler.opt.modulus=2,table.sampler="
+        + RowSampler.class.getName() + " " + clone1 + " " + clone2);
 
     ts.exec("table " + clone2);
     ts.exec("scan --sample", false, "SampleNotPresentException", true);
@@ -1342,11 +1341,12 @@ public class ShellServerIT extends SharedMiniClusterBase {
         + "deleteiter deletescaniter listiter setiter setscaniter "
         + "grant revoke systempermissions tablepermissions userpermissions " + "execfile history "
         + "authenticate cls clear notable sleep table user whoami "
-        + "clonetable config createtable deletetable droptable du exporttable importtable offline online renametable tables "
+        + "clonetable config createtable deletetable droptable du exporttable "
+        + "importtable offline online renametable tables "
         + "addsplits compact constraint flush getgropus getsplits merge setgroups "
         + "addauths createuser deleteuser dropuser getauths passwd setauths users "
-        + "delete deletemany deleterows egrep formatter interpreter grep importdirectory insert maxrow scan")
-            .split(" ")) {
+        + "delete deletemany deleterows egrep formatter interpreter grep "
+        + "importdirectory insert maxrow scan").split(" ")) {
       ts.exec("help " + c, true);
     }
   }
@@ -1737,9 +1737,8 @@ public class ShellServerIT extends SharedMiniClusterBase {
     ts.exec("tables", true, "thing2.thingy", false);
 
     // put constraints on a namespace
-    ts.exec(
-        "constraint -ns thing3 -a org.apache.accumulo.examples.simple.constraints.NumericValueConstraint",
-        true);
+    ts.exec("constraint -ns thing3 -a"
+        + " org.apache.accumulo.examples.simple.constraints.NumericValueConstraint", true);
     ts.exec("createtable thing3.constrained", true);
     ts.exec("table thing3.constrained", true);
     ts.exec("constraint -d 1");
@@ -1909,8 +1908,10 @@ public class ShellServerIT extends SharedMiniClusterBase {
   private static final String REAL_CONTEXT = "REAL";
   private static final String REAL_CONTEXT_CLASSPATH = "file://" + System.getProperty("user.dir")
       + "/target/" + ShellServerIT.class.getSimpleName() + "-real-iterators.jar";
-  private static final String VALUE_REVERSING_ITERATOR = "org.apache.accumulo.test.functional.ValueReversingIterator";
-  private static final String SUMMING_COMBINER_ITERATOR = "org.apache.accumulo.core.iterators.user.SummingCombiner";
+  private static final String VALUE_REVERSING_ITERATOR = "org.apache.accumulo.test."
+      + "functional.ValueReversingIterator";
+  private static final String SUMMING_COMBINER_ITERATOR = "org.apache.accumulo.core."
+      + "iterators.user.SummingCombiner";
   private static final String COLUMN_FAMILY_COUNTER_ITERATOR = "org.apache.accumulo.core.iterators"
       + ".ColumnFamilyCounter";
 
diff --git a/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java b/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java
index 9b27895..19eb91f 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java
@@ -76,7 +76,8 @@ public class TestBinaryRows {
 
   public static class Opts extends ClientOnRequiredTable {
     @Parameter(names = "--mode",
-        description = "either 'ingest', 'delete', 'randomLookups', 'split', 'verify', 'verifyDeleted'",
+        description = "either 'ingest', 'delete', 'randomLookups', 'split',"
+            + " 'verify', 'verifyDeleted'",
         required = true)
     public String mode;
     @Parameter(names = "--start", description = "the lowest numbered row")
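The wrap pattern in this hunk works because adjacent string literals joined with + form a compile-time constant in Java (JLS 15.28): the concatenation is legal inside an annotation and costs nothing at runtime. A minimal sketch of the idea, assuming JCommander on the classpath (the class name and omission of required = true are illustrative, not from the commit):

    import com.beust.jcommander.JCommander;
    import com.beust.jcommander.Parameter;

    public class WrapDemo {
      // Adjacent literals are folded by javac into a single constant, so
      // this annotation value is identical to the unwrapped form.
      @Parameter(names = "--mode", description = "either 'ingest', 'delete',"
          + " 'randomLookups', 'split', 'verify', 'verifyDeleted'")
      public String mode;

      public static void main(String[] args) {
        WrapDemo opts = new WrapDemo();
        new JCommander(opts).parse(args);
        System.out.println("mode = " + opts.mode);
      }
    }

Run with --mode ingest it prints "mode = ingest"; the wrapped and unwrapped descriptions compile to the same string constant.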
diff --git a/test/src/main/java/org/apache/accumulo/test/TestIngest.java b/test/src/main/java/org/apache/accumulo/test/TestIngest.java
index 22a01e4..5d3cc57 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestIngest.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestIngest.java
@@ -78,8 +78,8 @@ public class TestIngest {
     @Parameter(names = "--cols", description = "the number of columns to ingest per row")
     public int cols = 1;
 
-    @Parameter(names = "--random",
-        description = "insert random rows and use the given number to seed the psuedo-random number generator")
+    @Parameter(names = "--random", description = "insert random rows and use"
+        + " the given number to seed the psuedo-random number generator")
     public Integer random = null;
 
     @Parameter(names = "--size", description = "the size of the value to ingest")
@@ -346,7 +346,8 @@ public class TestIngest {
     double elapsed = (stopTime - startTime) / 1000.0;
 
     System.out.printf(
-        "%,12d records written | %,8d records/sec | %,12d bytes written | %,8d bytes/sec | %6.3f secs   %n",
+        "%,12d records written | %,8d records/sec | %,12d bytes written"
+            + " | %,8d bytes/sec | %6.3f secs   %n",
         totalValues, (int) (totalValues / elapsed), bytesWritten, (int) (bytesWritten / elapsed),
         elapsed);
   }
diff --git a/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java b/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java
index b2b5f13..4de6ccd 100644
--- a/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java
+++ b/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java
@@ -239,7 +239,8 @@ public class VerifyIngest {
           + (expectedRow - opts.startRow) + " expected " + opts.rows);
     } else {
       System.out.printf(
-          "%,12d records read | %,8d records/sec | %,12d bytes read | %,8d bytes/sec | %6.3f secs   %n",
+          "%,12d records read | %,8d records/sec | %,12d bytes read |"
+              + " %,8d bytes/sec | %6.3f secs   %n",
           recsRead, (int) ((recsRead) / ((t2 - t1) / 1000.0)), bytesRead,
           (int) (bytesRead / ((t2 - t1) / 1000.0)), (t2 - t1) / 1000.0);
     }
diff --git a/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousStatsCollector.java b/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousStatsCollector.java
index ed1014a..02e4054 100644
--- a/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousStatsCollector.java
+++ b/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousStatsCollector.java
@@ -71,10 +71,12 @@ public class ContinuousStatsCollector {
       this.opts = opts;
       this.scanBatchSize = scanBatchSize;
       this.tableId = Tables.getNameToIdMap(opts.getInstance()).get(opts.getTableName());
-      System.out.println(
-          "TIME TABLET_SERVERS TOTAL_ENTRIES TOTAL_INGEST TOTAL_QUERY TABLE_RECS TABLE_RECS_IN_MEM TABLE_INGEST TABLE_QUERY TABLE_TABLETS TABLE_TABLETS_ONLINE"
-              + " ACCUMULO_DU ACCUMULO_DIRS ACCUMULO_FILES TABLE_DU TABLE_DIRS TABLE_FILES"
-              + " MAP_TASK MAX_MAP_TASK REDUCE_TASK MAX_REDUCE_TASK TASK_TRACKERS BLACK_LISTED MIN_FILES/TABLET MAX_FILES/TABLET AVG_FILES/TABLET STDDEV_FILES/TABLET");
+      System.out.println("TIME TABLET_SERVERS TOTAL_ENTRIES TOTAL_INGEST"
+          + " TOTAL_QUERY TABLE_RECS TABLE_RECS_IN_MEM TABLE_INGEST TABLE_QUERY"
+          + " TABLE_TABLETS TABLE_TABLETS_ONLINE ACCUMULO_DU ACCUMULO_DIRS"
+          + " ACCUMULO_FILES TABLE_DU TABLE_DIRS TABLE_FILES MAP_TASK MAX_MAP_TASK"
+          + " REDUCE_TASK MAX_REDUCE_TASK TASK_TRACKERS BLACK_LISTED"
+          + " MIN_FILES/TABLET MAX_FILES/TABLET AVG_FILES/TABLET" + " STDDEV_FILES/TABLET");
     }
 
     @Override
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java b/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
index faaf874..406fb2d 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
@@ -304,8 +304,8 @@ public class GarbageCollectorIT extends ConfigurableMacBase {
     for (int i = 0; i < 100000; ++i) {
       final Text emptyText = new Text("");
       Text row = new Text(String.format("%s/%020d/%s", MetadataSchema.DeletesSection.getRowPrefix(),
-          i,
-          "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj"));
+          i, "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee"
+              + "ffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj"));
       Mutation delFlag = new Mutation(row);
       delFlag.put(emptyText, emptyText, new Value(new byte[] {}));
       bw.addMutation(delFlag);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java
index ab09d0d..2876ef2 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java
@@ -61,8 +61,10 @@ import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-// This test verifies the default permissions so a clean instance must be used. A shared instance might
-// not be representative of a fresh installation.
+/**
+ * This test verifies the default permissions so a clean instance must be used. A shared instance
+ * might not be representative of a fresh installation.
+ */
 @Category(MiniClusterOnlyTests.class)
 public class PermissionsIT extends AccumuloClusterHarness {
   private static final Logger log = LoggerFactory.getLogger(PermissionsIT.class);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java b/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
index 61b0eb1..1533392 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
@@ -67,8 +67,8 @@ public class RecoveryWithEmptyRFileIT extends ConfigurableMacBase {
 
   @Test
   public void replaceMissingRFile() throws Exception {
-    log.info(
-        "Ingest some data, verify it was stored properly, replace an underlying rfile with an empty one and verify we can scan.");
+    log.info("Ingest some data, verify it was stored properly, replace an"
+        + " underlying rfile with an empty one and verify we can scan.");
     Connector connector = getConnector();
     String tableName = getUniqueNames(1)[0];
     ReadWriteIT.ingest(connector, cluster.getClientConfig(), "root", ROWS, COLS, 50, 0, tableName);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SessionBlockVerifyIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SessionBlockVerifyIT.java
index 5c0d159..614c895 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/SessionBlockVerifyIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SessionBlockVerifyIT.java
@@ -170,9 +170,8 @@ public class SessionBlockVerifyIT extends ScanSessionTimeOutIT {
      * will orphan the sessionsToCleanup in the sweep, leading to an inaccurate count within
      * sessionsFound.
      */
-    assertEquals(
-        "Must have ten sessions. Failure indicates a synchronization block within the sweep mechanism",
-        10, sessionsFound);
+    assertEquals("Must have ten sessions. Failure indicates a synchronization"
+        + " block within the sweep mechanism", 10, sessionsFound);
     for (Future<Boolean> callable : callables) {
       callable.cancel(true);
     }
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java b/test/src/main/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java
index 5a25605..90dcb6e 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java
@@ -66,9 +66,8 @@ public class WatchTheWatchCountIT extends ConfigurableMacBase {
         if (total > MIN && total < MAX) {
           break;
         }
-        log.debug(
-            "Expected number of watchers to be contained in ({}, {}), but actually was {}. Sleeping and retrying",
-            MIN, MAX, total);
+        log.debug("Expected number of watchers to be contained in ({}, {}), but"
+            + " actually was {}. Sleeping and retrying", MIN, MAX, total);
         Thread.sleep(5000);
       } finally {
         socket.close();
diff --git a/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloInputFormatIT.java b/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloInputFormatIT.java
index 9a35a95..ba3fc02 100644
--- a/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloInputFormatIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/mapreduce/AccumuloInputFormatIT.java
@@ -514,7 +514,10 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
       List<InputSplit> splits = super.getSplits(context);
 
       for (InputSplit split : splits) {
-        org.apache.accumulo.core.client.mapreduce.RangeInputSplit rangeSplit = (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split;
+        // @formatter:off
+        org.apache.accumulo.core.client.mapreduce.RangeInputSplit rangeSplit =
+          (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split;
+        // @formatter:on
         rangeSplit.setToken(new PasswordToken("anythingelse"));
       }
 
@@ -534,8 +537,11 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
 
       // Copy only the necessary information
       for (InputSplit oldSplit : oldSplits) {
-        org.apache.accumulo.core.client.mapreduce.RangeInputSplit newSplit = new org.apache.accumulo.core.client.mapreduce.RangeInputSplit(
+        // @formatter:off
+        org.apache.accumulo.core.client.mapreduce.RangeInputSplit newSplit =
+          new org.apache.accumulo.core.client.mapreduce.RangeInputSplit(
             (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) oldSplit);
+        // @formatter:on
         newSplits.add(newSplit);
       }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java b/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
index df8349f..2df5abc 100644
--- a/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
@@ -2388,8 +2388,8 @@ public abstract class SimpleProxyBase extends SharedMiniClusterBase {
       assertEquals(1, results.size());
       status = results.get(s2bb("00347"));
       if (ConditionalStatus.VIOLATED != status) {
-        log.info(
-            "ConditionalUpdate was not rejected by server due to table constraint. Sleeping and retrying");
+        log.info("ConditionalUpdate was not rejected by server due to table"
+            + " constraint. Sleeping and retrying");
         Thread.sleep(5000);
         continue;
       }
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyReadWrite.java b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyReadWrite.java
index baba906..3867ba4 100644
--- a/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyReadWrite.java
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyReadWrite.java
@@ -239,8 +239,10 @@ public class TestProxyReadWrite {
 
     String regex = ".*[02468]";
 
-    org.apache.accumulo.core.client.IteratorSetting is = new org.apache.accumulo.core.client.IteratorSetting(
-        50, regex, RegExFilter.class);
+    // @formatter:off
+    org.apache.accumulo.core.client.IteratorSetting is =
+      new org.apache.accumulo.core.client.IteratorSetting(50, regex, RegExFilter.class);
+    // @formatter:on
     RegExFilter.setRegexs(is, regex, null, null, null, false);
 
     IteratorSetting pis = Util.iteratorSetting2ProxyIteratorSetting(is);
@@ -318,8 +320,10 @@ public class TestProxyReadWrite {
 
     String regex = ".*[02468]";
 
-    org.apache.accumulo.core.client.IteratorSetting is = new org.apache.accumulo.core.client.IteratorSetting(
-        50, regex, RegExFilter.class);
+    // @formatter:off
+    org.apache.accumulo.core.client.IteratorSetting is =
+      new org.apache.accumulo.core.client.IteratorSetting(50, regex, RegExFilter.class);
+    // @formatter:on
     RegExFilter.setRegexs(is, regex, null, null, null, false);
 
     IteratorSetting pis = Util.iteratorSetting2ProxyIteratorSetting(is);
@@ -403,8 +407,10 @@ public class TestProxyReadWrite {
 
     String regex = ".*[02468]";
 
-    org.apache.accumulo.core.client.IteratorSetting is = new org.apache.accumulo.core.client.IteratorSetting(
-        50, regex, RegExFilter.class);
+    // @formatter:off
+    org.apache.accumulo.core.client.IteratorSetting is =
+      new org.apache.accumulo.core.client.IteratorSetting(50, regex, RegExFilter.class);
+    // @formatter:on
     RegExFilter.setRegexs(is, regex, null, null, null, false);
 
     IteratorSetting pis = Util.iteratorSetting2ProxyIteratorSetting(is);
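The // @formatter:off and // @formatter:on markers added above tell the formatter, when its on/off tags are enabled, to leave the enclosed region exactly as written; without them the tool would rejoin a deliberate wrap such as keeping the assignment's right-hand side on its own line. A self-contained sketch, with an invented long type name standing in for the fully qualified RangeInputSplit and IteratorSetting names:

    public class FormatterTagDemo {
      static class SomeVeryLongIllustrativeTypeNameThatForcesAWrap {}

      public static void main(String[] args) {
        Object o = new SomeVeryLongIllustrativeTypeNameThatForcesAWrap();
        // @formatter:off
        SomeVeryLongIllustrativeTypeNameThatForcesAWrap cast =
          (SomeVeryLongIllustrativeTypeNameThatForcesAWrap) o;
        // @formatter:on
        System.out.println(cast.getClass().getSimpleName());
      }
    }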
diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/Node.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/Node.java
index a6b3795..9ad0bb3 100644
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/Node.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/Node.java
@@ -91,8 +91,9 @@ public abstract class Node {
     }
 
     File libdir = new File(acuHome + "/lib");
-    for (String jar : "accumulo-core accumulo-server-base accumulo-fate accumulo-trace commons-math3 libthrift htrace-core"
-        .split(" ")) {
+    String[] jars = ("accumulo-core accumulo-server-base accumulo-fate accumulo-trace"
+        + " commons-math3 libthrift htrace-core").split(" ");
+    for (String jar : jars) {
       retval += String.format(",%s/%s.jar", libdir.getAbsolutePath(), jar);
     }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/multitable/CopyTool.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/multitable/CopyTool.java
index ba4f2d3..3ad1246 100644
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/multitable/CopyTool.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/multitable/CopyTool.java
@@ -93,10 +93,12 @@ public class CopyTool extends Configured implements Tool {
         // Do the explicit check to see if the user has the permission to get a delegation token
         if (!conn.securityOperations().hasSystemPermission(conn.whoami(),
             SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
-          log.error(principal + " doesn't have the "
-              + SystemPermission.OBTAIN_DELEGATION_TOKEN.name()
-              + " SystemPermission neccesary to obtain a delegation token. MapReduce tasks cannot automatically use the client's"
-              + " credentials on remote servers. Delegation tokens provide a means to run MapReduce without distributing the user's credentials.");
+          log.error(
+              principal + " doesn't have the " + SystemPermission.OBTAIN_DELEGATION_TOKEN.name()
+                  + " SystemPermission neccesary to obtain a delegation token."
+                  + " MapReduce tasks cannot automatically use the client's credentials"
+                  + " on remote servers. Delegation tokens provide a means to run"
+                  + " MapReduce without distributing the user's credentials.");
           throw new IllegalStateException(
               conn.whoami() + " does not have permission to obtain a delegation token");
         }
diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/security/Validate.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/security/Validate.java
index 53375cc..c896f0d 100644
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/security/Validate.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/security/Validate.java
@@ -121,13 +121,15 @@ public class Validate extends Test {
       accuAuths = conn.securityOperations()
           .getUserAuthorizations(WalkingSecurity.get(state, env).getTabUserName());
     } catch (ThriftSecurityException ae) {
-      if (ae
-          .getCode() == org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode.USER_DOESNT_EXIST) {
+      // @formatter:off
+      if (ae.getCode()
+          == org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode.USER_DOESNT_EXIST) {
         if (tableUserExists)
           throw new AccumuloException("Table user didn't exist when they should.", ae);
         else
           return;
       }
+      // @formatter:on
       throw new AccumuloException("Unexpected exception!", ae);
     }
     if (!auths.equals(accuAuths))
diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/sequential/MapRedVerifyTool.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/sequential/MapRedVerifyTool.java
index 83ddfb9..6717fa8 100644
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/sequential/MapRedVerifyTool.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/sequential/MapRedVerifyTool.java
@@ -121,10 +121,12 @@ public class MapRedVerifyTool extends Configured implements Tool {
         // Do the explicit check to see if the user has the permission to get a delegation token
         if (!conn.securityOperations().hasSystemPermission(conn.whoami(),
             SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
-          log.error(newPrincipal + " doesn't have the "
-              + SystemPermission.OBTAIN_DELEGATION_TOKEN.name()
-              + " SystemPermission neccesary to obtain a delegation token. MapReduce tasks cannot automatically use the client's"
-              + " credentials on remote servers. Delegation tokens provide a means to run MapReduce without distributing the user's credentials.");
+          log.error(
+              newPrincipal + " doesn't have the " + SystemPermission.OBTAIN_DELEGATION_TOKEN.name()
+                  + " SystemPermission neccesary to obtain a delegation token."
+                  + " MapReduce tasks cannot automatically use the client's credentials"
+                  + " on remote servers. Delegation tokens provide a means to run"
+                  + " MapReduce without distributing the user's credentials.");
           throw new IllegalStateException(
               conn.whoami() + " does not have permission to obtain a delegation token");
         }
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/FinishedWorkUpdaterIT.java b/test/src/main/java/org/apache/accumulo/test/replication/FinishedWorkUpdaterIT.java
index d9909e8..2cdc853 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/FinishedWorkUpdaterIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/FinishedWorkUpdaterIT.java
@@ -101,14 +101,15 @@ public class FinishedWorkUpdaterIT extends ConfigurableMacBase {
     ReplicationTable.setOnline(conn);
 
     String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
-    // @formatter:off
-    Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true).setInfiniteEnd(false).build(),
-        stat2 = Status.newBuilder().setBegin(500).setEnd(1000).setClosed(true).setInfiniteEnd(false).build(),
-        stat3 = Status.newBuilder().setBegin(1).setEnd(1000).setClosed(true).setInfiniteEnd(false).build();
-    ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", "1"),
-        target2 = new ReplicationTarget("peer2", "table2", "1"),
-        target3 = new ReplicationTarget("peer3", "table3", "1");
-    // @formatter:on
+    Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true)
+        .setInfiniteEnd(false).build();
+    Status stat2 = Status.newBuilder().setBegin(500).setEnd(1000).setClosed(true)
+        .setInfiniteEnd(false).build();
+    Status stat3 = Status.newBuilder().setBegin(1).setEnd(1000).setClosed(true)
+        .setInfiniteEnd(false).build();
+    ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", "1");
+    ReplicationTarget target2 = new ReplicationTarget("peer2", "table2", "1");
+    ReplicationTarget target3 = new ReplicationTarget("peer3", "table3", "1");
 
     // Create a single work record for a file to some peer
     BatchWriter bw = ReplicationTable.getBatchWriter(conn);
@@ -143,14 +144,15 @@ public class FinishedWorkUpdaterIT extends ConfigurableMacBase {
     ReplicationTable.setOnline(conn);
 
     String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
-    // @formatter:off
-    Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true).setInfiniteEnd(true).build(),
-        stat2 = Status.newBuilder().setBegin(1).setEnd(1000).setClosed(true).setInfiniteEnd(true).build(),
-        stat3 = Status.newBuilder().setBegin(500).setEnd(1000).setClosed(true).setInfiniteEnd(true).build();
-    ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", "1"),
-        target2 = new ReplicationTarget("peer2", "table2", "1"),
-        target3 = new ReplicationTarget("peer3", "table3", "1");
-    // @formatter:on
+    Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true)
+        .setInfiniteEnd(true).build();
+    Status stat2 = Status.newBuilder().setBegin(1).setEnd(1000).setClosed(true).setInfiniteEnd(true)
+        .build();
+    Status stat3 = Status.newBuilder().setBegin(500).setEnd(1000).setClosed(true)
+        .setInfiniteEnd(true).build();
+    ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", "1");
+    ReplicationTarget target2 = new ReplicationTarget("peer2", "table2", "1");
+    ReplicationTarget target3 = new ReplicationTarget("peer3", "table3", "1");
 
     // Create a single work record for a file to some peer
     BatchWriter bw = ReplicationTable.getBatchWriter(conn);
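Rather than guarding the comma-chained declarations with formatter tags, the hunks above split them into one declaration per statement; the two forms are equivalent, and the split form wraps within the line limit on its own. A trivial sketch:

    public class DeclSplitDemo {
      public static void main(String[] args) {
        int a = 1, b = 2, c = 3;  // chained form, awkward to wrap cleanly
        int x = 1;                // split form, one statement per line
        int y = 2;
        int z = 3;
        System.out.println((a + b + c) == (x + y + z));  // prints true
      }
    }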
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java b/test/src/main/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java
index 33626c7..58eacc8 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java
@@ -414,8 +414,8 @@ public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacB
     Assert.assertNotEquals("Current active WAL on tserver should not be the original WAL we saw",
         walName, activeWal);
 
-    log.info(
-        "Ensuring that replication status does get closed after WAL is no longer in use by Tserver");
+    log.info("Ensuring that replication status does get closed after WAL is no"
+        + " longer in use by Tserver");
 
     do {
       Map<String,Status> replicationStatuses = getMetadataStatusForTable(table);
diff --git a/test/src/main/java/org/apache/accumulo/test/stress/random/WriteOptions.java b/test/src/main/java/org/apache/accumulo/test/stress/random/WriteOptions.java
index 00851d8..b82eb2d 100644
--- a/test/src/main/java/org/apache/accumulo/test/stress/random/WriteOptions.java
+++ b/test/src/main/java/org/apache/accumulo/test/stress/random/WriteOptions.java
@@ -129,9 +129,8 @@ class WriteOptions extends ClientOnDefaultTable {
 
     if (min_ref == null && max_ref != null) {
       // we don't support just specifying a max yet
-      throw new IllegalArgumentException(String.format(
-          "[%s] Maximum value supplied, but no minimum. Must supply a minimum with a maximum value.",
-          label));
+      throw new IllegalArgumentException(String.format("[%s] Maximum value supplied,"
+          + " but no minimum. Must supply a minimum with a maximum value.", label));
     } else if (min_ref != null && max_ref != null) {
       // if a user supplied lower and upper bounds, we need to verify
       // that min <= max
diff --git a/test/src/main/java/org/apache/accumulo/test/util/CertUtils.java b/test/src/main/java/org/apache/accumulo/test/util/CertUtils.java
index d4e60ee..85579c7 100644
--- a/test/src/main/java/org/apache/accumulo/test/util/CertUtils.java
+++ b/test/src/main/java/org/apache/accumulo/test/util/CertUtils.java
@@ -86,8 +86,8 @@ public class CertUtils {
     @Parameter(names = {"--local-keystore"}, description = "Target path for generated keystore")
     String localKeystore = null;
 
-    @Parameter(names = {"--root-keystore"},
-        description = "Path to root truststore, generated with generate-all, or used for signing with generate-local")
+    @Parameter(names = {"--root-keystore"}, description = "Path to root truststore,"
+        + " generated with generate-all, or used for signing with generate-local")
     String rootKeystore = null;
 
     @Parameter(names = {"--root-truststore"},
@@ -97,12 +97,14 @@ public class CertUtils {
     @Parameter(names = {"--keystore-type"}, description = "Type of keystore file to use")
     String keystoreType = "JKS";
 
-    @Parameter(names = {"--root-keystore-password"},
-        description = "Password for root keystore, falls back to --keystore-password if not provided")
+    @Parameter(names = {"--root-keystore-password"}, description = "Password for root keystore,"
+        + " falls back to --keystore-password if not provided")
     String rootKeystorePassword = null;
 
     @Parameter(names = {"--keystore-password"},
-        description = "Password used to encrypt keystores.  If omitted, the instance-wide secret will be used.  If specified, the password must also be explicitly configured in Accumulo.")
+        description = "Password used to encrypt keystores."
+            + " If omitted, the instance-wide secret will be used. If specified, the"
+            + " password must also be explicitly configured in Accumulo.")
     String keystorePassword = null;
 
     @Parameter(names = {"--truststore-password"},
diff --git a/test/src/test/java/org/apache/accumulo/test/TraceRepoDeserializationTest.java b/test/src/test/java/org/apache/accumulo/test/TraceRepoDeserializationTest.java
index 5999c65..0a64815 100644
--- a/test/src/test/java/org/apache/accumulo/test/TraceRepoDeserializationTest.java
+++ b/test/src/test/java/org/apache/accumulo/test/TraceRepoDeserializationTest.java
@@ -28,7 +28,8 @@ import org.junit.Test;
 public class TraceRepoDeserializationTest {
 
   // Zookeeper data for a merge request
-  static private final String oldValue = "rO0ABXNyAC1vcmcuYXBhY2hlLmFjY3VtdWxvLm1hc3Rlci50YWJsZU9wcy5UcmFjZVJlc"
+  static private final String oldValue = "rO0ABXNyAC1vcmcuYXBhY2hlLmFjY3VtdWx"
+      + "vLm1hc3Rlci50YWJsZU9wcy5UcmFjZVJlc"
       + "G8AAAAAAAAAAQIAAkwABHJlcG90AB9Mb3JnL2FwYWNoZS9hY2N1bXVsby9mYXRlL1Jl"
       + "cG87TAAFdGluZm90AChMb3JnL2FwYWNoZS9hY2N1bXVsby90cmFjZS90aHJpZnQvVEl"
       + "uZm87eHBzcgAwb3JnLmFwYWNoZS5hY2N1bXVsby5tYXN0ZXIudGFibGVPcHMuVGFibG"
