accumulo-commits mailing list archives

From ktur...@apache.org
Subject [accumulo] branch master updated: Fixes #564 adds support multiple compaction executors (#1605)
Date Thu, 18 Jun 2020 01:18:59 GMT
This is an automated email from the ASF dual-hosted git repository.

kturner pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/master by this push:
     new c44aca3  Fixes #564 adds support multiple compaction executors (#1605)
c44aca3 is described below

commit c44aca38bdb776a444410a5b0f97d31c2a46d105
Author: Keith Turner <kturner@apache.org>
AuthorDate: Wed Jun 17 21:18:51 2020 -0400

    Fixes #564 adds support multiple compaction executors (#1605)
    
    This change adds support for multiple compaction executors and
    multiple concurrent compactions per tablet.
    
    The best way to understand these changes is to look at the documentation
    at core/src/main/java/org/apache/accumulo/core/spi/compaction/package-info.java
    
    * reduce default number of compaction threads
    
    * Code review updates
    
    * Fix build bug and improve compaction plan checking
    
    * Added trivial todo for something I am too tired to do now
    
    * Improve log message
    
    * When compacting a subset, avoid doing more than logarithmic work for user compactions.
---
 .../accumulo/core/client/PluginEnvironment.java    |  10 +
 .../core/client/admin/ActiveCompaction.java        |   4 +-
 .../core/client/admin/CompactionConfig.java        | 117 +++-
 .../client/admin/CompactionStrategyConfig.java     |  24 +-
 ...actionStrategyConfig.java => PluginConfig.java} |  63 +-
 .../client/admin/compaction/CompactableFile.java   |  27 +-
 .../admin/compaction/CompactionConfigurer.java     |  79 +++
 .../admin/compaction/CompactionSelector.java       |  82 +++
 .../admin/compaction/CompressionConfigurer.java    |  89 +++
 .../admin/compaction/TooManyDeletesSelector.java   |  94 +--
 .../summary/summarizers/DeletesSummarizer.java     |   7 +-
 .../clientImpl/CompactionStrategyConfigUtil.java   |  76 +--
 .../core/clientImpl/TableOperationsImpl.java       |  45 +-
 .../core/clientImpl/UserCompactionUtils.java       | 291 ++++++++
 .../core/compaction/CompactionSettings.java        |  37 +-
 .../accumulo/core/conf/AccumuloConfiguration.java  |  11 +
 .../org/apache/accumulo/core/conf/Property.java    |  97 ++-
 .../apache/accumulo/core/logging/TabletLogger.java |  54 +-
 .../core/metadata/CompactableFileImpl.java         |  94 +++
 .../accumulo/core/singletons/SingletonManager.java |   1 -
 .../core/spi/compaction/CompactionDirectives.java  |  33 +-
 .../core/spi/compaction/CompactionDispatcher.java  |  91 +++
 .../core/spi/compaction/CompactionExecutorId.java  |  27 +-
 .../core/spi/compaction/CompactionJob.java         |  39 +-
 .../core/spi/compaction/CompactionKind.java        |  40 +-
 .../core/spi/compaction/CompactionPlan.java        |  58 ++
 .../core/spi/compaction/CompactionPlanner.java     | 191 ++++++
 .../core/spi/compaction/CompactionServiceId.java   |  23 +-
 .../core/spi/compaction/CompactionServices.java    |  15 +-
 .../spi/compaction/CompactionsDirectiveImpl.java   |  85 +++
 .../spi/compaction/DefaultCompactionPlanner.java   | 425 ++++++++++++
 .../core/spi/compaction/ExecutorManager.java       |  19 +-
 .../spi/compaction/SimpleCompactionDispatcher.java | 120 ++++
 .../compaction/doc-files/compaction-spi-design.png | Bin 0 -> 113441 bytes
 .../accumulo/core/spi/compaction/package-info.java |  80 +++
 .../core/util/compaction/CompactionJobImpl.java    | 109 +++
 .../util/compaction/CompactionJobPrioritizer.java  |  56 ++
 .../core/util/compaction/CompactionPlanImpl.java   |  93 +++
 .../apache/accumulo/core/conf/PropertyTest.java    |   2 +-
 .../compaction/DefaultCompactionPlannerTest.java   | 406 +++++++++++
 .../util/compaction/CompactionPrioritizerTest.java |  69 ++
 pom.xml                                            |   1 +
 .../accumulo/server/ServiceEnvironmentImpl.java    |  10 +
 .../accumulo/server/conf/TableConfiguration.java   |  44 +-
 .../apache/accumulo/server/init/Initialize.java    |  82 ++-
 .../master/tableOps/UserCompactionConfig.java      | 123 ----
 .../accumulo/server/util/MasterMetadataUtil.java   |   8 +-
 .../apache/accumulo/master/FateServiceHandler.java |  20 +-
 .../master/tableOps/compact/CompactRange.java      |  38 +-
 .../tableOps/compact/cancel/CancelCompactions.java |  14 +-
 .../accumulo/master/upgrade/Upgrader9to10.java     |  22 +
 .../tserver/TabletIteratorEnvironment.java         |   6 +-
 .../org/apache/accumulo/tserver/TabletServer.java  |  34 +-
 .../tserver/TabletServerResourceManager.java       |  90 +--
 .../accumulo/tserver/ThriftClientHandler.java      |  31 +-
 .../tserver/compaction/CompactionPlan.java         |   1 +
 .../tserver/compaction/CompactionStrategy.java     |  11 +
 .../compaction/DefaultCompactionStrategy.java      |   1 +
 .../compaction/EverythingCompactionStrategy.java   |   2 +-
 .../tserver/compaction/MajorCompactionReason.java  |   2 +
 .../tserver/compaction/MajorCompactionRequest.java |   1 +
 .../compaction/SizeLimitCompactionStrategy.java    |   1 +
 .../tserver/compaction/WriteParameters.java        |   1 +
 .../strategies/BasicCompactionStrategy.java        |   7 +
 .../strategies/ConfigurableCompactionStrategy.java | 255 +++----
 .../TooManyDeletesCompactionStrategy.java          |   3 +
 .../accumulo/tserver/compactions/Compactable.java  |  91 +++
 .../tserver/compactions/CompactionExecutor.java    | 134 ++++
 .../tserver/compactions/CompactionManager.java     | 164 +++++
 .../tserver/compactions/CompactionService.java     | 266 ++++++++
 .../tserver/compactions/PrintableTable.java        |  97 +++
 .../SubmittedJob.java}                             |  26 +-
 .../accumulo/tserver/tablet/CompactableImpl.java   | 759 +++++++++++++++++++++
 .../accumulo/tserver/tablet/CompactableUtils.java  | 602 ++++++++++++++++
 .../accumulo/tserver/tablet/CompactionInfo.java    |   6 +-
 .../accumulo/tserver/tablet/CompactionRunner.java  |  90 ---
 .../apache/accumulo/tserver/tablet/Compactor.java  |  12 +-
 .../accumulo/tserver/tablet/DatafileManager.java   | 146 +---
 .../tserver/tablet/MinorCompactionTask.java        |  14 +-
 .../accumulo/tserver/tablet/MinorCompactor.java    |  19 +-
 .../org/apache/accumulo/tserver/tablet/Tablet.java | 680 ++----------------
 ...erResourceManagerDynamicCompactionPoolTest.java | 161 -----
 .../tserver/compaction/CompactionPlanTest.java     |   1 +
 .../compaction/DefaultCompactionStrategyTest.java  |   1 +
 .../SizeLimitCompactionStrategyTest.java           |   1 +
 .../strategies/BasicCompactionStrategyTest.java    |   1 +
 .../ConfigurableCompactionStrategyTest.java        | 108 +--
 .../tserver/tablet/DatafileManagerTest.java        | 151 ----
 .../apache/accumulo/tserver/tablet/TabletTest.java |   4 +-
 .../accumulo/shell/commands/CompactCommand.java    |  76 ++-
 .../org/apache/accumulo/test/CompactionIT.java     | 607 ++++++++++++++++
 .../test/ConfigurableMajorCompactionIT.java        |   1 +
 .../accumulo/test/SizeCompactionStrategy.java      |   1 +
 .../accumulo/test/TestCompactionStrategy.java      |   1 +
 .../accumulo/test/UserCompactionStrategyIT.java    |   1 +
 .../accumulo/test/functional/CompactionIT.java     |   1 +
 .../test/functional/ConfigurableCompactionIT.java  |   3 +
 .../apache/accumulo/test/functional/MaxOpenIT.java |   1 +
 .../accumulo/test/functional/RowDeleteIT.java      |   1 -
 .../apache/accumulo/test/functional/SummaryIT.java |  56 +-
 .../accumulo/test/functional/TooManyDeletesIT.java |   1 +
 101 files changed, 6337 insertions(+), 2038 deletions(-)

diff --git a/core/src/main/java/org/apache/accumulo/core/client/PluginEnvironment.java b/core/src/main/java/org/apache/accumulo/core/client/PluginEnvironment.java
index 2a78bb2..f58d29a 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/PluginEnvironment.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/PluginEnvironment.java
@@ -39,6 +39,16 @@ public interface PluginEnvironment {
   public interface Configuration extends Iterable<Entry<String,String>> {
 
     /**
+     * Properties with a default value will always return something when calling
+     * {@link #get(String)}, even if a user never set the property. This method allows checking
+     * whether a user set a property.
+     *
+     * @return true if a user set this property and false if a user did not set it.
+     * @since 2.1.0
+     */
+    boolean isSet(String key);
+
+    /**
      * @return The value for a single property or null if not present. Sensitive properties are
      *         intentionally not returned in order to prevent inadvertent logging of them. If your
      *         plugin needs sensitive properties a getSensitive method could be added.
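
For illustration (not part of this patch), a minimal sketch of how a plugin could use the new isSet method, assuming the plugin already has a PluginEnvironment and TableId from its parameter object; the property key "table.custom.example.ratio" and the fallback value are hypothetical.

    import org.apache.accumulo.core.client.PluginEnvironment;
    import org.apache.accumulo.core.data.TableId;

    public class RatioOption {
      // Returns the user-set value when present, otherwise a plugin-specific default.
      public static double ratio(PluginEnvironment env, TableId tableId) {
        PluginEnvironment.Configuration conf = env.getConfiguration(tableId);
        // "table.custom.example.ratio" is a hypothetical property key
        if (conf.isSet("table.custom.example.ratio")) {
          return Double.parseDouble(conf.get("table.custom.example.ratio"));
        }
        // the property was never explicitly set, so fall back to the plugin default
        return 3.0;
      }
    }
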
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/ActiveCompaction.java b/core/src/main/java/org/apache/accumulo/core/client/admin/ActiveCompaction.java
index f22e258..5a404e1 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/ActiveCompaction.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/ActiveCompaction.java
@@ -35,9 +35,9 @@ public abstract class ActiveCompaction {
      */
     MINOR,
     /**
-     * compaction to flush a tablets memory and merge it with the tablets smallest file. This type
-     * compaction is done when a tablet has too many files
+     * Accumulo no longer does merging minor compactions.
      */
+    @Deprecated(since = "2.1.0", forRemoval = true)
     MERGE,
     /**
      * compaction that merges a subset of a tablets files into one file
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/CompactionConfig.java b/core/src/main/java/org/apache/accumulo/core/client/admin/CompactionConfig.java
index b09c695..2ac65f9 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/CompactionConfig.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/CompactionConfig.java
@@ -21,25 +21,37 @@ package org.apache.accumulo.core.client.admin;
 import static java.util.Objects.requireNonNull;
 import static org.apache.accumulo.core.clientImpl.CompactionStrategyConfigUtil.DEFAULT_STRATEGY;
 
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
+import java.util.function.BooleanSupplier;
 
 import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer;
+import org.apache.accumulo.core.client.admin.compaction.CompactionSelector;
+import org.apache.accumulo.core.clientImpl.CompactionStrategyConfigUtil;
+import org.apache.accumulo.core.clientImpl.UserCompactionUtils;
 import org.apache.hadoop.io.Text;
 
+import com.google.common.base.Preconditions;
+
 /**
  * This class exist to pass parameters to {@link TableOperations#compact(String, CompactionConfig)}
  *
  * @since 1.7.0
  */
 public class CompactionConfig {
+
   private Text start = null;
   private Text end = null;
   private boolean flush = true;
   private boolean wait = true;
   private List<IteratorSetting> iterators = Collections.emptyList();
+  @SuppressWarnings("removal")
   private CompactionStrategyConfig compactionStrategy = DEFAULT_STRATEGY;
+  private Map<String,String> hints = Map.of();
+  private PluginConfig selectorConfig = UserCompactionUtils.DEFAULT_SELECTOR;
+  private PluginConfig configurerConfig = UserCompactionUtils.DEFAULT_CONFIGURER;
 
   /**
    * @param start
@@ -124,7 +136,7 @@ public class CompactionConfig {
    * @return this
    */
   public CompactionConfig setIterators(List<IteratorSetting> iterators) {
-    this.iterators = new ArrayList<>(iterators);
+    this.iterators = List.copyOf(iterators);
     return this;
   }
 
@@ -141,9 +153,16 @@ public class CompactionConfig {
    *          configures the strategy that will be used by each tablet to select files. If no
    *          strategy is set, then all files will be compacted.
    * @return this
+   * @deprecated since 2.1.0 use {@link #setSelector(PluginConfig)} and
+   *             {@link #setConfigurer(PluginConfig)} instead. See {@link CompactionStrategyConfig}
+   *             for details about why this was deprecated.
    */
+  @Deprecated(since = "2.1.0", forRemoval = true)
   public CompactionConfig setCompactionStrategy(CompactionStrategyConfig csConfig) {
     requireNonNull(csConfig);
+    Preconditions.checkArgument(!csConfig.getClassName().isBlank());
+    Preconditions.checkState(
+        selectorConfig.getClassName().isEmpty() && configurerConfig.getClassName().isEmpty());
     this.compactionStrategy = csConfig;
     return this;
   }
@@ -152,8 +171,102 @@ public class CompactionConfig {
    * @return The previously set compaction strategy. Defaults to a configuration of
    *         org.apache.accumulo.tserver.compaction.EverythingCompactionStrategy which always
    *         compacts all files.
+   * @deprecated since 2.1.0
    */
+  @Deprecated(since = "2.1.0", forRemoval = true)
   public CompactionStrategyConfig getCompactionStrategy() {
     return compactionStrategy;
   }
+
+  /**
+   * Configure a {@link CompactionSelector} plugin to run for this compaction. Specify the class
+   * name and options here.
+   *
+   * @return this
+   * @since 2.1.0
+   */
+  @SuppressWarnings("removal")
+  public CompactionConfig setSelector(PluginConfig selectorConfig) {
+    Preconditions.checkState(compactionStrategy.getClassName().isEmpty());
+    Preconditions.checkArgument(!selectorConfig.getClassName().isBlank());
+    this.selectorConfig = requireNonNull(selectorConfig);
+    return this;
+  }
+
+  /**
+   * @since 2.1.0
+   */
+  public PluginConfig getSelector() {
+    return selectorConfig;
+  }
+
+  /**
+   * @since 2.1.0
+   */
+  @SuppressWarnings("removal")
+  public CompactionConfig setExecutionHints(Map<String,String> hints) {
+    if (!hints.isEmpty())
+      Preconditions.checkState(compactionStrategy.getClassName().isEmpty());
+    this.hints = Map.copyOf(hints);
+    return this;
+  }
+
+  /**
+   * @since 2.1.0
+   */
+  public Map<String,String> getExecutionHints() {
+    return hints;
+  }
+
+  /**
+   * Enables a {@link CompactionConfigurer} to run for this compaction on the server side. Specify
+   * the class name and options here.
+   *
+   * @since 2.1.0
+   */
+  @SuppressWarnings("removal")
+  public CompactionConfig setConfigurer(PluginConfig configurerConfig) {
+    Preconditions.checkState(compactionStrategy.getClassName().isEmpty());
+    this.configurerConfig = configurerConfig;
+    return this;
+  }
+
+  /**
+   * @since 2.1.0
+   */
+  public PluginConfig getConfigurer() {
+    return configurerConfig;
+  }
+
+  private String append(StringBuilder sb, String prefix, BooleanSupplier test, String name,
+      Object val) {
+    if (test.getAsBoolean()) {
+      sb.append(prefix);
+      sb.append(name);
+      sb.append("=");
+      sb.append(val);
+      return ", ";
+    }
+    return prefix;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("[");
+    var prefix = append(sb, "", () -> start != null, "start", start);
+    prefix = append(sb, prefix, () -> end != null, "end", end);
+    prefix = append(sb, prefix, () -> !flush, "flush", flush);
+    prefix = append(sb, prefix, () -> !wait, "wait", wait);
+    prefix = append(sb, prefix, () -> !iterators.isEmpty(), "iterators", iterators);
+    prefix = append(sb, prefix, () -> !CompactionStrategyConfigUtil.isDefault(compactionStrategy),
+        "strategy", compactionStrategy);
+    prefix = append(sb, prefix, () -> !UserCompactionUtils.isDefault(selectorConfig), "selector",
+        selectorConfig);
+    prefix = append(sb, prefix, () -> !UserCompactionUtils.isDefault(configurerConfig),
+        "configurer", configurerConfig);
+    prefix = append(sb, prefix, () -> !hints.isEmpty(), "hints", hints);
+    sb.append("]");
+    return sb.toString();
+  }
 }
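
As a usage sketch of the new CompactionConfig methods above (not part of this patch), a client could trigger a user compaction that overrides compression for large outputs and passes execution hints. The client properties path, table name, codec choices, and the hint key are assumptions; which hint keys are honored depends on the table's configured CompactionDispatcher.

    import java.util.Map;
    import org.apache.accumulo.core.client.Accumulo;
    import org.apache.accumulo.core.client.AccumuloClient;
    import org.apache.accumulo.core.client.admin.CompactionConfig;
    import org.apache.accumulo.core.client.admin.PluginConfig;
    import org.apache.accumulo.core.client.admin.compaction.CompressionConfigurer;

    public class CompactWithConfigurer {
      public static void main(String[] args) throws Exception {
        // args[0] is assumed to be a path to an accumulo client properties file
        try (AccumuloClient client = Accumulo.newClient().from(args[0]).build()) {
          CompactionConfig config = new CompactionConfig()
              // override to gz when this compaction's input exceeds 32M;
              // otherwise the table's normal compression applies
              .setConfigurer(new PluginConfig(CompressionConfigurer.class.getName(),
                  Map.of(CompressionConfigurer.LARGE_FILE_COMPRESSION_THRESHOLD, "32M",
                      CompressionConfigurer.LARGE_FILE_COMPRESSION_TYPE, "gz")))
              // hint key/value are illustrative; a dispatcher must be configured to act on them
              .setExecutionHints(Map.of("compaction_service", "myservice"))
              .setWait(true);
          client.tableOperations().compact("mytable", config);
        }
      }
    }
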
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/CompactionStrategyConfig.java b/core/src/main/java/org/apache/accumulo/core/client/admin/CompactionStrategyConfig.java
index 718e39c..5a61695 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/CompactionStrategyConfig.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/CompactionStrategyConfig.java
@@ -20,8 +20,6 @@ package org.apache.accumulo.core.client.admin;
 
 import static java.util.Objects.requireNonNull;
 
-import java.util.Collections;
-import java.util.HashMap;
 import java.util.Map;
 
 /**
@@ -30,10 +28,22 @@ import java.util.Map;
  * {@link CompactionConfig}.
  *
  * @since 1.7.0
+ * @deprecated since 2.1.0 CompactionStrategies were deprecated for multiple reasons. First, they do
+ *             not support the new compaction execution model. Second, they bind selection and
+ *             output file configuration into a single entity when users need to configure these
+ *             independently. Third, they use internal Accumulo types and ensuring their stability
+ *             requires manual effort that may never happen. Fourth, writing a correct compaction
+ *             strategy was exceedingly difficult as it required knowledge of internal tablet server
+ *             synchronization in order to avoid causing scans to hang. Fifth, although measures were
+ *             taken to execute compaction strategies in the same manner as before, their execution
+ *             in the new model has subtle differences that may result in suboptimal compactions.
+ *             Please migrate to using {@link CompactionConfig#setSelector(PluginConfig)} and
+ *             {@link CompactionConfig#setConfigurer(PluginConfig)} as soon as possible.
  */
+@Deprecated(since = "2.1.0", forRemoval = true)
 public class CompactionStrategyConfig {
   private String className;
-  private Map<String,String> options = Collections.emptyMap();
+  private Map<String,String> options = Map.of();
 
   /**
    * @param className
@@ -42,8 +52,7 @@ public class CompactionStrategyConfig {
    *          tservers.
    */
   public CompactionStrategyConfig(String className) {
-    requireNonNull(className);
-    this.className = className;
+    this.className = requireNonNull(className);
   }
 
   /**
@@ -61,8 +70,7 @@ public class CompactionStrategyConfig {
    * @return this
    */
   public CompactionStrategyConfig setOptions(Map<String,String> opts) {
-    requireNonNull(opts);
-    this.options = new HashMap<>(opts);
+    this.options = Map.copyOf(opts);
     return this;
   }
 
@@ -70,7 +78,7 @@ public class CompactionStrategyConfig {
    * @return The previously set options. Returns an unmodifiable map. The default is an empty map.
    */
   public Map<String,String> getOptions() {
-    return Collections.unmodifiableMap(options);
+    return options;
   }
 
   @Override
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/CompactionStrategyConfig.java b/core/src/main/java/org/apache/accumulo/core/client/admin/PluginConfig.java
similarity index 56%
copy from core/src/main/java/org/apache/accumulo/core/client/admin/CompactionStrategyConfig.java
copy to core/src/main/java/org/apache/accumulo/core/client/admin/PluginConfig.java
index 718e39c..acbd28e 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/CompactionStrategyConfig.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/PluginConfig.java
@@ -20,57 +20,55 @@ package org.apache.accumulo.core.client.admin;
 
 import static java.util.Objects.requireNonNull;
 
-import java.util.Collections;
-import java.util.HashMap;
 import java.util.Map;
 
 /**
- * Configuration object which describes how a Compaction is run. Configuration objects are dependent
- * upon the CompactionStrategy running insider the server. This class is used in conjunction with
- * {@link CompactionConfig}.
+ * Encapsulates the configuration of an Accumulo server side plugin, which consists of a class name
+ * and options.
  *
- * @since 1.7.0
+ * @since 2.1.0
  */
-public class CompactionStrategyConfig {
-  private String className;
-  private Map<String,String> options = Collections.emptyMap();
+public class PluginConfig {
+
+  private final String className;
+  private final Map<String,String> options;
 
   /**
    * @param className
-   *          The name of a class that implements
-   *          org.apache.accumulo.tserver.compaction.CompactionStrategy. This class must be exist on
-   *          tservers.
+   *          The name of a class that implements a server side plugin. This class must exist on the
+   *          server side classpath.
    */
-  public CompactionStrategyConfig(String className) {
-    requireNonNull(className);
-    this.className = className;
+  public PluginConfig(String className) {
+    this.className = requireNonNull(className);
+    this.options = Map.of();
   }
 
   /**
-   * @return the class name passed to the constructor.
+   *
+   * @param className
+   *          The name of a class that implements a server side plugin. This class must exist on the
+   *          server side classpath.
+   * @param options
+   *          The options that will be passed to the init() method of the plugin when its
+   *          instantiated server side. This method will copy the map. The default is an empty map.
    */
-  public String getClassName() {
-    return className;
+  public PluginConfig(String className, Map<String,String> options) {
+    this.className = requireNonNull(className);
+    this.options = Map.copyOf(options);
   }
 
   /**
-   * @param opts
-   *          The options that will be passed to the init() method of the compaction strategy when
-   *          its instantiated on a tserver. This method will copy the map. The default is an empty
-   *          map.
-   * @return this
+   * @return the class name passed to the constructor.
    */
-  public CompactionStrategyConfig setOptions(Map<String,String> opts) {
-    requireNonNull(opts);
-    this.options = new HashMap<>(opts);
-    return this;
+  public String getClassName() {
+    return className;
   }
 
   /**
    * @return The previously set options. Returns an unmodifiable map. The default is an empty map.
    */
   public Map<String,String> getOptions() {
-    return Collections.unmodifiableMap(options);
+    return options;
   }
 
   @Override
@@ -80,11 +78,16 @@ public class CompactionStrategyConfig {
 
   @Override
   public boolean equals(Object o) {
-    if (o instanceof CompactionStrategyConfig) {
-      CompactionStrategyConfig ocsc = (CompactionStrategyConfig) o;
+    if (o instanceof PluginConfig) {
+      PluginConfig ocsc = (PluginConfig) o;
       return className.equals(ocsc.className) && options.equals(ocsc.options);
     }
 
     return false;
   }
+
+  @Override
+  public String toString() {
+    return "[className=" + className + ", options=" + options + "]";
+  }
 }
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java b/core/src/main/java/org/apache/accumulo/core/client/admin/compaction/CompactableFile.java
similarity index 62%
copy from server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java
copy to core/src/main/java/org/apache/accumulo/core/client/admin/compaction/CompactableFile.java
index 792c0b6..3e0a21e 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/compaction/CompactableFile.java
@@ -16,10 +16,27 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.accumulo.tserver.compaction;
+package org.apache.accumulo.core.client.admin.compaction;
+
+import java.net.URI;
+
+import org.apache.accumulo.core.metadata.CompactableFileImpl;
+
+/**
+ * @since 2.1.0
+ */
+public interface CompactableFile {
+
+  public String getFileName();
+
+  public URI getUri();
+
+  public long getEstimatedSize();
+
+  public long getEstimatedEntries();
+
+  static CompactableFile create(URI uri, long estimatedSize, long estimatedEntries) {
+    return new CompactableFileImpl(uri, estimatedSize, estimatedEntries);
+  }
 
-public enum MajorCompactionReason {
-  // do not change the order, the order of this enum determines the order
-  // in which queued major compactions are executed
-  USER, CHOP, NORMAL, IDLE
 }
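
The CompactableFile.create factory above is convenient for exercising selectors and configurers outside a tablet server. A sketch of a test helper follows, assuming the URI must follow Accumulo's usual tables directory layout; the host, table id, tablet directory, file name, and sizes are made up.

    import java.net.URI;
    import org.apache.accumulo.core.client.admin.compaction.CompactableFile;

    public class TestFiles {
      // Builds a fake input file for plugin tests; the path layout and sizes are invented.
      public static CompactableFile fake(String name, long size, long entries) {
        return CompactableFile.create(
            URI.create("hdfs://fake:8020/accumulo/tables/1/t-0001/" + name), size, entries);
      }
    }

A call such as fake("F00001.rf", 10_000_000, 50_000) could then be fed to a selector or configurer under test.
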
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/compaction/CompactionConfigurer.java b/core/src/main/java/org/apache/accumulo/core/client/admin/compaction/CompactionConfigurer.java
new file mode 100644
index 0000000..cd9bd7f
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/compaction/CompactionConfigurer.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.client.admin.compaction;
+
+import java.util.Collection;
+import java.util.Map;
+
+import org.apache.accumulo.core.client.PluginEnvironment;
+import org.apache.accumulo.core.data.TableId;
+
+/**
+ * Enables dynamically overriding per table properties used to create the output file for a
+ * compaction. For example, it could override the per table property for compression.
+ *
+ * @since 2.1.0
+ */
+public interface CompactionConfigurer {
+  /**
+   * @since 2.1.0
+   */
+  public interface InitParamaters {
+    TableId getTableId();
+
+    Map<String,String> getOptions();
+
+    PluginEnvironment getEnvironment();
+  }
+
+  void init(InitParamaters iparams);
+
+  /**
+   * @since 2.1.0
+   */
+  public interface InputParameters {
+    TableId getTableId();
+
+    public Collection<CompactableFile> getInputFiles();
+
+    PluginEnvironment getEnvironment();
+  }
+
+  /**
+   * Specifies how the output file should be created for a compaction.
+   *
+   * @since 2.1.0
+   */
+  public class Overrides {
+    private final Map<String,String> tablePropertyOverrides;
+
+    public Overrides(Map<String,String> tablePropertyOverrides) {
+      this.tablePropertyOverrides = Map.copyOf(tablePropertyOverrides);
+    }
+
+    /**
+     * @return Table properties to override.
+     */
+    public Map<String,String> getOverrides() {
+      return tablePropertyOverrides;
+    }
+  }
+
+  Overrides override(InputParameters params);
+}
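
To make the interface above concrete, here is a hedged sketch of a custom configurer (not part of this commit) that raises the output file's replication when a compaction reads many inputs; the option name "file.count" and the use of table.file.replication are illustrative choices.

    import java.util.Map;
    import org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer;
    import org.apache.accumulo.core.conf.Property;

    public class ManyFilesConfigurer implements CompactionConfigurer {

      private int fileCount;

      @Override
      public void init(InitParamaters iparams) {
        // "file.count" is a made-up option name passed via the configurer's options
        fileCount = Integer.parseInt(iparams.getOptions().getOrDefault("file.count", "10"));
      }

      @Override
      public Overrides override(InputParameters params) {
        if (params.getInputFiles().size() > fileCount) {
          // raise replication only for this compaction's output file
          return new Overrides(Map.of(Property.TABLE_FILE_REPLICATION.getKey(), "3"));
        }
        // empty overrides means the table's normal properties are used
        return new Overrides(Map.of());
      }
    }
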
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/compaction/CompactionSelector.java b/core/src/main/java/org/apache/accumulo/core/client/admin/compaction/CompactionSelector.java
new file mode 100644
index 0000000..f542dce
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/compaction/CompactionSelector.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.client.admin.compaction;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.function.Predicate;
+
+import org.apache.accumulo.core.client.PluginEnvironment;
+import org.apache.accumulo.core.client.sample.SamplerConfiguration;
+import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
+import org.apache.accumulo.core.client.summary.Summary;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+
+/**
+ * This class selects which files a user compaction will compact. It can also be configured per
+ * table to periodically select files to compact.
+ *
+ * @since 2.1.0
+ */
+public interface CompactionSelector {
+
+  public interface InitParamaters {
+    Map<String,String> getOptions();
+
+    TableId getTableId();
+
+    PluginEnvironment getEnvironment();
+  }
+
+  void init(InitParamaters iparams);
+
+  public interface SelectionParameters {
+    PluginEnvironment getEnvironment();
+
+    Collection<CompactableFile> getAvailableFiles();
+
+    Collection<Summary> getSummaries(Collection<CompactableFile> files,
+        Predicate<SummarizerConfiguration> summarySelector);
+
+    TableId getTableId();
+
+    Optional<SortedKeyValueIterator<Key,Value>> getSample(CompactableFile cf,
+        SamplerConfiguration sc);
+
+  }
+
+  public static class Selection {
+    private final Collection<CompactableFile> filesToCompact;
+
+    public Selection(Collection<CompactableFile> filesToCompact) {
+      this.filesToCompact = Set.copyOf(filesToCompact);
+    }
+
+    public Collection<CompactableFile> getFilesToCompact() {
+      return filesToCompact;
+    }
+  }
+
+  Selection select(SelectionParameters sparams);
+}
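
Again for illustration only, a minimal selector implementation might pick every file below a size cutoff; the option name "max.size" and its default are invented.

    import java.util.List;
    import java.util.stream.Collectors;
    import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
    import org.apache.accumulo.core.client.admin.compaction.CompactionSelector;

    public class SmallFileSelector implements CompactionSelector {

      private long maxSize;

      @Override
      public void init(InitParamaters iparams) {
        // "max.size" is a made-up option; the default is 100M expressed in bytes
        maxSize = Long.parseLong(iparams.getOptions().getOrDefault("max.size", "104857600"));
      }

      @Override
      public Selection select(SelectionParameters sparams) {
        List<CompactableFile> small = sparams.getAvailableFiles().stream()
            .filter(cf -> cf.getEstimatedSize() < maxSize).collect(Collectors.toList());
        // an empty list results in no files being compacted for this selection
        return new Selection(small);
      }
    }
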
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/compaction/CompressionConfigurer.java b/core/src/main/java/org/apache/accumulo/core/client/admin/compaction/CompressionConfigurer.java
new file mode 100644
index 0000000..00098d2
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/compaction/CompressionConfigurer.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.client.admin.compaction;
+
+import java.util.Map;
+
+import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
+import org.apache.accumulo.core.conf.Property;
+
+/**
+ * A compaction configurer that can adjust the compression configuration for a compaction when the
+ * sum of the input file sizes exceeds a threshold.
+ *
+ * <p>
+ * To use compression type CL for large files and CS for small files, set the following table
+ * properties.
+ *
+ * <ul>
+ * <li>Set {@code table.compaction.configurer} to the fully qualified name of this class.
+ * <li>Set {@code table.compaction.configurer.opts.large.compress.threshold} to a size in bytes.
+ * The suffixes K, M, and G can be used. When the inputs exceed this size, the following compression
+ * is used.
+ * <li>Set {@code table.compaction.configurer.opts.large.compress.type} to CL.
+ * <li>Set {@code table.file.compress.type=CS}
+ * </ul>
+ *
+ * <p>
+ * With the above config, minor compaction and small compaction will use CS for compression.
+ * Everything else will use CL. For example CS could be snappy and CL could be gzip. Using a faster
+ * compression for small files and slower compression for larger files can increase ingestion
+ * throughput without using a lot of extra space.
+ *
+ * @since 2.1.0
+ */
+public class CompressionConfigurer implements CompactionConfigurer {
+
+  public static final String LARGE_FILE_COMPRESSION_THRESHOLD = "large.compress.threshold";
+
+  public static final String LARGE_FILE_COMPRESSION_TYPE = "large.compress.type";
+
+  private Long largeThresh;
+  private String largeCompress;
+
+  @Override
+  public void init(InitParamaters iparams) {
+    var options = iparams.getOptions();
+
+    String largeThresh = options.get(LARGE_FILE_COMPRESSION_THRESHOLD);
+    String largeCompress = options.get(LARGE_FILE_COMPRESSION_TYPE);
+    if (largeThresh != null && largeCompress != null) {
+      this.largeThresh = ConfigurationTypeHelper.getFixedMemoryAsBytes(largeThresh);
+      this.largeCompress = largeCompress;
+    } else if (largeThresh != null ^ largeCompress != null) {
+      throw new IllegalArgumentException(
+          "Must set both of " + Property.TABLE_COMPACTION_CONFIGURER_OPTS.getKey() + " ("
+              + LARGE_FILE_COMPRESSION_TYPE + " and " + LARGE_FILE_COMPRESSION_THRESHOLD
+              + ") or neither for " + this.getClass().getName());
+    }
+  }
+
+  @Override
+  public Overrides override(InputParameters params) {
+    long inputsSum =
+        params.getInputFiles().stream().mapToLong(CompactableFile::getEstimatedSize).sum();
+
+    if (inputsSum > largeThresh) {
+      return new Overrides(Map.of(Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), largeCompress));
+    }
+
+    return new Overrides(Map.of());
+  }
+
+}
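
A sketch of applying the four properties listed in the javadoc above, so the configurer runs for all of a table's compactions; the table name, threshold, and codec choices are examples only.

    import org.apache.accumulo.core.client.AccumuloClient;
    import org.apache.accumulo.core.client.admin.compaction.CompressionConfigurer;

    public class CompressionSetup {
      public static void configure(AccumuloClient client, String table) throws Exception {
        var ops = client.tableOperations();
        ops.setProperty(table, "table.compaction.configurer", CompressionConfigurer.class.getName());
        ops.setProperty(table, "table.compaction.configurer.opts.large.compress.threshold", "100M");
        ops.setProperty(table, "table.compaction.configurer.opts.large.compress.type", "gz");
        // small files (including minor compaction output) keep using the table default
        ops.setProperty(table, "table.file.compress.type", "snappy");
      }
    }
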
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/TooManyDeletesCompactionStrategy.java b/core/src/main/java/org/apache/accumulo/core/client/admin/compaction/TooManyDeletesSelector.java
similarity index 61%
copy from server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/TooManyDeletesCompactionStrategy.java
copy to core/src/main/java/org/apache/accumulo/core/client/admin/compaction/TooManyDeletesSelector.java
index f9ca9bf..1c9a419 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/TooManyDeletesCompactionStrategy.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/compaction/TooManyDeletesSelector.java
@@ -16,30 +16,23 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.accumulo.tserver.compaction.strategies;
+
+package org.apache.accumulo.core.client.admin.compaction;
 
 import static org.apache.accumulo.core.client.summary.summarizers.DeletesSummarizer.DELETES_STAT;
 import static org.apache.accumulo.core.client.summary.summarizers.DeletesSummarizer.TOTAL_STAT;
 
-import java.io.IOException;
 import java.util.Collection;
-import java.util.Collections;
-import java.util.Map;
-import java.util.Map.Entry;
+import java.util.List;
 import java.util.function.Predicate;
 
 import org.apache.accumulo.core.client.rfile.RFile.WriterOptions;
 import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
 import org.apache.accumulo.core.client.summary.Summary;
 import org.apache.accumulo.core.client.summary.summarizers.DeletesSummarizer;
-import org.apache.accumulo.core.metadata.StoredTabletFile;
-import org.apache.accumulo.core.metadata.schema.DataFileValue;
-import org.apache.accumulo.tserver.compaction.CompactionPlan;
-import org.apache.accumulo.tserver.compaction.DefaultCompactionStrategy;
-import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
 
 /**
- * This compaction strategy works in concert with the {@link DeletesSummarizer}. Using the
+ * This compaction selector works in concert with the {@link DeletesSummarizer}. Using the
  * statistics from DeleteSummarizer this strategy will compact all files in a table when the number
  * of deletes/non-deletes exceeds a threshold.
  *
@@ -56,7 +49,7 @@ import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
  * back to using Accumulo's estimated entries per file in this case. For the files without summary
  * information the estimated number of deletes will be zero. This fall back method will
  * underestimate deletes which will not lead to false positives, except for the case of bulk
- * imported files. Accumulo estimates that bulk imported files have zero entires. The second option
+ * imported files. Accumulo estimates that bulk imported files have zero entries. The second option
  * {@value #PROCEED_ZERO_NO_SUMMARY_OPT} determines if this strategy should proceed when it sees
  * bulk imported files that do not have summary data. This option defaults to
  * {@value #PROCEED_ZERO_NO_SUMMARY_OPT_DEFAULT}.
@@ -67,21 +60,12 @@ import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
  * {@link WriterOptions#withSummarizers(SummarizerConfiguration...)}
  *
  * <p>
- * When this strategy does not decide to compact based on the number of deletes, then it will defer
- * the decision to the {@link DefaultCompactionStrategy}.
- *
- * <p>
- * Configuring this compaction strategy for a table will cause it to always queue compactions, even
- * though it may not decide to compact. These queued compactions may show up on the Accumulo monitor
- * page. This is because summary data can not be read until after compaction is queued and dequeued.
- * When the compaction is dequeued it can then decide not to compact. See <a
- * href=https://issues.apache.org/jira/browse/ACCUMULO-4573>ACCUMULO-4573</a>
+ * When using this feature, it is important to ensure the summary cache is on and that the summaries
+ * fit in the cache.
  *
- * @since 2.0.0
+ * @since 2.1.0
  */
-public class TooManyDeletesCompactionStrategy extends DefaultCompactionStrategy {
-
-  private boolean shouldCompact = false;
+public class TooManyDeletesSelector implements CompactionSelector {
 
   private double threshold;
 
@@ -102,7 +86,8 @@ public class TooManyDeletesCompactionStrategy extends DefaultCompactionStrategy
   public static final String PROCEED_ZERO_NO_SUMMARY_OPT_DEFAULT = "false";
 
   @Override
-  public void init(Map<String,String> options) {
+  public void init(InitParamaters iparams) {
+    var options = iparams.getOptions();
     this.threshold = Double.parseDouble(options.getOrDefault(THRESHOLD_OPT, THRESHOLD_OPT_DEFAULT));
     if (threshold <= 0.0 || threshold > 1.0) {
       throw new IllegalArgumentException(
@@ -114,27 +99,18 @@ public class TooManyDeletesCompactionStrategy extends DefaultCompactionStrategy
   }
 
   @Override
-  public boolean shouldCompact(MajorCompactionRequest request) {
+  public Selection select(SelectionParameters sparams) {
+
+    var tableConf = sparams.getEnvironment().getConfiguration(sparams.getTableId());
+
     Collection<SummarizerConfiguration> configuredSummarizers =
-        SummarizerConfiguration.fromTableProperties(request.getTableProperties());
+        SummarizerConfiguration.fromTableProperties(tableConf);
 
     // check if delete summarizer is configured for table
-    if (configuredSummarizers.stream().map(SummarizerConfiguration::getClassName)
-        .anyMatch(cn -> cn.equals(DeletesSummarizer.class.getName()))) {
-      // This is called before gatherInformation, so need to always queue for compaction until
-      // context
-      // can be gathered. Also its not safe to request summary
-      // information here as its a blocking operation. Blocking operations are not allowed in
-      // shouldCompact.
-      return true;
-    } else {
-      return super.shouldCompact(request);
+    if (configuredSummarizers.stream().map(sc -> sc.getClassName())
+        .noneMatch(cn -> cn.equals(DeletesSummarizer.class.getName()))) {
+      return new Selection(List.of());
     }
-  }
-
-  @Override
-  public void gatherInformation(MajorCompactionRequest request) throws IOException {
-    super.gatherInformation(request);
 
     Predicate<SummarizerConfiguration> summarizerPredicate =
         conf -> conf.getClassName().equals(DeletesSummarizer.class.getName())
@@ -143,21 +119,20 @@ public class TooManyDeletesCompactionStrategy extends DefaultCompactionStrategy
     long total = 0;
     long deletes = 0;
 
-    for (Entry<StoredTabletFile,DataFileValue> entry : request.getFiles().entrySet()) {
-      Collection<Summary> summaries =
-          request.getSummaries(Collections.singleton(entry.getKey()), summarizerPredicate);
+    for (CompactableFile file : sparams.getAvailableFiles()) {
+      Collection<Summary> summaries = sparams.getSummaries(List.of(file), summarizerPredicate);
+
       if (summaries.size() == 1) {
         Summary summary = summaries.iterator().next();
         total += summary.getStatistics().get(TOTAL_STAT);
         deletes += summary.getStatistics().get(DELETES_STAT);
       } else {
-        long numEntries = entry.getValue().getNumEntries();
+        long numEntries = file.getEstimatedEntries();
         if (numEntries == 0 && !proceed_bns) {
-          shouldCompact = false;
-          return;
+          return new Selection(List.of());
         } else {
           // no summary data so use Accumulo's estimate of total entries in file
-          total += entry.getValue().getNumEntries();
+          total += numEntries;
         }
       }
     }
@@ -169,21 +144,12 @@ public class TooManyDeletesCompactionStrategy extends DefaultCompactionStrategy
       // estimates are off
 
       double ratio = deletes / (double) nonDeletes;
-      shouldCompact = ratio >= threshold;
-    } else {
-      shouldCompact = false;
-    }
-  }
-
-  @Override
-  public CompactionPlan getCompactionPlan(MajorCompactionRequest request) {
-    if (shouldCompact) {
-      CompactionPlan cp = new CompactionPlan();
-      cp.inputFiles.addAll(request.getFiles().keySet());
-      return cp;
+      if (ratio >= threshold) {
+        return new Selection(sparams.getAvailableFiles());
+      }
     }
 
-    // fall back to default
-    return super.getCompactionPlan(request);
+    return new Selection(List.of());
   }
+
 }
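
Since the selector above does nothing unless the DeletesSummarizer is configured, a combined usage sketch follows (not part of this patch); the table name and threshold are illustrative, and it is assumed THRESHOLD_OPT remains a public option key as it was on the old strategy.

    import java.util.Map;
    import org.apache.accumulo.core.client.AccumuloClient;
    import org.apache.accumulo.core.client.admin.CompactionConfig;
    import org.apache.accumulo.core.client.admin.PluginConfig;
    import org.apache.accumulo.core.client.admin.compaction.TooManyDeletesSelector;
    import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
    import org.apache.accumulo.core.client.summary.summarizers.DeletesSummarizer;

    public class DeleteDrivenCompaction {
      public static void run(AccumuloClient client, String table) throws Exception {
        // the selector reads delete statistics produced by this summarizer
        client.tableOperations().addSummarizers(table,
            SummarizerConfiguration.builder(DeletesSummarizer.class).build());

        // select all files when deletes exceed 25% of a tablet's entries
        var selector = new PluginConfig(TooManyDeletesSelector.class.getName(),
            Map.of(TooManyDeletesSelector.THRESHOLD_OPT, ".25"));
        client.tableOperations().compact(table, new CompactionConfig().setSelector(selector));
      }
    }
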
diff --git a/core/src/main/java/org/apache/accumulo/core/client/summary/summarizers/DeletesSummarizer.java b/core/src/main/java/org/apache/accumulo/core/client/summary/summarizers/DeletesSummarizer.java
index 7ffa094..e94ddca 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/summary/summarizers/DeletesSummarizer.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/summary/summarizers/DeletesSummarizer.java
@@ -19,6 +19,7 @@
 package org.apache.accumulo.core.client.summary.summarizers;
 
 import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.admin.compaction.TooManyDeletesSelector;
 import org.apache.accumulo.core.client.summary.Summarizer;
 import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
 import org.apache.accumulo.core.data.Key;
@@ -27,14 +28,10 @@ import org.apache.accumulo.core.data.Value;
 /**
  * This summarizer tracks the total number of delete Keys seen and the total number of keys seen.
  *
- * <p>
- * This summarizer is used by
- * org.apache.accumulo.tserver.compaction.strategies.TooManyDeletesCompactionStrategy to make
- * compaction decisions based on the number of deletes.
- *
  * @since 2.0.0
  * @see TableOperations#addSummarizers(String,
  *      org.apache.accumulo.core.client.summary.SummarizerConfiguration...)
+ * @see TooManyDeletesSelector
  */
 public class DeletesSummarizer implements Summarizer {
 
diff --git a/core/src/main/java/org/apache/accumulo/core/clientImpl/CompactionStrategyConfigUtil.java b/core/src/main/java/org/apache/accumulo/core/clientImpl/CompactionStrategyConfigUtil.java
index eea139d..e3f3a82 100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/CompactionStrategyConfigUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/CompactionStrategyConfigUtil.java
@@ -18,23 +18,17 @@
  */
 package org.apache.accumulo.core.clientImpl;
 
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
 import java.io.DataInput;
-import java.io.DataInputStream;
 import java.io.DataOutput;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.util.HashMap;
 import java.util.Map;
-import java.util.Map.Entry;
 
+import org.apache.accumulo.core.client.admin.CompactionConfig;
 import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
 
+@SuppressWarnings("removal")
 public class CompactionStrategyConfigUtil {
 
-  public static final CompactionStrategyConfig DEFAULT_STRATEGY = new CompactionStrategyConfig(
-      "org.apache.accumulo.tserver.compaction.EverythingCompactionStrategy") {
+  public static final CompactionStrategyConfig DEFAULT_STRATEGY = new CompactionStrategyConfig("") {
     @Override
     public CompactionStrategyConfig setOptions(Map<String,String> opts) {
       throw new UnsupportedOperationException();
@@ -43,66 +37,26 @@ public class CompactionStrategyConfigUtil {
 
   private static final int MAGIC = 0xcc5e6024;
 
-  public static void encode(DataOutput dout, CompactionStrategyConfig csc) throws IOException {
-
-    dout.writeInt(MAGIC);
-    dout.writeByte(1);
-
-    dout.writeUTF(csc.getClassName());
-    dout.writeInt(csc.getOptions().size());
-
-    for (Entry<String,String> entry : csc.getOptions().entrySet()) {
-      dout.writeUTF(entry.getKey());
-      dout.writeUTF(entry.getValue());
-    }
-
+  public static void encode(DataOutput dout, CompactionConfig cc) {
+    var cs = cc.getCompactionStrategy();
+    UserCompactionUtils.encode(dout, MAGIC, 1, cs.getClassName(), cs.getOptions());
   }
 
-  public static byte[] encode(CompactionStrategyConfig csc) {
-    ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    DataOutputStream dos = new DataOutputStream(baos);
-
-    try {
-      encode(dos, csc);
-      dos.close();
+  public static void decode(CompactionConfig cc, DataInput din) {
+    var pcd = UserCompactionUtils.decode(din, MAGIC, 1);
+    var csc = new CompactionStrategyConfig(pcd.className).setOptions(pcd.opts);
 
-      return baos.toByteArray();
-    } catch (IOException ioe) {
-      throw new RuntimeException(ioe);
+    if (!isDefault(csc)) {
+      cc.setCompactionStrategy(csc);
     }
   }
 
-  public static CompactionStrategyConfig decode(DataInput din) throws IOException {
-    if (din.readInt() != MAGIC) {
-      throw new IllegalArgumentException("Unexpected MAGIC ");
-    }
+  public static boolean isDefault(CompactionStrategyConfig compactionStrategy) {
+    return compactionStrategy.equals(DEFAULT_STRATEGY);
 
-    if (din.readByte() != 1) {
-      throw new IllegalArgumentException("Unexpected version");
-    }
-
-    String classname = din.readUTF();
-    int numEntries = din.readInt();
-
-    HashMap<String,String> opts = new HashMap<>();
-
-    for (int i = 0; i < numEntries; i++) {
-      String k = din.readUTF();
-      String v = din.readUTF();
-      opts.put(k, v);
-    }
-
-    return new CompactionStrategyConfig(classname).setOptions(opts);
   }
 
-  public static CompactionStrategyConfig decode(byte[] encodedCsc) {
-    ByteArrayInputStream bais = new ByteArrayInputStream(encodedCsc);
-    DataInputStream dis = new DataInputStream(bais);
-
-    try {
-      return decode(dis);
-    } catch (IOException ioe) {
-      throw new RuntimeException(ioe);
-    }
+  public static boolean isDefault(CompactionConfig compactionConfig) {
+    return isDefault(compactionConfig.getCompactionStrategy());
   }
 }
diff --git a/core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsImpl.java b/core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsImpl.java
index 3f5783b..46dada5 100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsImpl.java
@@ -84,6 +84,8 @@ import org.apache.accumulo.core.client.admin.Locations;
 import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.client.admin.SummaryRetriever;
 import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer;
+import org.apache.accumulo.core.client.admin.compaction.CompactionSelector;
 import org.apache.accumulo.core.client.sample.SamplerConfiguration;
 import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
 import org.apache.accumulo.core.client.summary.Summary;
@@ -111,7 +113,6 @@ import org.apache.accumulo.core.dataImpl.thrift.TSummarizerConfiguration;
 import org.apache.accumulo.core.dataImpl.thrift.TSummaryRequest;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.iteratorsImpl.system.SystemIteratorUtil;
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.core.master.thrift.FateOperation;
 import org.apache.accumulo.core.master.thrift.MasterClientService;
@@ -838,7 +839,6 @@ public class TableOperationsImpl extends TableOperationsHelper {
   public void compact(String tableName, CompactionConfig config)
       throws AccumuloSecurityException, TableNotFoundException, AccumuloException {
     checkArgument(tableName != null, "tableName is null");
-    ByteBuffer EMPTY = ByteBuffer.allocate(0);
 
     // Ensure compaction iterators exist on a tabletserver
     final String skviName = SortedKeyValueIterator.class.getName();
@@ -849,14 +849,23 @@ public class TableOperationsImpl extends TableOperationsHelper {
       }
     }
 
-    // Make sure the specified compaction strategy exists on a tabletserver
-    final String compactionStrategyName = config.getCompactionStrategy().getClassName();
-    if (!CompactionStrategyConfigUtil.DEFAULT_STRATEGY.getClassName()
-        .equals(compactionStrategyName)) {
-      if (!testClassLoad(tableName, compactionStrategyName,
-          "org.apache.accumulo.tserver.compaction.CompactionStrategy")) {
+    ensureStrategyCanLoad(tableName, config);
+
+    if (!UserCompactionUtils.isDefault(config.getConfigurer())) {
+      if (!testClassLoad(tableName, config.getConfigurer().getClassName(),
+          CompactionConfigurer.class.getName())) {
+        throw new AccumuloException(
+            "TabletServer could not load " + CompactionConfigurer.class.getSimpleName() + " class "
+                + config.getConfigurer().getClassName());
+      }
+    }
+
+    if (!UserCompactionUtils.isDefault(config.getSelector())) {
+      if (!testClassLoad(tableName, config.getSelector().getClassName(),
+          CompactionSelector.class.getName())) {
         throw new AccumuloException(
-            "TabletServer could not load CompactionStrategy class " + compactionStrategyName);
+            "TabletServer could not load " + CompactionSelector.class.getSimpleName() + " class "
+                + config.getSelector().getClassName());
       }
     }
 
@@ -869,10 +878,7 @@ public class TableOperationsImpl extends TableOperationsHelper {
       _flush(tableId, start, end, true);
 
     List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.canonical().getBytes(UTF_8)),
-        start == null ? EMPTY : TextUtil.getByteBuffer(start),
-        end == null ? EMPTY : TextUtil.getByteBuffer(end),
-        ByteBuffer.wrap(SystemIteratorUtil.encodeIteratorSettings(config.getIterators())),
-        ByteBuffer.wrap(CompactionStrategyConfigUtil.encode(config.getCompactionStrategy())));
+        ByteBuffer.wrap(UserCompactionUtils.encode(config)));
 
     Map<String,String> opts = new HashMap<>();
     try {
@@ -885,6 +891,19 @@ public class TableOperationsImpl extends TableOperationsHelper {
     }
   }
 
+  @SuppressWarnings("removal")
+  private void ensureStrategyCanLoad(String tableName, CompactionConfig config)
+      throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
+    // Make sure the specified compaction strategy exists on a tabletserver
+    if (!CompactionStrategyConfigUtil.isDefault(config.getCompactionStrategy())) {
+      if (!testClassLoad(tableName, config.getCompactionStrategy().getClassName(),
+          "org.apache.accumulo.tserver.compaction.CompactionStrategy")) {
+        throw new AccumuloException("TabletServer could not load CompactionStrategy class "
+            + config.getCompactionStrategy().getClassName());
+      }
+    }
+  }
+
   @Override
   public void cancelCompaction(String tableName)
       throws AccumuloSecurityException, TableNotFoundException, AccumuloException {
diff --git a/core/src/main/java/org/apache/accumulo/core/clientImpl/UserCompactionUtils.java b/core/src/main/java/org/apache/accumulo/core/clientImpl/UserCompactionUtils.java
new file mode 100644
index 0000000..cbb7e31
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/UserCompactionUtils.java
@@ -0,0 +1,291 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.clientImpl;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.admin.CompactionConfig;
+import org.apache.accumulo.core.client.admin.PluginConfig;
+import org.apache.hadoop.io.Text;
+
+import com.google.common.base.Preconditions;
+
+public class UserCompactionUtils {
+
+  private static final int MAGIC = 0x02040810;
+  private static final int SELECTOR_MAGIC = 0xae9270bf;
+  private static final int CONFIGURER_MAGIC = 0xf93e570a;
+
+  public static final PluginConfig DEFAULT_CONFIGURER = new PluginConfig("", Map.of());
+  public static final PluginConfig DEFAULT_SELECTOR = new PluginConfig("", Map.of());
+
+  public static void encode(DataOutput dout, Map<String,String> options) {
+    try {
+      dout.writeInt(options.size());
+
+      for (Entry<String,String> entry : options.entrySet()) {
+        dout.writeUTF(entry.getKey());
+        dout.writeUTF(entry.getValue());
+      }
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
+    }
+  }
+
+  public static void encode(DataOutput dout, int magic, int version, String className,
+      Map<String,String> options) {
+
+    try {
+      dout.writeInt(magic);
+      dout.writeByte(version);
+
+      dout.writeUTF(className);
+      encode(dout, options);
+
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
+    }
+  }
+
+  public static interface Encoder<T> {
+    public void encode(DataOutput dout, T p);
+  }
+
+  public static <T> byte[] encode(T csc, Encoder<T> encoder) {
+
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    DataOutputStream dos = new DataOutputStream(baos);
+
+    try {
+      encoder.encode(dos, csc);
+      dos.close();
+      return baos.toByteArray();
+    } catch (IOException ioe) {
+      throw new UncheckedIOException(ioe);
+    }
+  }
+
+  public static class PluginConfigData {
+    String className;
+    Map<String,String> opts;
+  }
+
+  public static Map<String,String> decodeMap(DataInput din) {
+    try {
+      int numEntries = din.readInt();
+
+      var opts = new HashMap<String,String>();
+
+      for (int i = 0; i < numEntries; i++) {
+        String k = din.readUTF();
+        String v = din.readUTF();
+        opts.put(k, v);
+      }
+
+      return opts;
+    } catch (IOException ioe) {
+      throw new UncheckedIOException(ioe);
+    }
+  }
+
+  public static PluginConfigData decode(DataInput din, int magic, int version) {
+
+    try {
+      if (din.readInt() != magic) {
+        throw new IllegalArgumentException("Unexpected MAGIC ");
+      }
+
+      if (din.readByte() != version) {
+        throw new IllegalArgumentException("Unexpected version");
+      }
+
+      var pcd = new PluginConfigData();
+
+      pcd.className = din.readUTF();
+      int numEntries = din.readInt();
+
+      pcd.opts = new HashMap<>();
+
+      for (int i = 0; i < numEntries; i++) {
+        String k = din.readUTF();
+        String v = din.readUTF();
+        pcd.opts.put(k, v);
+      }
+
+      return pcd;
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
+    }
+  }
+
+  public static interface Decoder<T> {
+    T decode(DataInput di);
+  }
+
+  public static <T> T decode(byte[] encodedCsc, Decoder<T> decoder) {
+    ByteArrayInputStream bais = new ByteArrayInputStream(encodedCsc);
+    DataInputStream dis = new DataInputStream(bais);
+    return decoder.decode(dis);
+  }
+
+  public static void encodeSelector(DataOutput dout, PluginConfig csc) {
+    encode(dout, SELECTOR_MAGIC, 1, csc.getClassName(), csc.getOptions());
+  }
+
+  public static byte[] encodeSelector(PluginConfig csc) {
+    return encode(csc, UserCompactionUtils::encodeSelector);
+  }
+
+  public static PluginConfig decodeSelector(DataInput di) {
+    var pcd = decode(di, SELECTOR_MAGIC, 1);
+    return new PluginConfig(pcd.className, pcd.opts);
+  }
+
+  public static PluginConfig decodeSelector(byte[] bytes) {
+    return decode(bytes, UserCompactionUtils::decodeSelector);
+  }
+
+  public static void encodeConfigurer(DataOutput dout, PluginConfig ccc) {
+    encode(dout, CONFIGURER_MAGIC, 1, ccc.getClassName(), ccc.getOptions());
+  }
+
+  public static byte[] encodeConfigurer(PluginConfig ccc) {
+    return encode(ccc, UserCompactionUtils::encodeConfigurer);
+  }
+
+  public static PluginConfig decodeConfigurer(DataInput di) {
+    var pcd = decode(di, CONFIGURER_MAGIC, 1);
+    return new PluginConfig(pcd.className, pcd.opts);
+  }
+
+  public static PluginConfig decodeConfigurer(byte[] bytes) {
+    return decode(bytes, UserCompactionUtils::decodeConfigurer);
+  }
+
+  public static byte[] encode(Map<String,String> options) {
+    return encode(options, UserCompactionUtils::encode);
+  }
+
+  public static Map<String,String> decodeMap(byte[] bytes) {
+    return decode(bytes, UserCompactionUtils::decodeMap);
+  }
+
+  public static void encode(DataOutput dout, CompactionConfig cc) {
+    try {
+      dout.writeInt(MAGIC);
+
+      dout.writeBoolean(cc.getStartRow() != null);
+      if (cc.getStartRow() != null) {
+        cc.getStartRow().write(dout);
+      }
+
+      dout.writeBoolean(cc.getEndRow() != null);
+      if (cc.getEndRow() != null) {
+        cc.getEndRow().write(dout);
+      }
+
+      dout.writeInt(cc.getIterators().size());
+      for (IteratorSetting is : cc.getIterators()) {
+        is.write(dout);
+      }
+
+      CompactionStrategyConfigUtil.encode(dout, cc);
+
+      encodeConfigurer(dout, cc.getConfigurer());
+      encodeSelector(dout, cc.getSelector());
+      encode(dout, cc.getExecutionHints());
+
+    } catch (IOException ioe) {
+      throw new UncheckedIOException(ioe);
+    }
+
+  }
+
+  public static byte[] encode(CompactionConfig cc) {
+    return encode(cc, UserCompactionUtils::encode);
+  }
+
+  public static CompactionConfig decodeCompactionConfig(DataInput din) {
+    try {
+      Preconditions.checkArgument(MAGIC == din.readInt());
+
+      CompactionConfig cc = new CompactionConfig();
+
+      if (din.readBoolean()) {
+        Text startRow = new Text();
+        startRow.readFields(din);
+        cc.setStartRow(startRow);
+      }
+
+      if (din.readBoolean()) {
+        Text endRow = new Text();
+        endRow.readFields(din);
+        cc.setEndRow(endRow);
+      }
+
+      int num = din.readInt();
+      var iterators = new ArrayList<IteratorSetting>(num);
+
+      for (int i = 0; i < num; i++) {
+        iterators.add(new IteratorSetting(din));
+      }
+
+      cc.setIterators(iterators);
+
+      CompactionStrategyConfigUtil.decode(cc, din);
+
+      var configurer = decodeConfigurer(din);
+      if (!isDefault(configurer)) {
+        cc.setConfigurer(configurer);
+      }
+
+      var selector = decodeSelector(din);
+      if (!isDefault(selector)) {
+        cc.setSelector(selector);
+      }
+
+      var hints = decodeMap(din);
+      cc.setExecutionHints(hints);
+
+      return cc;
+    } catch (IOException ioe) {
+      throw new UncheckedIOException(ioe);
+    }
+  }
+
+  public static boolean isDefault(PluginConfig configurer) {
+    return configurer.equals(DEFAULT_CONFIGURER);
+  }
+
+  public static CompactionConfig decodeCompactionConfig(byte[] bytes) {
+    return decode(bytes, UserCompactionUtils::decodeCompactionConfig);
+  }
+}
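
For illustration, a minimal round-trip sketch using the helpers above; the selector class name and option are invented:

    PluginConfig selector =
        new PluginConfig("org.example.MySelector", Map.of("threshold", ".25"));

    // encodeSelector writes the selector magic number, a version byte, the class name, and options
    byte[] bytes = UserCompactionUtils.encodeSelector(selector);

    // decodeSelector validates the magic number and version, then rebuilds the config
    PluginConfig decoded = UserCompactionUtils.decodeSelector(bytes);
    assert selector.equals(decoded);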
diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java b/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java
index 3da8830..08952d1 100644
--- a/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java
+++ b/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java
@@ -22,27 +22,32 @@ import java.util.Map;
 
 public enum CompactionSettings {
 
-  SF_NO_SUMMARY(new NullType()),
-  SF_EXTRA_SUMMARY(new NullType()),
-  SF_NO_SAMPLE(new NullType()),
-  SF_GT_ESIZE_OPT(new SizeType()),
-  SF_LT_ESIZE_OPT(new SizeType()),
-  SF_NAME_RE_OPT(new PatternType()),
-  SF_PATH_RE_OPT(new PatternType()),
-  MIN_FILES_OPT(new UIntType()),
-  OUTPUT_COMPRESSION_OPT(new StringType()),
-  OUTPUT_BLOCK_SIZE_OPT(new SizeType()),
-  OUTPUT_HDFS_BLOCK_SIZE_OPT(new SizeType()),
-  OUTPUT_INDEX_BLOCK_SIZE_OPT(new SizeType()),
-  OUTPUT_REPLICATION_OPT(new UIntType());
+  SF_NO_SUMMARY(new NullType(), true),
+  SF_EXTRA_SUMMARY(new NullType(), true),
+  SF_NO_SAMPLE(new NullType(), true),
+  SF_GT_ESIZE_OPT(new SizeType(), true),
+  SF_LT_ESIZE_OPT(new SizeType(), true),
+  SF_NAME_RE_OPT(new PatternType(), true),
+  SF_PATH_RE_OPT(new PatternType(), true),
+  MIN_FILES_OPT(new UIntType(), true),
+  OUTPUT_COMPRESSION_OPT(new StringType(), false),
+  OUTPUT_BLOCK_SIZE_OPT(new SizeType(), false),
+  OUTPUT_HDFS_BLOCK_SIZE_OPT(new SizeType(), false),
+  OUTPUT_INDEX_BLOCK_SIZE_OPT(new SizeType(), false),
+  OUTPUT_REPLICATION_OPT(new UIntType(), false);
 
   private Type type;
+  private boolean selectorOpt;
 
-  private CompactionSettings(Type type) {
+  private CompactionSettings(Type type, boolean selectorOpt) {
     this.type = type;
+    this.selectorOpt = selectorOpt;
   }
 
-  public void put(Map<String,String> options, String val) {
-    options.put(name(), type.convert(val));
+  public void put(Map<String,String> selectorOpts, Map<String,String> configurerOpts, String val) {
+    if (selectorOpt)
+      selectorOpts.put(name(), type.convert(val));
+    else
+      configurerOpts.put(name(), type.convert(val));
   }
 }
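
A rough sketch of the new two-map form of put (the values are illustrative): selector-scoped settings land in one map and output settings in the other.

    Map<String,String> selectorOpts = new HashMap<>();
    Map<String,String> configurerOpts = new HashMap<>();

    // constructed with selectorOpt=true, so the converted value goes to selectorOpts
    CompactionSettings.SF_GT_ESIZE_OPT.put(selectorOpts, configurerOpts, "10M");

    // constructed with selectorOpt=false, so the converted value goes to configurerOpts
    CompactionSettings.OUTPUT_COMPRESSION_OPT.put(selectorOpts, configurerOpts, "gz");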
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java b/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java
index 785e44a..890f817 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java
@@ -43,6 +43,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
 
 /**
  * A configuration object.
@@ -198,6 +199,16 @@ public abstract class AccumuloConfiguration implements Iterable<Entry<String,Str
     return prefixProps.props;
   }
 
+  public Map<String,String> getAllPropertiesWithPrefixStripped(Property prefix) {
+    var builder = ImmutableMap.<String,String>builder();
+    getAllPropertiesWithPrefix(prefix).forEach((k, v) -> {
+      String optKey = k.substring(prefix.getKey().length());
+      builder.put(optKey, v);
+    });
+
+    return builder.build();
+  }
+
   /**
    * Gets a property of type {@link PropertyType#BYTES} or {@link PropertyType#MEMORY}, interpreting
    * the value properly.
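
A sketch of what the new helper returns, assuming a configuration object named conf that contains table.compaction.dispatcher.opts.p1=abc and table.compaction.dispatcher.opts.p9=123:

    Map<String,String> opts =
        conf.getAllPropertiesWithPrefixStripped(Property.TABLE_COMPACTION_DISPATCHER_OPTS);
    // opts now contains {p1=abc, p9=123}; the prefix has been stripped from each key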
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index 494541c..c93402e 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -32,6 +32,8 @@ import org.apache.accumulo.core.file.rfile.RFile;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 import org.apache.accumulo.core.iteratorsImpl.system.DeletingIterator;
 import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.spi.compaction.DefaultCompactionPlanner;
+import org.apache.accumulo.core.spi.compaction.SimpleCompactionDispatcher;
 import org.apache.accumulo.core.spi.scan.ScanDispatcher;
 import org.apache.accumulo.core.spi.scan.ScanPrioritizer;
 import org.apache.accumulo.core.spi.scan.SimpleScanDispatcher;
@@ -359,10 +361,6 @@ public enum Property {
   TSERV_WALOG_TOLERATED_MAXIMUM_WAIT_DURATION("tserver.walog.maximum.wait.duration", "5m",
       PropertyType.TIMEDURATION,
       "The maximum amount of time to wait after a failure to create or write a write-ahead log."),
-  TSERV_MAJC_DELAY("tserver.compaction.major.delay", "30s", PropertyType.TIMEDURATION,
-      "Time a tablet server will sleep between checking which tablets need compaction."),
-  TSERV_MAJC_THREAD_MAXOPEN("tserver.compaction.major.thread.files.open.max", "10",
-      PropertyType.COUNT, "Max number of RFiles a major compaction thread can open at once. "),
   TSERV_SCAN_MAX_OPENFILES("tserver.scan.files.open.max", "100", PropertyType.COUNT,
       "Maximum total RFiles that all tablets in a tablet server can open for scans. "),
   TSERV_MAX_IDLE("tserver.files.open.idle", "1m", PropertyType.TIMEDURATION,
@@ -410,6 +408,53 @@ public enum Property {
       "The number of threads for the metadata table scan executor."),
   TSERV_MIGRATE_MAXCONCURRENT("tserver.migrations.concurrent.max", "1", PropertyType.COUNT,
       "The maximum number of concurrent tablet migrations for a tablet server"),
+  TSERV_MAJC_DELAY("tserver.compaction.major.delay", "30s", PropertyType.TIMEDURATION,
+      "Time a tablet server will sleep between checking which tablets need compaction."),
+  TSERV_COMPACTION_SERVICE_PREFIX("tserver.compaction.major.service.", null, PropertyType.PREFIX,
+      "Prefix for compaction services."),
+  TSERV_COMPACTION_SERVICE_ROOT_PLANNER("tserver.compaction.major.service.root.planner",
+      DefaultCompactionPlanner.class.getName(), PropertyType.CLASSNAME,
+      "Compaction planner for root tablet service"),
+  TSERV_COMPACTION_SERVICE_ROOT_MAX_OPEN(
+      "tserver.compaction.major.service.root.planner.opts.maxOpen", "30", PropertyType.COUNT,
+      "The maximum number of files a compaction will open"),
+  TSERV_COMPACTION_SERVICE_ROOT_EXECUTORS(
+      "tserver.compaction.major.service.root.planner.opts.executors",
+      "[{'name':'small','maxSize':'32M','numThreads':1},"
+          + "{'name':'huge','numThreads':1}]".replaceAll("'", "\""),
+      PropertyType.STRING,
+      "See {% jlink -f org.apache.accumulo.core.spi.compaction.DefaultCompactionPlanner %} "),
+  TSERV_COMPACTION_SERVICE_META_PLANNER("tserver.compaction.major.service.meta.planner",
+      DefaultCompactionPlanner.class.getName(), PropertyType.CLASSNAME,
+      "Compaction planner for metadata table"),
+  TSERV_COMPACTION_SERVICE_META_MAX_OPEN(
+      "tserver.compaction.major.service.meta.planner.opts.maxOpen", "30", PropertyType.COUNT,
+      "The maximum number of files a compaction will open"),
+  TSERV_COMPACTION_SERVICE_META_EXECUTORS(
+      "tserver.compaction.major.service.meta.planner.opts.executors",
+      "[{'name':'small','maxSize':'32M','numThreads':2},"
+          + "{'name':'huge','numThreads':2}]".replaceAll("'", "\""),
+      PropertyType.STRING,
+      "See {% jlink -f org.apache.accumulo.core.spi.compaction.DefaultCompactionPlanner %} "),
+  TSERV_COMPACTION_SERVICE_DEFAULT_PLANNER("tserver.compaction.major.service.default.planner",
+      DefaultCompactionPlanner.class.getName(), PropertyType.CLASSNAME,
+      "Planner for default compaction service."),
+  TSERV_COMPACTION_SERVICE_DEFAULT_MAX_OPEN(
+      "tserver.compaction.major.service.default.planner.opts.maxOpen", "10", PropertyType.COUNT,
+      "The maximum number of files a compaction will open"),
+  TSERV_COMPACTION_SERVICE_DEFAULT_EXECUTORS(
+      "tserver.compaction.major.service.default.planner.opts.executors",
+      "[{'name':'small','maxSize':'32M','numThreads':2},"
+          + "{'name':'medium','maxSize':'128M','numThreads':2},"
+          + "{'name':'large','numThreads':2}]".replaceAll("'", "\""),
+      PropertyType.STRING,
+      "See {% jlink -f org.apache.accumulo.core.spi.compaction.DefaultCompactionPlanner %} "),
+  @Deprecated(since = "2.1.0", forRemoval = true)
+  @ReplacedBy(property = Property.TSERV_COMPACTION_SERVICE_DEFAULT_MAX_OPEN)
+  TSERV_MAJC_THREAD_MAXOPEN("tserver.compaction.major.thread.files.open.max", "10",
+      PropertyType.COUNT, "Max number of RFiles a major compaction thread can open at once. "),
+  @Deprecated(since = "2.1.0", forRemoval = true)
+  @ReplacedBy(property = Property.TSERV_COMPACTION_SERVICE_DEFAULT_EXECUTORS)
   TSERV_MAJC_MAXCONCURRENT("tserver.compaction.major.concurrent.max", "3", PropertyType.COUNT,
       "The maximum number of concurrent major compactions for a tablet server"),
   TSERV_MAJC_THROUGHPUT("tserver.compaction.major.throughput", "0B", PropertyType.BYTES,
@@ -624,9 +669,8 @@ public enum Property {
       "Prefix to be used for user defined arbitrary properties."),
   TABLE_MAJC_RATIO("table.compaction.major.ratio", "3", PropertyType.FRACTION,
       "Minimum ratio of total input size to maximum input RFile size for"
-          + " running a major compaction. When adjusting this property you may want to"
-          + " also adjust table.file.max. Want to avoid the situation where only"
-          + " merging minor compactions occur."),
+          + " running a major compaction. "),
+  @Deprecated(since = "2.1.0", forRemoval = true)
   TABLE_MAJC_COMPACTALL_IDLETIME("table.compaction.major.everything.idle", "1h",
       PropertyType.TIMEDURATION,
       "After a tablet has been idle (no mutations) for this time period it may"
@@ -646,10 +690,30 @@ public enum Property {
       "After a tablet has been idle (no mutations) for this time period it may have its "
           + "in-memory map flushed to disk in a minor compaction. There is no guarantee an idle "
           + "tablet will be compacted."),
-  TABLE_MINC_MAX_MERGE_FILE_SIZE("table.compaction.minor.merge.file.size.max", "0",
-      PropertyType.BYTES,
-      "The max RFile size used for a merging minor compaction. The default"
-          + " value of 0 disables a max file size."),
+  TABLE_COMPACTION_DISPATCHER("table.compaction.dispatcher",
+      SimpleCompactionDispatcher.class.getName(), PropertyType.CLASSNAME,
+      "A configurable dispatcher that decides what comaction service a table should use."),
+  TABLE_COMPACTION_DISPATCHER_OPTS("table.compaction.dispatcher.opts.", null, PropertyType.PREFIX,
+      "Options for the table compaction dispatcher"),
+  TABLE_COMPACTION_SELECTOR("table.compaction.selector", "", PropertyType.CLASSNAME,
+      "A configurable selector for a table that can periodically select file for mandatory "
+          + "compaction, even if the files do not meet the compaction ratio."),
+  TABLE_COMPACTION_SELECTOR_OPTS("table.compaction.selector.opts.", null, PropertyType.PREFIX,
+      "Options for the table compaction dispatcher"),
+  TABLE_COMPACTION_CONFIGURER("table.compaction.configurer", "", PropertyType.CLASSNAME,
+      "A plugin that can dynamically configure compaction output files based on input files."),
+  TABLE_COMPACTION_CONFIGURER_OPTS("table.compaction.configurer.opts.", null, PropertyType.PREFIX,
+      "Options for the table compaction configuror"),
+  @Deprecated(since = "2.1.0", forRemoval = true)
+  @ReplacedBy(property = TABLE_COMPACTION_SELECTOR)
+  TABLE_COMPACTION_STRATEGY("table.majc.compaction.strategy",
+      "org.apache.accumulo.tserver.compaction.DefaultCompactionStrategy", PropertyType.CLASSNAME,
+      "Deprecated since 2.1.0 See {% jlink -f org.apache.accumulo.core.spi.compaction}"),
+  @Deprecated(since = "2.1.0", forRemoval = true)
+  @ReplacedBy(property = TABLE_COMPACTION_SELECTOR_OPTS)
+  TABLE_COMPACTION_STRATEGY_PREFIX("table.majc.compaction.strategy.opts.", null,
+      PropertyType.PREFIX,
+      "Properties in this category are used to configure the compaction strategy."),
   TABLE_SCAN_DISPATCHER("table.scan.dispatcher", SimpleScanDispatcher.class.getName(),
       PropertyType.CLASSNAME,
       "This class is used to dynamically dispatch scans to configured scan executors.  Configured "
@@ -789,12 +853,6 @@ public enum Property {
       PropertyType.STRING, "The ScanInterpreter class to apply on scan arguments in the shell"),
   TABLE_CLASSPATH("table.classpath.context", "", PropertyType.STRING,
       "Per table classpath context"),
-  TABLE_COMPACTION_STRATEGY("table.majc.compaction.strategy",
-      "org.apache.accumulo.tserver.compaction.DefaultCompactionStrategy", PropertyType.CLASSNAME,
-      "A customizable major compaction strategy."),
-  TABLE_COMPACTION_STRATEGY_PREFIX("table.majc.compaction.strategy.opts.", null,
-      PropertyType.PREFIX,
-      "Properties in this category are used to configure the compaction strategy."),
   TABLE_REPLICATION("table.replication", "false", PropertyType.BOOLEAN,
       "Is replication enabled for the given table"),
   TABLE_REPLICATION_TARGET("table.replication.target.", null, PropertyType.PREFIX,
@@ -1203,7 +1261,10 @@ public enum Property {
             || key.startsWith(Property.TABLE_ARBITRARY_PROP_PREFIX.getKey())
             || key.startsWith(TABLE_SAMPLER_OPTS.getKey())
             || key.startsWith(TABLE_SUMMARIZER_PREFIX.getKey())
-            || key.startsWith(TABLE_SCAN_DISPATCHER_OPTS.getKey())));
+            || key.startsWith(TABLE_SCAN_DISPATCHER_OPTS.getKey())
+            || key.startsWith(TABLE_COMPACTION_DISPATCHER_OPTS.getKey())
+            || key.startsWith(TABLE_COMPACTION_CONFIGURER_OPTS.getKey())
+            || key.startsWith(TABLE_COMPACTION_SELECTOR_OPTS.getKey())));
   }
 
   private static final EnumSet<Property> fixedProperties =
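
To see how the new property names fit together, here is a hypothetical sketch of defining an additional compaction service; the service name cs1 and the sizes are made up:

    Map<String,String> tserverProps = Map.of(
        "tserver.compaction.major.service.cs1.planner", DefaultCompactionPlanner.class.getName(),
        "tserver.compaction.major.service.cs1.planner.opts.maxOpen", "30",
        "tserver.compaction.major.service.cs1.planner.opts.executors",
        "[{\"name\":\"small\",\"maxSize\":\"32M\",\"numThreads\":2},{\"name\":\"large\",\"numThreads\":2}]");

A table would then be steered to that service by its configured CompactionDispatcher, whose options use the table.compaction.dispatcher.opts. prefix added above.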
diff --git a/core/src/main/java/org/apache/accumulo/core/logging/TabletLogger.java b/core/src/main/java/org/apache/accumulo/core/logging/TabletLogger.java
index ae5fa25..f67a256 100644
--- a/core/src/main/java/org/apache/accumulo/core/logging/TabletLogger.java
+++ b/core/src/main/java/org/apache/accumulo/core/logging/TabletLogger.java
@@ -24,14 +24,21 @@ import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.accumulo.core.client.admin.CompactionConfig;
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
 import org.apache.accumulo.core.dataImpl.KeyExtent;
 import org.apache.accumulo.core.metadata.TabletFile;
 import org.apache.accumulo.core.metadata.schema.Ample;
+import org.apache.accumulo.core.spi.compaction.CompactionJob;
+import org.apache.accumulo.core.spi.compaction.CompactionKind;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
 import org.apache.accumulo.core.util.HostAndPort;
+import org.apache.commons.io.FileUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.collect.Collections2;
+
 /**
  * This class contains source for logs messages about a tablets internal state, like its location,
  * set of files, metadata.
@@ -99,16 +106,47 @@ public class TabletLogger {
     }
   }
 
-  public static void compacted(KeyExtent extent, Collection<? extends TabletFile> inputs,
-      TabletFile output) {
-    fileLog.debug("Compacted {} created {} from {}", extent, output, inputs);
+  private static String getSize(Collection<CompactableFile> files) {
+    long sum = files.stream().mapToLong(CompactableFile::getEstimatedSize).sum();
+    return FileUtils.byteCountToDisplaySize(sum);
+  }
+
+  /**
+   * Lazily converts CompactableFile objects to file names. The laziness matters because the
+   * conversion is skipped when debug logging is disabled.
+   */
+  private static Collection<String> asFileNames(Collection<CompactableFile> files) {
+    return Collections2.transform(files, CompactableFile::getFileName);
+  }
+
+  public static void selected(KeyExtent extent, CompactionKind kind,
+      Collection<? extends TabletFile> inputs) {
+    fileLog.trace("{} changed compaction selection set for {} new set {}", extent, kind,
+        Collections2.transform(inputs, TabletFile::getFileName));
+  }
+
+  public static void compacting(KeyExtent extent, CompactionJob job, CompactionConfig config) {
+    if (fileLog.isDebugEnabled()) {
+      if (config == null) {
+        fileLog.debug("Compacting {} on {} for {} from {} size {}", extent, job.getExecutor(),
+            job.getKind(), asFileNames(job.getFiles()), getSize(job.getFiles()));
+      } else {
+        fileLog.debug("Compacting {} on {} for {} from {} size {} config {}", extent,
+            job.getExecutor(), job.getKind(), asFileNames(job.getFiles()), getSize(job.getFiles()),
+            config);
+      }
+    }
+  }
+
+  public static void compacted(KeyExtent extent, CompactionJob job, TabletFile output) {
+    fileLog.debug("Compacted {} for {} created {} from {}", extent, job.getKind(), output,
+        asFileNames(job.getFiles()));
   }
 
-  public static void flushed(KeyExtent extent, TabletFile absMergeFile, TabletFile newDatafile) {
-    if (absMergeFile == null)
-      fileLog.debug("Flushed {} created {} from [memory]", extent, newDatafile);
-    else
-      fileLog.debug("Flushed {} created {} from [memory,{}]", extent, newDatafile, absMergeFile);
+  public static void flushed(KeyExtent extent, TabletFile newDatafile) {
+    fileLog.debug("Flushed {} created {} from [memory]", extent, newDatafile);
   }
 
   public static void bulkImported(KeyExtent extent, TabletFile file) {
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/CompactableFileImpl.java b/core/src/main/java/org/apache/accumulo/core/metadata/CompactableFileImpl.java
new file mode 100644
index 0000000..a81d2c6
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/CompactableFileImpl.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import java.net.URI;
+import java.util.Objects;
+
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+
+public class CompactableFileImpl implements CompactableFile {
+
+  private final StoredTabletFile storedTabletFile;
+  private final DataFileValue dataFileValue;
+
+  public CompactableFileImpl(URI uri, long size, long entries) {
+    this.storedTabletFile = new StoredTabletFile(uri.toString());
+    this.dataFileValue = new DataFileValue(size, entries);
+  }
+
+  public CompactableFileImpl(StoredTabletFile storedTabletFile, DataFileValue dataFileValue) {
+    this.storedTabletFile = Objects.requireNonNull(storedTabletFile);
+    this.dataFileValue = Objects.requireNonNull(dataFileValue);
+  }
+
+  @Override
+  public URI getUri() {
+    return storedTabletFile.getPath().toUri();
+  }
+
+  @Override
+  public String getFileName() {
+    return storedTabletFile.getFileName();
+  }
+
+  @Override
+  public long getEstimatedSize() {
+    return dataFileValue.getSize();
+  }
+
+  @Override
+  public long getEstimatedEntries() {
+    return dataFileValue.getNumEntries();
+  }
+
+  public StoredTabletFile getStortedTabletFile() {
+    return storedTabletFile;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o instanceof CompactableFileImpl) {
+      var ocfi = (CompactableFileImpl) o;
+
+      return storedTabletFile.equals(ocfi.storedTabletFile);
+    }
+
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    return storedTabletFile.hashCode();
+  }
+
+  public static StoredTabletFile toStoredTabletFile(CompactableFile cf) {
+    if (cf instanceof CompactableFileImpl) {
+      return ((CompactableFileImpl) cf).storedTabletFile;
+    } else {
+      throw new IllegalArgumentException("Can not convert " + cf.getClass());
+    }
+  }
+
+  @Override
+  public String toString() {
+    return "[" + storedTabletFile.getFileName() + ", " + dataFileValue + "]";
+  }
+}
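
A rough usage sketch of the new class; the URI, size, and entry count are invented:

    CompactableFile cf = new CompactableFileImpl(
        URI.create("hdfs://nn:8020/accumulo/tables/2a/t-0001/F0000abc.rf"), 32_000_000, 100_000);

    String name = cf.getFileName();          // the file name portion, e.g. F0000abc.rf
    long size = cf.getEstimatedSize();       // 32000000, taken from the DataFileValue
    long entries = cf.getEstimatedEntries(); // 100000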
diff --git a/core/src/main/java/org/apache/accumulo/core/singletons/SingletonManager.java b/core/src/main/java/org/apache/accumulo/core/singletons/SingletonManager.java
index 1b4fe1b..bb929d4 100644
--- a/core/src/main/java/org/apache/accumulo/core/singletons/SingletonManager.java
+++ b/core/src/main/java/org/apache/accumulo/core/singletons/SingletonManager.java
@@ -202,5 +202,4 @@ public class SingletonManager {
       enabled = true;
     }
   }
-
 }
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionDirectives.java
similarity index 57%
copy from server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java
copy to core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionDirectives.java
index 4a1b5ba..e3370eb 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java
+++ b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionDirectives.java
@@ -16,24 +16,33 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.accumulo.tserver.compaction;
+package org.apache.accumulo.core.spi.compaction;
 
 /**
- * The default compaction strategy for user initiated compactions. This strategy will always select
- * all files.
+ * The directions returned by a {@link CompactionDispatcher}, indicating which compaction service
+ * a compaction should use.
+ *
+ * @since 2.1.0
+ * @see org.apache.accumulo.core.spi.compaction
  */
+public interface CompactionDirectives {
+
+  /**
+   * @return The service where a compaction should run.
+   */
+  CompactionServiceId getService();
+
+  /**
+   * @since 2.1.0
+   */
+  public static interface Builder {
+    Builder setService(CompactionServiceId service);
 
-public class EverythingCompactionStrategy extends CompactionStrategy {
+    Builder setService(String compactionServiceId);
 
-  @Override
-  public boolean shouldCompact(MajorCompactionRequest request) {
-    return true; // ACCUMULO-3645 compact for empty files too
+    CompactionDirectives build();
   }
 
-  @Override
-  public CompactionPlan getCompactionPlan(MajorCompactionRequest request) {
-    CompactionPlan plan = new CompactionPlan();
-    plan.inputFiles.addAll(request.getFiles().keySet());
-    return plan;
+  public static Builder builder() {
+    return CompactionsDirectiveImpl.DEFAULT_BUILDER;
   }
 }
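
A minimal usage sketch of the builder; the service name is illustrative:

    CompactionDirectives directives =
        CompactionDirectives.builder().setService("service_A").build();
    CompactionServiceId serviceId = directives.getService();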
diff --git a/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionDispatcher.java b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionDispatcher.java
new file mode 100644
index 0000000..7d6960c
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionDispatcher.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.spi.compaction;
+
+import java.util.Map;
+
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.spi.common.ServiceEnvironment;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Can be configured per table to dispatch compactions to different compaction services. For a given
+ * table the dispatcher can choose a different compaction service for each kind of compaction. For
+ * example, user and chop compactions could be dispatched to service_A while system compactions are
+ * dispatched to service_B.
+ *
+ * @since 2.1.0
+ * @see org.apache.accumulo.core.spi.compaction
+ */
+public interface CompactionDispatcher {
+  /**
+   * The method parameters for {@link CompactionDispatcher#init(InitParameters)}. This interface
+   * exists so the API can evolve and additional parameters can be passed to the method in the
+   * future.
+   *
+   * @since 2.1.0
+   */
+  public interface InitParameters {
+    /**
+     *
+     * @return The configured options. For example if the table properties
+     *         {@code table.compaction.dispatcher.opts.p1=abc} and
+     *         {@code table.compaction.dispatcher.opts.p9=123} were set, then this map would contain
+     *         {@code p1=abc} and {@code p9=123}.
+     */
+    Map<String,String> getOptions();
+
+    TableId getTableId();
+
+    ServiceEnvironment getServiceEnv();
+  }
+
+  /**
+   * This method is called once after a CompactionDispatcher is instantiated.
+   */
+  default void init(InitParameters params) {
+    Preconditions.checkArgument(params.getOptions().isEmpty(), "No options expected");
+  }
+
+  /**
+   * The method parameters for {@link CompactionDispatcher#dispatch(DispatchParameters)}. This
+   * interface exists so the API can evolve and additional parameters can be passed to the method in
+   * the future.
+   *
+   * @since 2.1.0
+   */
+  public interface DispatchParameters {
+    /**
+     * @return the currently configured compaction services
+     */
+    CompactionServices getCompactionServices();
+
+    ServiceEnvironment getServiceEnv();
+
+    CompactionKind getCompactionKind();
+
+    Map<String,String> getExecutionHints();
+  }
+
+  /**
+   * Accumulo calls this method for compactions to determine what service to use.
+   */
+  CompactionDirectives dispatch(DispatchParameters params);
+}
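
A minimal sketch, not part of this commit, of a dispatcher that routes user compactions to one service and everything else to another; the class and service names are invented:

    package org.example;

    import org.apache.accumulo.core.spi.compaction.CompactionDirectives;
    import org.apache.accumulo.core.spi.compaction.CompactionDispatcher;
    import org.apache.accumulo.core.spi.compaction.CompactionKind;

    public class KindBasedDispatcher implements CompactionDispatcher {
      @Override
      public CompactionDirectives dispatch(DispatchParameters params) {
        // user initiated compactions go to a dedicated service
        if (params.getCompactionKind() == CompactionKind.USER) {
          return CompactionDirectives.builder().setService("service_A").build();
        }
        // system, chop, and selector compactions share the other service
        return CompactionDirectives.builder().setService("service_B").build();
      }
    }

The default init implementation rejects options, so a dispatcher like this would be configured with table.compaction.dispatcher alone.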
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionExecutorId.java
similarity index 57%
copy from server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java
copy to core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionExecutorId.java
index 4a1b5ba..91e8bb0 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java
+++ b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionExecutorId.java
@@ -16,24 +16,25 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.accumulo.tserver.compaction;
+package org.apache.accumulo.core.spi.compaction;
+
+import org.apache.accumulo.core.data.AbstractId;
 
 /**
- * The default compaction strategy for user initiated compactions. This strategy will always select
- * all files.
+ * A unique identifier for a compaction executor that a {@link CompactionPlanner} can schedule
+ * compactions on using a {@link CompactionJob}.
+ *
+ * @since 2.1.0
+ * @see org.apache.accumulo.core.spi.compaction
  */
+public class CompactionExecutorId extends AbstractId<CompactionExecutorId> {
+  private static final long serialVersionUID = 1L;
 
-public class EverythingCompactionStrategy extends CompactionStrategy {
-
-  @Override
-  public boolean shouldCompact(MajorCompactionRequest request) {
-    return true; // ACCUMULO-3645 compact for empty files too
+  private CompactionExecutorId(String canonical) {
+    super(canonical);
   }
 
-  @Override
-  public CompactionPlan getCompactionPlan(MajorCompactionRequest request) {
-    CompactionPlan plan = new CompactionPlan();
-    plan.inputFiles.addAll(request.getFiles().keySet());
-    return plan;
+  public static CompactionExecutorId of(String canonical) {
+    return new CompactionExecutorId(canonical);
   }
 }
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionJob.java
similarity index 57%
copy from server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java
copy to core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionJob.java
index 4a1b5ba..d651a70 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java
+++ b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionJob.java
@@ -16,24 +16,35 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.accumulo.tserver.compaction;
+package org.apache.accumulo.core.spi.compaction;
+
+import java.util.Set;
+
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
 
 /**
- * The default compaction strategy for user initiated compactions. This strategy will always select
- * all files.
+ * An immutable object that describes what files to compact and where to compact them.
+ *
+ * @since 2.1.0
+ * @see org.apache.accumulo.core.spi.compaction
  */
+public interface CompactionJob {
+
+  long getPriority();
+
+  /**
+   * @return The executor to run the job.
+   */
+  CompactionExecutorId getExecutor();
 
-public class EverythingCompactionStrategy extends CompactionStrategy {
+  /**
+   * @return The files to compact
+   */
+  Set<CompactableFile> getFiles();
 
-  @Override
-  public boolean shouldCompact(MajorCompactionRequest request) {
-    return true; // ACCUMULO-3645 compact for empty files too
-  }
+  /**
+   * @return The kind of compaction this is.
+   */
+  CompactionKind getKind();
 
-  @Override
-  public CompactionPlan getCompactionPlan(MajorCompactionRequest request) {
-    CompactionPlan plan = new CompactionPlan();
-    plan.inputFiles.addAll(request.getFiles().keySet());
-    return plan;
-  }
 }
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionKind.java
similarity index 58%
copy from server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java
copy to core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionKind.java
index 4a1b5ba..3b79051 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java
+++ b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionKind.java
@@ -16,24 +16,30 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.accumulo.tserver.compaction;
+package org.apache.accumulo.core.spi.compaction;
+
+import org.apache.accumulo.core.client.admin.compaction.CompactionSelector;
 
 /**
- * The default compaction strategy for user initiated compactions. This strategy will always select
- * all files.
+ * @since 2.1.0
+ * @see org.apache.accumulo.core.spi.compaction
  */
-
-public class EverythingCompactionStrategy extends CompactionStrategy {
-
-  @Override
-  public boolean shouldCompact(MajorCompactionRequest request) {
-    return true; // ACCUMULO-3645 compact for empty files too
-  }
-
-  @Override
-  public CompactionPlan getCompactionPlan(MajorCompactionRequest request) {
-    CompactionPlan plan = new CompactionPlan();
-    plan.inputFiles.addAll(request.getFiles().keySet());
-    return plan;
-  }
+public enum CompactionKind {
+  /**
+   * A system initiated routine compaction.
+   */
+  SYSTEM,
+  /**
+   * Set of files selected by a {@link CompactionSelector} or CompactionStrategy configured for a
+   * table.
+   */
+  SELECTOR,
+  /**
+   * A one-time compaction initiated by a user through an Accumulo client.
+   */
+  USER,
+  /**
+   * A compaction executed prior to merging tablets.
+   */
+  CHOP
 }
diff --git a/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionPlan.java b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionPlan.java
new file mode 100644
index 0000000..17ff653
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionPlan.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.spi.compaction;
+
+import java.util.Collection;
+
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.spi.compaction.CompactionPlanner.PlanningParameters;
+
+/**
+ * The return value of {@link CompactionPlanner#makePlan(PlanningParameters)} that is created using
+ * {@link PlanningParameters#createPlanBuilder()}
+ *
+ * @since 2.1.0
+ * @see CompactionPlanner
+ * @see org.apache.accumulo.core.spi.compaction
+ */
+public interface CompactionPlan {
+
+  /**
+   * @since 2.1.0
+   * @see PlanningParameters#createPlanBuilder()
+   */
+  interface Builder {
+    /**
+     * @param priority
+     *          This determines the order in which the job is taken off the execution queue. Larger
+     *          numbers are taken off the queue first. If two jobs are on the queue, one with a
+     *          priority of 4 and another with 5, then the one with 5 will be taken first.
+     * @param executor
+     *          Where the job should run.
+     * @param group
+     *          The files to compact.
+     * @return this
+     */
+    Builder addJob(long priority, CompactionExecutorId executor, Collection<CompactableFile> group);
+
+    CompactionPlan build();
+  }
+
+  Collection<CompactionJob> getJobs();
+}
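
A tiny sketch of the priority semantics described above, assuming params is a PlanningParameters, executorA and executorB are CompactionExecutorIds, and groupA and groupB are collections of CompactableFile:

    CompactionPlan plan = params.createPlanBuilder()
        .addJob(5, executorA, groupA) // dequeued before the priority 4 job
        .addJob(4, executorB, groupB)
        .build();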
diff --git a/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionPlanner.java b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionPlanner.java
new file mode 100644
index 0000000..523c65a
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionPlanner.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.spi.compaction;
+
+import java.util.Collection;
+import java.util.Map;
+
+import org.apache.accumulo.core.client.admin.CompactionConfig;
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.spi.common.ServiceEnvironment;
+
+/**
+ * Plans compaction work for a compaction service.
+ *
+ * @since 2.1.0
+ * @see org.apache.accumulo.core.spi.compaction
+ */
+public interface CompactionPlanner {
+
+  /**
+   * This interface exists so the API can evolve and additional parameters can be passed to the
+   * method in the future.
+   *
+   * @since 2.1.0
+   */
+  public interface InitParameters {
+    ServiceEnvironment getServiceEnvironment();
+
+    /**
+     * @return The configured options. For example if the system properties
+     *         {@code tserver.compaction.major.service.s1.planner.opts.p1=abc} and
+     *         {@code tserver.compaction.major.service.s1.planner.opts.p9=123} were set, then this
+     *         map would contain {@code p1=abc} and {@code p9=123}. In this example {@code s1} is
+     *         the identifier for the compaction service. Each compaction service has a single
+     *         planner.
+     */
+    Map<String,String> getOptions();
+
+    /**
+     * @return the fully qualified property name for a given key from the map returned by
+     *         {@link #getOptions()}. For example, if a planner were being initialized for
+     *         compaction service {@code CS9} and this method were passed {@code prop1}, it would
+     *         return {@code tserver.compaction.major.service.CS9.planner.opts.prop1}.
+     */
+    String getFullyQualifiedOption(String key);
+
+    /**
+     * @return an executor manager that can be used to create thread pools within a compaction
+     *         service.
+     */
+    ExecutorManager getExecutorManager();
+  }
+
+  public void init(InitParameters params);
+
+  /**
+   * This interface exists so the API can evolve and additional parameters can be passed to the
+   * method in the future.
+   *
+   * @since 2.1.0
+   */
+  public interface PlanningParameters {
+
+    /**
+     * @return The id of the table that compactions are being planned for.
+     * @see ServiceEnvironment#getTableName(TableId)
+     */
+    TableId getTableId();
+
+    ServiceEnvironment getServiceEnvironment();
+
+    CompactionKind getKind();
+
+    /**
+     * @return the compaction ratio configured for the table
+     */
+    double getRatio();
+
+    /**
+     * @return the set of all files a tablet has.
+     */
+    Collection<CompactableFile> getAll();
+
+    /**
+     * @return the set of files that could be compacted depending on what {@link #getKind()}
+     *         returns.
+     */
+    Collection<CompactableFile> getCandidates();
+
+    /**
+     * @return jobs that are currently running
+     */
+    Collection<CompactionJob> getRunningCompactions();
+
+    /**
+     * @return For a user compaction (when {@link #getKind()} returns {@link CompactionKind#USER})
+     *         where the user set execution hints via
+     *         {@link CompactionConfig#setExecutionHints(Map)} this will return those hints.
+     *         Otherwise this will return an immutable empty map.
+     */
+    Map<String,String> getExecutionHints();
+
+    /**
+     * @return A compaction plan builder that must be used to create a compaction plan.
+     */
+    CompactionPlan.Builder createPlanBuilder();
+  }
+
+  /**
+   * <p>
+   * Plan what work a compaction service should do. The kind of compaction returned by
+   * {@link PlanningParameters#getKind()} determines what must be done with the files returned by
+   * {@link PlanningParameters#getCandidates()}. The following are the expectations for the
+   * candidates for each kind.
+   *
+   * <ul>
+   * <li>CompactionKind.SYSTEM. The planner is not required to do anything with the candidates and
+   * can choose to compact zero or more of them. The candidates may represent a subset of all the
+   * files in the case where a user compaction is in progress or other compactions are running.
+   * <li>CompactionKind.USER and CompactionKind.SELECTOR. The planner is required to eventually
+   * compact all candidates. It is OK to return a compaction plan that compacts a subset. When the
+   * planner compacts a subset, it will be called again later; at that point the candidates will
+   * contain the files it did not compact plus the results of any previous compactions it
+   * scheduled. The planner must eventually compact all of the files in the candidate set down to a
+   * single file. The compaction service will keep calling the planner until it does.
+   * <li>CompactionKind.CHOP. The planner is required to eventually compact all candidates. One
+   * major difference with USER compactions is this kind is not required to compact all files to a
+   * single file. It is ok to return a compaction plan that compacts a subset of the candidates.
+   * When the planner compacts a subset, it will eventually be called later. When it is called later
+   * the candidates will contain the files it did not compact.
+   * </ul>
+   *
+   * <p>
+   * For a chop compaction assume the following happens.
+   * <ol>
+   * <li>The candidate set passed to makePlan contains the files {@code [F1,F2,F3,F4]} and kind is
+   * CHOP
+   * <li>The planner returns a job to compact files {@code [F1,F2]} on executor E1
+   * <li>The compaction runs compacting {@code [F1,F2]} into file {@code [F5]}
+   * </ol>
+   *
+   * <p>
+   * For the case above, the planner will eventually be called again with a candidate set of
+   * {@code [F3,F4]} and it must eventually compact those two files.
+   *
+   * <p>
+   * For a user or selector compaction, assume the same thing happens; it will result in a slightly
+   * different outcome.
+   * <ol>
+   * <li>The candidate set passed to makePlan contains the files {@code [F1,F2,F3,F4]} and kind is
+   * USER
+   * <li>The planner returns a job to compact files {@code [F1,F2]} on executor E1
+   * <li>The compaction runs compacting {@code [F1,F2]} into file {@code [F5]}
+   * </ol>
+   *
+   * <p>
+   * For the case above, the planner will eventually be called again with a candidate set of
+   * {@code [F3,F4,F5]} and it must eventually compact those three files to one. The difference
+   * from CHOP compactions is that the results of intermediate compactions are included in the
+   * candidate set.
+   *
+   * <p>
+   * When a planner returns a compaction plan, its jobs will be queued on executors. Previously
+   * queued jobs that do not match the latest plan are removed. The planner is called periodically,
+   * whenever a new file is added, and whenever a compaction finishes.
+   *
+   * <p>
+   * Use {@link PlanningParameters#createPlanBuilder()} to build the plan this function returns.
+   *
+   * @see CompactionKind
+   */
+  CompactionPlan makePlan(PlanningParameters params);
+}
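
A minimal sketch, not from this commit, of a planner that compacts all candidates in one job; the executor name and thread count are invented:

    package org.example;

    import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
    import org.apache.accumulo.core.spi.compaction.CompactionPlan;
    import org.apache.accumulo.core.spi.compaction.CompactionPlanner;

    public class CompactAllPlanner implements CompactionPlanner {

      private CompactionExecutorId executor;

      @Override
      public void init(InitParameters params) {
        // create a single executor for this compaction service
        executor = params.getExecutorManager().createExecutor("all", 2);
      }

      @Override
      public CompactionPlan makePlan(PlanningParameters params) {
        var builder = params.createPlanBuilder();
        // fewer than two candidates means there is nothing to merge
        if (params.getCandidates().size() >= 2) {
          builder.addJob(1, executor, params.getCandidates());
        }
        return builder.build();
      }
    }

Because it always compacts every candidate at once, this sketch trivially satisfies the requirement that USER and SELECTOR candidates eventually be compacted down to a single file.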
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionServiceId.java
similarity index 62%
copy from server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java
copy to core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionServiceId.java
index 792c0b6..d285051 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java
+++ b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionServiceId.java
@@ -16,10 +16,23 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.accumulo.tserver.compaction;
+package org.apache.accumulo.core.spi.compaction;
 
-public enum MajorCompactionReason {
-  // do not change the order, the order of this enum determines the order
-  // in which queued major compactions are executed
-  USER, CHOP, NORMAL, IDLE
+import org.apache.accumulo.core.data.AbstractId;
+
+/**
+ * A unique identifier for a compaction service
+ *
+ * @since 2.1.0
+ */
+public class CompactionServiceId extends AbstractId<CompactionServiceId> {
+  private static final long serialVersionUID = 1L;
+
+  private CompactionServiceId(String canonical) {
+    super(canonical);
+  }
+
+  public static CompactionServiceId of(String canonical) {
+    return new CompactionServiceId(canonical);
+  }
 }
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionServices.java
similarity index 77%
copy from server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java
copy to core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionServices.java
index 792c0b6..93e39fc 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java
+++ b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionServices.java
@@ -16,10 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.accumulo.tserver.compaction;
+package org.apache.accumulo.core.spi.compaction;
 
-public enum MajorCompactionReason {
-  // do not change the order, the order of this enum determines the order
-  // in which queued major compactions are executed
-  USER, CHOP, NORMAL, IDLE
+import java.util.Set;
+
+/**
+ * Provider of information about configured compaction services.
+ *
+ * @since 2.1.0
+ */
+public interface CompactionServices {
+  Set<CompactionServiceId> getIds();
 }
diff --git a/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionsDirectiveImpl.java b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionsDirectiveImpl.java
new file mode 100644
index 0000000..ae0e0ca
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionsDirectiveImpl.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.spi.compaction;
+
+import java.util.Objects;
+
+import org.apache.accumulo.core.spi.compaction.CompactionDirectives.Builder;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * This class is intentionally package private. The implementation is unusual because it supports
+ * zero object allocations for {@code CompactionDirectives.builder().build()}.
+ */
+class CompactionsDirectiveImpl implements Builder, CompactionDirectives {
+
+  private static final CompactionDirectives DEFAULT =
+      new CompactionsDirectiveImpl().setService(CompactionServiceId.of("default")).build();
+
+  static final Builder DEFAULT_BUILDER = new Builder() {
+    @Override
+    public Builder setService(CompactionServiceId service) {
+      return new CompactionsDirectiveImpl().setService(service);
+    }
+
+    @Override
+    public Builder setService(String compactionServiceId) {
+      return new CompactionsDirectiveImpl().setService(compactionServiceId);
+    }
+
+    @Override
+    public CompactionDirectives build() {
+      return DEFAULT;
+    }
+  };
+
+  boolean built = false;
+  private CompactionServiceId service;
+
+  @Override
+  public Builder setService(CompactionServiceId service) {
+    Objects.requireNonNull(service);
+    Preconditions.checkState(!built);
+    this.service = service;
+    return this;
+  }
+
+  @Override
+  public Builder setService(String compactionServiceId) {
+    return setService(CompactionServiceId.of(compactionServiceId));
+  }
+
+  @Override
+  public CompactionServiceId getService() {
+    Preconditions.checkState(built);
+    return service;
+  }
+
+  @Override
+  public CompactionDirectives build() {
+    built = true;
+    return this;
+  }
+
+  @Override
+  public String toString() {
+    return "service=" + service;
+  }
+}
diff --git a/core/src/main/java/org/apache/accumulo/core/spi/compaction/DefaultCompactionPlanner.java b/core/src/main/java/org/apache/accumulo/core/spi/compaction/DefaultCompactionPlanner.java
new file mode 100644
index 0000000..64cc4f8
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/spi/compaction/DefaultCompactionPlanner.java
@@ -0,0 +1,425 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.spi.compaction;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.util.compaction.CompactionJobPrioritizer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+import com.google.gson.Gson;
+
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+
+/**
+ * Finds the largest continuous set of small files that meet the compaction ratio and do not prevent
+ * future compactions.
+ *
+ * <p>
+ * The following configuration options are supported. Replace {@code <service>} with the name of the
+ * compaction service you are configuring.
+ *
+ * <ul>
+ * <li>{@code tserver.compaction.major.service.<service>.planner.opts.executors} This is a JSON
+ * array of objects where each object has the fields: name, maxSize, and numThreads. The maxSize
+ * field determines the maximum size of compaction that will run on an executor. The maxSize field
+ * can have a suffix of K,M,G for kilobytes, megabytes, or gigabytes. One executor can have no max
+ * size, and it will run everything that is too large for the other executors. If all executors
+ * have a max size, then system compactions will only run for compactions smaller than the largest
+ * max size. User, chop, and selector compactions will always run, even if there is no executor for
+ * their size. These compactions will run on the executor with the largest max size. The value for
+ * this field should look like
+ * {@code [{"name":"executor1","maxSize":"100M","numThreads":3},{"name":"executor2","maxSize":"500M","numThreads":3},{"name":"executor3","numThreads":3}]}.
+ * This configuration would run compactions less than 100M on executor1, compactions less than 500M
+ * on executor2, and all others on executor3.
+ * <li>{@code tserver.compaction.major.service.<service>.planner.opts.maxOpen} This determines the
+ * maximum number of files that will be included in a single compaction. A combined example follows
+ * this list.
+ * </ul>
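+ *
+ * <p>
+ * As an illustrative sketch only (the service name {@code cs1} and the values shown are
+ * hypothetical, not part of this change), the two options above could be set together like this:
+ *
+ * <pre>{@code
+ * tserver.compaction.major.service.cs1.planner.opts.executors=[{"name":"small","maxSize":"100M","numThreads":3},{"name":"large","numThreads":3}]
+ * tserver.compaction.major.service.cs1.planner.opts.maxOpen=15
+ * }</pre>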
+ *
+ * @since 2.1.0
+ * @see org.apache.accumulo.core.spi.compaction
+ */
+
+public class DefaultCompactionPlanner implements CompactionPlanner {
+
+  private static Logger log = LoggerFactory.getLogger(DefaultCompactionPlanner.class);
+
+  public static class ExecutorConfig {
+    String name;
+    String maxSize;
+    int numThreads;
+  }
+
+  private static class Executor {
+    final CompactionExecutorId ceid;
+    final Long maxSize;
+
+    public Executor(CompactionExecutorId ceid, Long maxSize) {
+      Preconditions.checkArgument(maxSize == null || maxSize > 0);
+      this.ceid = Objects.requireNonNull(ceid);
+      this.maxSize = maxSize;
+    }
+
+    Long getMaxSize() {
+      return maxSize;
+    }
+
+    @Override
+    public String toString() {
+      return "[ceid=" + ceid + ", maxSize=" + maxSize + "]";
+    }
+  }
+
+  private List<Executor> executors;
+  private int maxFilesToCompact;
+
+  @SuppressFBWarnings(value = "UWF_UNWRITTEN_FIELD", justification = "Field is written by Gson")
+  @Override
+  public void init(InitParameters params) {
+    ExecutorConfig[] execConfigs =
+        new Gson().fromJson(params.getOptions().get("executors"), ExecutorConfig[].class);
+
+    List<Executor> tmpExec = new ArrayList<>();
+
+    for (ExecutorConfig executorConfig : execConfigs) {
+      var ceid = params.getExecutorManager().createExecutor(executorConfig.name,
+          executorConfig.numThreads);
+      Long maxSize = executorConfig.maxSize == null ? null
+          : ConfigurationTypeHelper.getFixedMemoryAsBytes(executorConfig.maxSize);
+      tmpExec.add(new Executor(ceid, maxSize));
+    }
+
+    Collections.sort(tmpExec, Comparator.comparing(Executor::getMaxSize,
+        Comparator.nullsLast(Comparator.naturalOrder())));
+
+    executors = List.copyOf(tmpExec);
+
+    if (executors.stream().filter(e -> e.getMaxSize() == null).count() > 1) {
+      throw new IllegalArgumentException(
+          "Can only have one executor w/o a maxSize. " + params.getOptions().get("executors"));
+    }
+
+    determineMaxFilesToCompact(params);
+  }
+
+  @SuppressWarnings("removal")
+  private void determineMaxFilesToCompact(InitParameters params) {
+    String fqo = params.getFullyQualifiedOption("maxOpen");
+    if (!params.getServiceEnvironment().getConfiguration().isSet(fqo)
+        && params.getServiceEnvironment().getConfiguration()
+            .isSet(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey())) {
+      log.warn("The property " + Property.TSERV_MAJC_THREAD_MAXOPEN.getKey()
+          + " was set, it is deperecated.  Set the " + fqo + " option instead.");
+      this.maxFilesToCompact = Integer.parseInt(params.getServiceEnvironment().getConfiguration()
+          .get(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey()));
+    } else {
+      this.maxFilesToCompact = Integer.parseInt(params.getOptions().getOrDefault("maxOpen", "10"));
+    }
+  }
+
+  @Override
+  public CompactionPlan makePlan(PlanningParameters params) {
+    try {
+
+      if (params.getCandidates().isEmpty()) {
+        return params.createPlanBuilder().build();
+      }
+
+      Set<CompactableFile> filesCopy = new HashSet<>(params.getCandidates());
+
+      long maxSizeToCompact = getMaxSizeToCompact(params.getKind());
+
+      Collection<CompactableFile> group;
+      if (params.getRunningCompactions().isEmpty()) {
+        group = findMapFilesToCompact(filesCopy, params.getRatio(), maxFilesToCompact,
+            maxSizeToCompact);
+
+        if (!group.isEmpty() && group.size() < params.getCandidates().size()
+            && params.getCandidates().size() <= maxFilesToCompact
+            && (params.getKind() == CompactionKind.USER
+                || params.getKind() == CompactionKind.SELECTOR)) {
+          // USER and SELECTOR compactions must eventually compact all files. When a subset of files
+          // that meets the compaction ratio is selected, look ahead and see if the next compaction
+          // would also meet the compaction ratio. If not then compact everything to avoid doing
+          // more than logarithmic work across multiple compactions.
+
+          filesCopy.removeAll(group);
+          filesCopy.add(getExpected(group, 0));
+
+          if (findMapFilesToCompact(filesCopy, params.getRatio(), maxFilesToCompact,
+              maxSizeToCompact).isEmpty()) {
+            // The next possible compaction does not meet the compaction ratio, so compact
+            // everything.
+            group = Set.copyOf(params.getCandidates());
+          }
+
+        }
+
+      } else if (params.getKind() == CompactionKind.SYSTEM) {
+        // This code determines whether the files produced by the running compactions would, once
+        // finished, be included in a compaction with the files smaller than them. If so, then wait
+        // for the running compactions to complete.
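+        // For example (mirroring this planner's unit test): if files of sizes 3M and 3M are
+        // compacting with a ratio of 2 and files of sizes 11M, 12M, and 13M remain, the expected
+        // ~6M output would compact with the remaining files, so the planner waits.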
+
+        // The set of files the running compactions are expected to produce.
+        var expectedFiles = getExpected(params.getRunningCompactions());
+
+        if (!Collections.disjoint(filesCopy, expectedFiles)) {
+          throw new AssertionError();
+        }
+
+        filesCopy.addAll(expectedFiles);
+
+        group = findMapFilesToCompact(filesCopy, params.getRatio(), maxFilesToCompact,
+            maxSizeToCompact);
+
+        if (!Collections.disjoint(group, expectedFiles)) {
+          // file produced by running compaction will eventually compact with existing files, so
+          // wait.
+          group = Set.of();
+        }
+      } else {
+        group = Set.of();
+      }
+
+      if (group.isEmpty()
+          && (params.getKind() == CompactionKind.USER || params.getKind() == CompactionKind.SELECTOR
+              || params.getKind() == CompactionKind.CHOP)
+          && params.getRunningCompactions().stream()
+              .noneMatch(job -> job.getKind() == params.getKind())) {
+        group = findMaximalRequiredSetToCompact(params.getCandidates(), maxFilesToCompact);
+      }
+
+      if (group.isEmpty()) {
+        return params.createPlanBuilder().build();
+      } else {
+        // determine which executor to use based on the size of the files
+        var ceid = getExecutor(group);
+
+        return params.createPlanBuilder().addJob(createPriority(params), ceid, group).build();
+      }
+    } catch (RuntimeException e) {
+      throw e;
+    }
+  }
+
+  private static long createPriority(PlanningParameters params) {
+    return CompactionJobPrioritizer.createPriority(params.getKind(), params.getAll().size());
+  }
+
+  private long getMaxSizeToCompact(CompactionKind kind) {
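+    // Only system compactions are constrained by the largest executor's max size; user, chop, and
+    // selector compactions may exceed it and will run on the executor with the largest max size
+    // (see the class javadoc).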
+    if (kind == CompactionKind.SYSTEM) {
+      Long max = executors.get(executors.size() - 1).maxSize;
+      if (max != null)
+        return max;
+    }
+    return Long.MAX_VALUE;
+  }
+
+  private CompactableFile getExpected(Collection<CompactableFile> files, int count) {
+    long size = files.stream().mapToLong(CompactableFile::getEstimatedSize).sum();
+    try {
+      return CompactableFile.create(
+          new URI("hdfs://fake/accumulo/tables/adef/t-zzFAKEzz/FAKE-0000" + count + ".rf"), size,
+          0);
+    } catch (URISyntaxException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * @return the expected output files, with estimated sizes, for the sets of compacting files.
+   */
+  private Set<CompactableFile> getExpected(Collection<CompactionJob> compacting) {
+
+    Set<CompactableFile> expected = new HashSet<>();
+
+    int count = 0;
+
+    for (CompactionJob job : compacting) {
+      count++;
+      expected.add(getExpected(job.getFiles(), count));
+    }
+
+    return expected;
+  }
+
+  public static Collection<CompactableFile>
+      findMaximalRequiredSetToCompact(Collection<CompactableFile> files, int maxFilesToCompact) {
+
+    if (files.size() <= maxFilesToCompact)
+      return files;
+
+    List<CompactableFile> sortedFiles = sortByFileSize(files);
+
+    int numToCompact = maxFilesToCompact;
+
+    if (sortedFiles.size() > maxFilesToCompact && sortedFiles.size() < 2 * maxFilesToCompact) {
+      // on the second to last compaction pass, compact the minimum amount of files possible
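+      // For example, with 17 files and a limit of 15, compact the 3 smallest files now so that the
+      // resulting file plus the remaining 14 can be compacted together in a final pass of 15.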
+      numToCompact = sortedFiles.size() - maxFilesToCompact + 1;
+    }
+
+    return sortedFiles.subList(0, numToCompact);
+  }
+
+  public static Collection<CompactableFile> findMapFilesToCompact(Set<CompactableFile> files,
+      double ratio, int maxFilesToCompact, long maxSizeToCompact) {
+    if (files.size() <= 1)
+      return Collections.emptySet();
+
+    // sort files from smallest to largest. So position 0 has the smallest file.
+    List<CompactableFile> sortedFiles = sortByFileSize(files);
+
+    int maxSizeIndex = sortedFiles.size();
+    long sum = 0;
+    for (int i = 0; i < sortedFiles.size(); i++) {
+      sum += sortedFiles.get(i).getEstimatedSize();
+      if (sum > maxSizeToCompact) {
+        maxSizeIndex = i;
+        break;
+      }
+    }
+
+    if (maxSizeIndex < sortedFiles.size()) {
+      sortedFiles = sortedFiles.subList(0, maxSizeIndex);
+      if (sortedFiles.size() <= 1)
+        return Collections.emptySet();
+    }
+
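+    // Slide a window of at most maxFilesToCompact files across the size-sorted list, starting with
+    // the smallest files, and return the first window that yields a set meeting the compaction
+    // ratio.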
+    var loops = Math.max(1, sortedFiles.size() - maxFilesToCompact + 1);
+    for (int i = 0; i < loops; i++) {
+      var filesToCompact = findMapFilesToCompact(
+          sortedFiles.subList(i, Math.min(sortedFiles.size(), maxFilesToCompact) + i), ratio);
+      if (!filesToCompact.isEmpty())
+        return filesToCompact;
+    }
+
+    return Collections.emptySet();
+
+  }
+
+  /**
+   * Find the largest set of contiguous small files that meet the compaction ratio. For a set of
+   * file sizes like [101M,102M,103M,104M,4M,3M,3M,3M,3M], it would be nice to compact the smaller
+   * files [4M,3M,3M,3M,3M] first, followed by the larger ones. The reason to do the smaller ones
+   * first is to more quickly reduce the number of files. However, all compactions should still
+   * follow the compaction ratio in order to ensure the amount of data rewriting is logarithmic.
+   *
+   * <p>
+   * A set of files meets the compaction ratio when {@code largestFileInSet * compactionRatio <
+   * sumOfFileSizesInSet}. This algorithm grows the set of small files until it meets the compaction
+   * ratio, then keeps growing it while it continues to meet the ratio. Once a set does not meet the
+   * compaction ratio, the last set that did is returned. Growing the set of small files means
+   * adding the smallest file not in the set.
+   *
+   * <p>
+   * There is one caveat to the algorithm mentioned above: if a smaller set of files would prevent a
+   * future compaction, then do not select it. The code in this function performs a look-ahead to
+   * see if a candidate set would prevent future compactions.
+   *
+   * <p>
+   * As an example of a small set of files that could prevent a future compaction, consider the
+   * file sizes [100M,99M,33M,33M,33M,33M]. For a compaction ratio of 3, the sets
+   * [100M,99M,33M,33M,33M,33M] and [33M,33M,33M,33M] both meet the compaction ratio. If the set
+   * [33M,33M,33M,33M] is compacted, then it will result in a tablet having [132M, 100M, 99M], which
+   * does not meet the compaction ratio. So in this case, choosing the set [33M,33M,33M,33M]
+   * prevents a future compaction that could have occurred. This function will not choose the
+   * smaller set because it would prevent that future compaction.
+   */
+  private static Collection<CompactableFile>
+      findMapFilesToCompact(List<CompactableFile> sortedFiles, double ratio) {
+
+    int larsmaIndex = -1;
+    long larsmaSum = Long.MIN_VALUE;
+
+    // index into sortedFiles, everything at and below this index meets the compaction ratio
+    int goodIndex = -1;
+
+    long sum = sortedFiles.get(0).getEstimatedSize();
+
+    for (int c = 1; c < sortedFiles.size(); c++) {
+      long currSize = sortedFiles.get(c).getEstimatedSize();
+
+      // ensure data is sorted
+      Preconditions.checkArgument(currSize >= sortedFiles.get(c - 1).getEstimatedSize());
+
+      sum += currSize;
+
+      if (currSize * ratio < sum) {
+        goodIndex = c;
+      } else if (c - 1 == goodIndex) {
+        // The previous file met the compaction ratio, but the current file does not. So all of the
+        // previous files are candidates. However, we must ensure that any candidate set produces a
+        // file smaller than the next largest file in the next candidate set, so that future
+        // compactions are not prevented.
+        if (larsmaIndex == -1 || larsmaSum > sortedFiles.get(goodIndex).getEstimatedSize()) {
+          larsmaIndex = goodIndex;
+          larsmaSum = sum - currSize;
+        } else {
+          break;
+        }
+      }
+    }
+
+    if (sortedFiles.size() - 1 == goodIndex
+        && (larsmaIndex == -1 || larsmaSum > sortedFiles.get(goodIndex).getEstimatedSize())) {
+      larsmaIndex = goodIndex;
+    }
+
+    if (larsmaIndex == -1)
+      return Collections.emptySet();
+
+    return sortedFiles.subList(0, larsmaIndex + 1);
+  }
+
+  CompactionExecutorId getExecutor(Collection<CompactableFile> files) {
+
+    long size = files.stream().mapToLong(CompactableFile::getEstimatedSize).sum();
+
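+    // Executors were sorted by max size in init() with the unbounded executor last, so choose the
+    // first executor the compaction fits in; if it fits in none, fall back to the largest.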
+    for (Executor executor : executors) {
+      if (executor.maxSize == null || size < executor.maxSize)
+        return executor.ceid;
+    }
+
+    return executors.get(executors.size() - 1).ceid;
+  }
+
+  public static List<CompactableFile> sortByFileSize(Collection<CompactableFile> files) {
+    ArrayList<CompactableFile> sortedFiles = new ArrayList<>(files);
+
+    // sort from smallest file to largest
+    Collections.sort(sortedFiles, Comparator.comparingLong(CompactableFile::getEstimatedSize)
+        .thenComparing(CompactableFile::getUri));
+
+    return sortedFiles;
+  }
+}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java b/core/src/main/java/org/apache/accumulo/core/spi/compaction/ExecutorManager.java
similarity index 61%
copy from server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java
copy to core/src/main/java/org/apache/accumulo/core/spi/compaction/ExecutorManager.java
index 792c0b6..4740ee6 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java
+++ b/core/src/main/java/org/apache/accumulo/core/spi/compaction/ExecutorManager.java
@@ -16,10 +16,19 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.accumulo.tserver.compaction;
+package org.apache.accumulo.core.spi.compaction;
 
-public enum MajorCompactionReason {
-  // do not change the order, the order of this enum determines the order
-  // in which queued major compactions are executed
-  USER, CHOP, NORMAL, IDLE
+/**
+ * Offered to a Compaction Planner at initialization time so it can create executors.
+ *
+ * @since 2.1.0
+ * @see CompactionPlanner#init(org.apache.accumulo.core.spi.compaction.CompactionPlanner.InitParameters)
+ * @see org.apache.accumulo.core.spi.compaction
+ */
+public interface ExecutorManager {
+  /**
+   * Create a thread pool executor within a compaction service.
+   */
+  public CompactionExecutorId createExecutor(String name, int threads);
 }
diff --git a/core/src/main/java/org/apache/accumulo/core/spi/compaction/SimpleCompactionDispatcher.java b/core/src/main/java/org/apache/accumulo/core/spi/compaction/SimpleCompactionDispatcher.java
new file mode 100644
index 0000000..c985714
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/spi/compaction/SimpleCompactionDispatcher.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.spi.compaction;
+
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.accumulo.core.client.admin.CompactionConfig;
+
+/**
+ * Dispatcher that supports simple configuration for making tables use compaction services. By
+ * default it dispatches to a compaction service named default.
+ *
+ * <p>
+ * The following schema is supported for configuration options.
+ *
+ * <p>
+ * {@code table.compaction.dispatcher.opts.service[.user[.<user type>]|selector|system|chop]=
+ * <service>}
+ *
+ * <p>
+ * The following configuration will make a table use compaction service cs9 for user compactions,
+ * service cs4 for chop compactions, and service cs7 for everything else.
+ *
+ * <p>
+ * {@code
+ *   table.compaction.dispatcher.opts.service=cs7
+ *   table.compaction.dispatcher.opts.service.user=cs9
+ *   table.compaction.dispatcher.opts.service.chop=cs4
+ * }
+ *
+ * <p>
+ * Compactions started using the client API are called user compactions and can set execution hints
+ * using {@link CompactionConfig#setExecutionHints(Map)}. Hints of the form
+ * {@code compaction_type=<user type>} can be used by this dispatcher. For example, the following
+ * will use service cs2 when the hint {@code compaction_type=urgent} is seen and service cs3 when
+ * the hint {@code compaction_type=trifling} is seen; everything else uses cs9.
+ *
+ * <p>
+ * {@code
+ *   table.compaction.dispatcher.opts.service=cs9
+ *   table.compaction.dispatcher.opts.service.user.urgent=cs2
+ *   table.compaction.dispatcher.opts.service.user.trifling=cs3
+ * }
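+ *
+ * <p>
+ * As an illustrative client-side sketch (the table name and hint value below are hypothetical),
+ * such a hint could be supplied when starting a user compaction:
+ *
+ * <pre>{@code
+ * client.tableOperations().compact("mytable",
+ *     new CompactionConfig().setExecutionHints(Map.of("compaction_type", "urgent")));
+ * }</pre>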
+ *
+ * @see org.apache.accumulo.core.spi.compaction
+ */
+
+public class SimpleCompactionDispatcher implements CompactionDispatcher {
+
+  private Map<CompactionKind,CompactionDirectives> services;
+  private Map<String,CompactionDirectives> userServices;
+
+  @Override
+  public void init(InitParameters params) {
+    services = new EnumMap<>(CompactionKind.class);
+
+    var defaultService = CompactionDirectives.builder().build();
+
+    if (params.getOptions().containsKey("service")) {
+      defaultService =
+          CompactionDirectives.builder().setService(params.getOptions().get("service")).build();
+    }
+
+    for (CompactionKind ctype : CompactionKind.values()) {
+      String service = params.getOptions().get("service." + ctype.name().toLowerCase());
+      if (service == null)
+        services.put(ctype, defaultService);
+      else
+        services.put(ctype, CompactionDirectives.builder().setService(service).build());
+    }
+
+    if (params.getOptions().isEmpty()) {
+      userServices = Map.of();
+    } else {
+      Map<String,CompactionDirectives> tmpUS = new HashMap<>();
+      params.getOptions().forEach((k, v) -> {
+        if (k.startsWith("service.user.")) {
+          String type = k.substring("service.user.".length());
+          tmpUS.put(type, CompactionDirectives.builder().setService(v).build());
+        }
+      });
+
+      userServices = Map.copyOf(tmpUS);
+    }
+  }
+
+  @Override
+  public CompactionDirectives dispatch(DispatchParameters params) {
+
+    if (params.getCompactionKind() == CompactionKind.USER) {
+      String hintType = params.getExecutionHints().get("compaction_type");
+      if (hintType != null) {
+        var userDirectives = userServices.get(hintType);
+        if (userDirectives != null) {
+          return userDirectives;
+        }
+      }
+    }
+    return services.get(params.getCompactionKind());
+  }
+
+}
diff --git a/core/src/main/java/org/apache/accumulo/core/spi/compaction/doc-files/compaction-spi-design.png b/core/src/main/java/org/apache/accumulo/core/spi/compaction/doc-files/compaction-spi-design.png
new file mode 100644
index 0000000..6e83983
Binary files /dev/null and b/core/src/main/java/org/apache/accumulo/core/spi/compaction/doc-files/compaction-spi-design.png differ
diff --git a/core/src/main/java/org/apache/accumulo/core/spi/compaction/package-info.java b/core/src/main/java/org/apache/accumulo/core/spi/compaction/package-info.java
new file mode 100644
index 0000000..293f990
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/spi/compaction/package-info.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
+ * This package provides a place for plugin interfaces related to executing compactions. The diagram
+ * below shows the functional components in Accumulo related to compactions. Not all of these
+ * components are pluggable, but understanding how everything fits together is important for writing
+ * a plugin.
+ *
+ * <p>
+ * <img src="doc-files/compaction-spi-design.png" alt="Compaction design diagram">
+ *
+ * <p>
+ * The following is a description of each functional component.
+ *
+ * <ul>
+ * <li><b>Compaction Manager</b> A non pluggable component within the tablet server that brings all
+ * other components together. The manager will route compactables to compaction services. For each
+ * kind of compaction, an individual compactable will be routed to a single compaction service. For
+ * example, it's possible that compactable C1 is routed to service S1 for user compactions and service
+ * S2 for system compactions.
+ * <ul>
+ * <li><b>Compaction Service</b> A non pluggable component that compacts tablets. One or more of
+ * these are created based on user configuration. Users can assign a table to a compaction service.
+ * Each service has a single compaction planner and one or more compaction executors.
+ * <ul>
+ * <li><b>Compaction Planner</b> A pluggable component that can be configured by users when they
+ * configure a compaction service. It makes decisions about which files to compact on which
+ * executors. See {@link org.apache.accumulo.core.spi.compaction.CompactionPlanner},
+ * {@link org.apache.accumulo.core.spi.compaction.CompactionPlanner#makePlan(org.apache.accumulo.core.spi.compaction.CompactionPlanner.PlanningParameters)},
+ * and {@link org.apache.accumulo.core.spi.compaction.DefaultCompactionPlanner}
+ * <li><b>Compaction Executor</b> A non pluggable component that executes compactions using multiple
+ * threads and has a priority queue.</li>
+ * </ul>
+ * </li>
+ * <li><b>Compactable</b> A non pluggable component that wraps a Tablet and per table pluggable
+ * compaction components. It tracks all information about one or more running compactions that is
+ * needed by a compaction service in a thread-safe manner. There is a 1-to-1 relationship between
+ * compactables and tablets.
+ * <ul>
+ * <li><b>Compaction Dispatcher</b> A pluggable component that decides which compaction
+ * service a table should use for different kinds of compactions. This is configurable by users per
+ * table. See {@link org.apache.accumulo.core.spi.compaction.CompactionDispatcher}</li>
+ * <li><b>Compaction Selector</b> A pluggable component that can optionally be configured per table
+ * to periodically select files to compact. This supports use cases like periodically compacting all
+ * files because there are too many deletes. See
+ * {@link org.apache.accumulo.core.client.admin.compaction.CompactionSelector}</li>
+ * <li><b>Compaction Configurer</b> A pluggable component that can optionally be configured per
+ * table to dynamically configure file output settings. This supports use cases like using snappy
+ * for small files and gzip for large files. See
+ * {@link org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer}</li>
+ * <li><b>Compaction Strategy</b> A deprecated pluggable component replaced by the Selector and
+ * Configurer. See {@link org.apache.accumulo.core.client.admin.CompactionStrategyConfig} for more
+ * information about why this was deprecated.
+ * </ul>
+ * </li>
+ * </ul>
+ * </li>
+ * </ul>
+ *
+ * @see org.apache.accumulo.core.spi
+ */
+package org.apache.accumulo.core.spi.compaction;
diff --git a/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionJobImpl.java b/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionJobImpl.java
new file mode 100644
index 0000000..62ba955
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionJobImpl.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.util.compaction;
+
+import java.util.Collection;
+import java.util.Objects;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
+import org.apache.accumulo.core.spi.compaction.CompactionJob;
+import org.apache.accumulo.core.spi.compaction.CompactionKind;
+
+/**
+ * An immutable object that describes what files to compact and where to compact them.
+ *
+ * @since 2.1.0
+ * @see org.apache.accumulo.core.spi.compaction
+ */
+public class CompactionJobImpl implements CompactionJob {
+
+  private final long priority;
+  private final CompactionExecutorId executor;
+  private final Set<CompactableFile> files;
+  private final CompactionKind kind;
+  private boolean selectedAll;
+
+  CompactionJobImpl(long priority, CompactionExecutorId executor, Collection<CompactableFile> files,
+      CompactionKind kind, boolean selectedAllFiles) {
+    this.priority = priority;
+    this.executor = Objects.requireNonNull(executor);
+    this.files = Set.copyOf(files);
+    this.kind = kind;
+    this.selectedAll = selectedAllFiles;
+  }
+
+  @Override
+  public long getPriority() {
+    return priority;
+  }
+
+  /**
+   * @return The executor to run the job.
+   */
+  @Override
+  public CompactionExecutorId getExecutor() {
+    return executor;
+  }
+
+  /**
+   * @return The files to compact
+   */
+  @Override
+  public Set<CompactableFile> getFiles() {
+    return files;
+  }
+
+  /**
+   * @return The kind of compaction this is.
+   */
+  @Override
+  public CompactionKind getKind() {
+    return kind;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(priority, executor, files, kind);
+  }
+
+  public boolean selectedAll() {
+    return selectedAll;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o instanceof CompactionJobImpl) {
+      CompactionJobImpl ocj = (CompactionJobImpl) o;
+
+      return priority == ocj.priority && executor.equals(ocj.executor) && files.equals(ocj.files)
+          && kind == ocj.kind;
+    }
+
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return "CompactionJob [priority=" + priority + ", executor=" + executor + ", files=" + files
+        + ", kind=" + kind + "]";
+  }
+
+}
diff --git a/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionJobPrioritizer.java b/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionJobPrioritizer.java
new file mode 100644
index 0000000..f9480a8
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionJobPrioritizer.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.accumulo.core.util.compaction;
+
+import java.util.Comparator;
+
+import org.apache.accumulo.core.spi.compaction.CompactionJob;
+import org.apache.accumulo.core.spi.compaction.CompactionKind;
+
+public class CompactionJobPrioritizer {
+
+  public static final Comparator<CompactionJob> JOB_COMPARATOR =
+      Comparator.comparingLong(CompactionJob::getPriority)
+          .thenComparingInt(job -> job.getFiles().size()).reversed();
+
+  public static long createPriority(CompactionKind kind, int totalFiles) {
+    long kindPrio;
+
+    switch (kind) {
+      case USER:
+        kindPrio = 4;
+        break;
+      case SELECTOR:
+        kindPrio = 2;
+        break;
+      case CHOP:
+        kindPrio = 3;
+        break;
+      case SYSTEM:
+        kindPrio = 1;
+        break;
+      default:
+        throw new AssertionError("Unknown kind " + kind);
+    }
+
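+    // Pack the kind into the high bits so that kind dominates the ordering; jobs of the same kind
+    // are then ordered by the total number of files in the tablet.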
+    return (kindPrio << 56) | totalFiles;
+  }
+
+}
diff --git a/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionPlanImpl.java b/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionPlanImpl.java
new file mode 100644
index 0000000..4b678d9
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionPlanImpl.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.util.compaction;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
+import org.apache.accumulo.core.spi.compaction.CompactionJob;
+import org.apache.accumulo.core.spi.compaction.CompactionKind;
+import org.apache.accumulo.core.spi.compaction.CompactionPlan;
+
+import com.google.common.base.Preconditions;
+
+public class CompactionPlanImpl implements CompactionPlan {
+
+  private final Collection<CompactionJob> jobs;
+
+  private CompactionPlanImpl(Collection<CompactionJob> jobs) {
+    this.jobs = List.copyOf(jobs);
+  }
+
+  @Override
+  public Collection<CompactionJob> getJobs() {
+    return jobs;
+  }
+
+  @Override
+  public String toString() {
+    return "jobs: " + jobs;
+  }
+
+  public static class BuilderImpl implements CompactionPlan.Builder {
+
+    private CompactionKind kind;
+    private ArrayList<CompactionJob> jobs = new ArrayList<>();
+    private Set<CompactableFile> allFiles;
+    private Set<CompactableFile> seenFiles = new HashSet<>();
+    private Set<CompactableFile> candidates;
+
+    public BuilderImpl(CompactionKind kind, Set<CompactableFile> allFiles,
+        Set<CompactableFile> candidates) {
+      this.kind = kind;
+      this.allFiles = allFiles;
+      this.candidates = candidates;
+    }
+
+    @Override
+    public Builder addJob(long priority, CompactionExecutorId executor,
+        Collection<CompactableFile> files) {
+      Set<CompactableFile> filesSet =
+          files instanceof Set ? (Set<CompactableFile>) files : Set.copyOf(files);
+
+      Preconditions.checkArgument(Collections.disjoint(filesSet, seenFiles),
+          "Job files overlaps with previous job %s %s", files, jobs);
+      Preconditions.checkArgument(candidates.containsAll(filesSet),
+          "Job files are not compaction candidates %s %s", files, candidates);
+
+      seenFiles.addAll(filesSet);
+
+      jobs.add(
+          new CompactionJobImpl(priority, executor, filesSet, kind, filesSet.equals(allFiles)));
+      return this;
+    }
+
+    @Override
+    public CompactionPlan build() {
+      return new CompactionPlanImpl(jobs);
+    }
+  }
+
+}
diff --git a/core/src/test/java/org/apache/accumulo/core/conf/PropertyTest.java b/core/src/test/java/org/apache/accumulo/core/conf/PropertyTest.java
index 7228724..562a8f0 100644
--- a/core/src/test/java/org/apache/accumulo/core/conf/PropertyTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/conf/PropertyTest.java
@@ -175,7 +175,7 @@ public class PropertyTest {
   public void testIsValidTablePropertyKey() {
     for (Property prop : Property.values()) {
       if (prop.getKey().startsWith("table.") && !prop.getKey().equals("table.")) {
-        assertTrue(Property.isValidTablePropertyKey(prop.getKey()));
+        assertTrue(prop.getKey(), Property.isValidTablePropertyKey(prop.getKey()));
 
         if (prop.getType().equals(PropertyType.PREFIX)) {
           assertTrue(Property.isValidTablePropertyKey(prop.getKey() + "foo9"));
diff --git a/core/src/test/java/org/apache/accumulo/core/spi/compaction/DefaultCompactionPlannerTest.java b/core/src/test/java/org/apache/accumulo/core/spi/compaction/DefaultCompactionPlannerTest.java
new file mode 100644
index 0000000..9316f58
--- /dev/null
+++ b/core/src/test/java/org/apache/accumulo/core/spi/compaction/DefaultCompactionPlannerTest.java
@@ -0,0 +1,406 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.spi.compaction;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.spi.common.ServiceEnvironment;
+import org.apache.accumulo.core.spi.common.ServiceEnvironment.Configuration;
+import org.apache.accumulo.core.spi.compaction.CompactionPlan.Builder;
+import org.apache.accumulo.core.util.compaction.CompactionPlanImpl;
+import com.google.common.collect.Iterables;
+import org.easymock.EasyMock;
+import org.junit.Test;
+
+public class DefaultCompactionPlannerTest {
+
+  @Test
+  public void testFindFilesToCompact() {
+
+    testFFtC(createCFs("F4", "1M", "F5", "1M", "F6", "1M"),
+        createCFs("F1", "100M", "F2", "100M", "F3", "100M", "F4", "1M", "F5", "1M", "F6", "1M"),
+        2.0);
+
+    testFFtC(createCFs("F1", "100M", "F2", "100M", "F3", "100M", "F4", "1M"), 2.0);
+
+    testFFtC(
+        createCFs("F1", "100M", "F2", "99M", "F3", "33M", "F4", "33M", "F5", "33M", "F6", "33M"),
+        2.0);
+    testFFtC(
+        createCFs("F1", "100M", "F2", "99M", "F3", "33M", "F4", "33M", "F5", "33M", "F6", "33M"),
+        3.0);
+
+    testFFtC(createCFs("F3", "10M", "F4", "10M", "F5", "10M", "F6", "10M"),
+        createCFs("F1", "50M", "F2", "49M", "F3", "10M", "F4", "10M", "F5", "10M", "F6", "10M"),
+        2.0);
+
+    testFFtC(createCFs("F3", "10M", "F4", "10M", "F5", "10M", "F6", "10M"),
+        createCFs("F1", "50M", "F2", "49M", "F3", "10M", "F4", "10M", "F5", "10M", "F6", "10M"),
+        3.0);
+
+    testFFtC(createCFs("S1", "1M", "S2", "1M", "S3", "1M", "S4", "1M"),
+        createCFs("B1", "100M", "B2", "100M", "B3", "100M", "B4", "100M", "M1", "10M", "M2", "10M",
+            "M3", "10M", "M4", "10M", "S1", "1M", "S2", "1M", "S3", "1M", "S4", "1M"),
+        3.0);
+    testFFtC(createCFs("M1", "10M", "M2", "10M", "M3", "10M", "M4", "10M", "C1", "4M"),
+        createCFs("B1", "100M", "B2", "100M", "B3", "100M", "B4", "100M", "M1", "10M", "M2", "10M",
+            "M3", "10M", "M4", "10M", "C1", "4M"),
+        3.0);
+    testFFtC(createCFs("B1", "100M", "B2", "100M", "B3", "100M", "B4", "100M", "C2", "44M"),
+        createCFs("B1", "100M", "B2", "100M", "B3", "100M", "B4", "100M", "C2", "44M"), 3.0);
+    testFFtC(createCFs(), createCFs("C3", "444M"), 3.0);
+
+    testFFtC(createCFs(), createCFs("A1", "17M", "S1", "11M", "S2", "11M", "S3", "11M"), 3.0);
+    testFFtC(createCFs("A1", "16M", "S1", "11M", "S2", "11M", "S3", "11M"), 3.0);
+
+    testFFtC(
+        createCFs("A1", "1M", "A2", "1M", "A3", "1M", "A4", "1M", "A5", "3M", "A6", "3M", "A7",
+            "5M", "A8", "5M"),
+        createCFs("A1", "1M", "A2", "1M", "A3", "1M", "A4", "1M", "A5", "3M", "A6", "3M", "A7",
+            "5M", "A8", "5M", "A9", "100M", "A10", "100M", "A11", "100M", "A12", "500M"),
+        3.0);
+
+    testFFtC(
+        createCFs("F1", "100M", "F2", "99M", "F3", "33M", "F4", "33M", "F5", "33M", "F6", "33M"),
+        3.0);
+
+    testFFtC(createCFs("F3", "10M", "F4", "9M", "F5", "8M", "F6", "7M"),
+        createCFs("F1", "12M", "F2", "11M", "F3", "10M", "F4", "9M", "F5", "8M", "F6", "7M"), 3.0,
+        4);
+
+    testFFtC(createCFs("F3", "4M", "F4", "8M", "F5", "9M", "F6", "10M"),
+        createCFs("F1", "1M", "F2", "2M", "F3", "4M", "F4", "8M", "F5", "9M", "F6", "10M"), 3.0, 4);
+
+    testFFtC(createCFs(),
+        createCFs("F1", "1M", "F2", "2M", "F3", "4M", "F4", "8M", "F5", "16M", "F6", "32M"), 3.0,
+        4);
+
+    testFFtC(createCFs(), createCFs("F1", "200M", "F2", "200M", "F3", "200M", "F4", "200M", "F5",
+        "200M", "F6", "200M"), 3.0, 4, 100_000_000L);
+
+    testFFtC(createCFs("F2", "2M", "F3", "30M", "F4", "30M", "F5", "30M"),
+        createCFs("F1", "1M", "F2", "2M", "F3", "30M", "F4", "30M", "F5", "30M", "F6", "30M"), 3.0,
+        4, 100_000_000L);
+
+    testFFtC(createCFs("F1", "1M", "F2", "2M", "F3", "30M", "F4", "30M", "F5", "30M"),
+        createCFs("F1", "1M", "F2", "2M", "F3", "30M", "F4", "30M", "F5", "30M", "F6", "30M"), 3.0,
+        8, 100_000_000L);
+
+    testFFtC(createCFs("F1", "1M", "F2", "2M", "F3", "30M", "F4", "30M", "F5", "30M", "F6", "30M"),
+        createCFs("F1", "1M", "F2", "2M", "F3", "30M", "F4", "30M", "F5", "30M", "F6", "30M"), 3.0,
+        8, 200_000_000L);
+
+  }
+
+  @Test
+  public void testRunningCompaction() {
+    var planner = createPlanner(true);
+    var all = createCFs("F1", "3M", "F2", "3M", "F3", "11M", "F4", "12M", "F5", "13M");
+    var candidates = createCFs("F3", "11M", "F4", "12M", "F5", "13M");
+    var compacting =
+        Set.of(createJob(CompactionKind.SYSTEM, all, createCFs("F1", "3M", "F2", "3M")));
+    var params = createPlanningParams(all, candidates, compacting, 2, CompactionKind.SYSTEM);
+    var plan = planner.makePlan(params);
+
+    // The result of the running compaction could be included in a future compaction, so the planner
+    // should wait.
+    assertTrue(plan.getJobs().isEmpty());
+
+    all = createCFs("F1", "30M", "F2", "30M", "F3", "11M", "F4", "12M", "F5", "13M");
+    candidates = createCFs("F3", "11M", "F4", "12M", "F5", "13M");
+    compacting = Set.of(createJob(CompactionKind.SYSTEM, all, createCFs("F1", "30M", "F2", "30M")));
+    params = createPlanningParams(all, candidates, compacting, 2, CompactionKind.SYSTEM);
+    plan = planner.makePlan(params);
+
+    // The result of the running compaction would not be included in future compactions, so the
+    // planner should compact.
+    var job = Iterables.getOnlyElement(plan.getJobs());
+    assertEquals(candidates, job.getFiles());
+    assertEquals(CompactionExecutorId.of("medium"), job.getExecutor());
+  }
+
+  @Test
+  public void testUserCompaction() {
+    var planner = createPlanner(true);
+    var all = createCFs("F1", "3M", "F2", "3M", "F3", "11M", "F4", "12M", "F5", "13M");
+    var candidates = createCFs("F3", "11M", "F4", "12M", "F5", "13M");
+    var compacting =
+        Set.of(createJob(CompactionKind.SYSTEM, all, createCFs("F1", "3M", "F2", "3M")));
+    var params = createPlanningParams(all, candidates, compacting, 2, CompactionKind.USER);
+    var plan = planner.makePlan(params);
+
+    // a running non-user compaction should not prevent a user compaction
+    var job = Iterables.getOnlyElement(plan.getJobs());
+    assertEquals(candidates, job.getFiles());
+    assertEquals(CompactionExecutorId.of("medium"), job.getExecutor());
+
+    // should only run one user compaction at a time
+    compacting = Set.of(createJob(CompactionKind.USER, all, createCFs("F1", "3M", "F2", "3M")));
+    params = createPlanningParams(all, candidates, compacting, 2, CompactionKind.USER);
+    plan = planner.makePlan(params);
+    assertTrue(plan.getJobs().isEmpty());
+
+    // 17 files that do not meet the compaction ratio; when max files to compact is 15, the planner
+    // should compact 3 files first and then the remaining 15
+    all = createCFs("F1", "1M", "F2", "2M", "F3", "4M", "F4", "8M", "F5", "16M", "F6", "32M", "F7",
+        "64M", "F8", "128M", "F9", "256M", "FA", "512M", "FB", "1G", "FC", "2G", "FD", "4G", "FE",
+        "8G", "FF", "16G", "FG", "32G", "FH", "64G");
+    compacting = Set.of();
+    params = createPlanningParams(all, all, compacting, 2, CompactionKind.USER);
+    plan = planner.makePlan(params);
+    job = Iterables.getOnlyElement(plan.getJobs());
+    assertEquals(createCFs("F1", "1M", "F2", "2M", "F3", "4M"), job.getFiles());
+    assertEquals(CompactionExecutorId.of("small"), job.getExecutor());
+
+    // should compact all 15
+    all = createCFs("FI", "7M", "F4", "8M", "F5", "16M", "F6", "32M", "F7", "64M", "F8", "128M",
+        "F9", "256M", "FA", "512M", "FB", "1G", "FC", "2G", "FD", "4G", "FE", "8G", "FF", "16G",
+        "FG", "32G", "FH", "64G");
+    params = createPlanningParams(all, all, compacting, 2, CompactionKind.USER);
+    plan = planner.makePlan(params);
+    job = Iterables.getOnlyElement(plan.getJobs());
+    assertEquals(all, job.getFiles());
+    assertEquals(CompactionExecutorId.of("huge"), job.getExecutor());
+
+    // For a user compaction, a subset that meets the compaction ratio can be compacted if there is
+    // also a larger set of files that meets the compaction ratio
+    all = createCFs("F1", "3M", "F2", "4M", "F3", "5M", "F4", "6M", "F5", "50M", "F6", "51M", "F7",
+        "52M");
+    params = createPlanningParams(all, all, compacting, 2, CompactionKind.USER);
+    plan = planner.makePlan(params);
+    job = Iterables.getOnlyElement(plan.getJobs());
+    assertEquals(createCFs("F1", "3M", "F2", "4M", "F3", "5M", "F4", "6M"), job.getFiles());
+    assertEquals(CompactionExecutorId.of("small"), job.getExecutor());
+
+    // There is a subset of small files that meets the compaction ratio, but the larger set does
+    // not, so compact everything to avoid doing more than logarithmic work
+    all = createCFs("F1", "3M", "F2", "4M", "F3", "5M", "F4", "6M", "F5", "50M");
+    params = createPlanningParams(all, all, compacting, 2, CompactionKind.USER);
+    plan = planner.makePlan(params);
+    job = Iterables.getOnlyElement(plan.getJobs());
+    assertEquals(all, job.getFiles());
+    assertEquals(CompactionExecutorId.of("medium"), job.getExecutor());
+
+  }
+
+  @Test
+  public void testMaxSize() {
+    var planner = createPlanner(false);
+    var all = createCFs("F1", "128M", "F2", "129M", "F3", "130M", "F4", "131M", "F5", "132M");
+    var params = createPlanningParams(all, all, Set.of(), 2, CompactionKind.SYSTEM);
+    var plan = planner.makePlan(params);
+
+    // should only compact files less than max size
+    var job = Iterables.getOnlyElement(plan.getJobs());
+    assertEquals(createCFs("F1", "128M", "F2", "129M", "F3", "130M"), job.getFiles());
+    assertEquals(CompactionExecutorId.of("large"), job.getExecutor());
+
+    // user compaction can exceed the max size
+    params = createPlanningParams(all, all, Set.of(), 2, CompactionKind.USER);
+    plan = planner.makePlan(params);
+    job = Iterables.getOnlyElement(plan.getJobs());
+    assertEquals(all, job.getFiles());
+    assertEquals(CompactionExecutorId.of("large"), job.getExecutor());
+  }
+
+  private CompactionJob createJob(CompactionKind kind, Set<CompactableFile> all,
+      Set<CompactableFile> files) {
+    return new CompactionPlanImpl.BuilderImpl(kind, all, all)
+        .addJob(all.size(), CompactionExecutorId.of("small"), files).build().getJobs().iterator()
+        .next();
+  }
+
+  private static Set<CompactableFile> createCFs(String... namesSizePairs) {
+    Set<CompactableFile> files = new HashSet<>();
+
+    for (int i = 0; i < namesSizePairs.length; i += 2) {
+      String name = namesSizePairs[i];
+      long size = ConfigurationTypeHelper.getFixedMemoryAsBytes(namesSizePairs[i + 1]);
+      try {
+        files.add(CompactableFile
+            .create(new URI("hdfs://fake/accumulo/tables/1/t-0000000z/" + name + ".rf"), size, 0));
+      } catch (URISyntaxException e) {
+        throw new RuntimeException(e);
+      }
+    }
+
+    return files;
+  }
+
+  private static void testFFtC(Set<CompactableFile> expected, double ratio) {
+    testFFtC(expected, expected, ratio, 100);
+  }
+
+  private static void testFFtC(Set<CompactableFile> expected, Set<CompactableFile> files,
+      double ratio) {
+    testFFtC(expected, files, ratio, 100);
+  }
+
+  private static void testFFtC(Set<CompactableFile> expected, Set<CompactableFile> files,
+      double ratio, int maxFiles) {
+    testFFtC(expected, files, ratio, maxFiles, Long.MAX_VALUE);
+  }
+
+  private static void testFFtC(Set<CompactableFile> expected, Set<CompactableFile> files,
+      double ratio, int maxFiles, long maxSize) {
+    var result = DefaultCompactionPlanner.findMapFilesToCompact(files, ratio, maxFiles, maxSize);
+    var expectedNames = expected.stream().map(CompactableFile::getUri).map(URI::getPath)
+        .map(path -> path.split("/")).map(t -> t[t.length - 1]).collect(Collectors.toSet());
+    var resultNames = result.stream().map(CompactableFile::getUri).map(URI::getPath)
+        .map(path -> path.split("/")).map(t -> t[t.length - 1]).collect(Collectors.toSet());
+    assertEquals(expectedNames, resultNames);
+  }
+
+  private static CompactionPlanner.PlanningParameters createPlanningParams(Set<CompactableFile> all,
+      Set<CompactableFile> candidates, Set<CompactionJob> compacting, double ratio,
+      CompactionKind kind) {
+    return new CompactionPlanner.PlanningParameters() {
+
+      @Override
+      public TableId getTableId() {
+        return TableId.of("42");
+      }
+
+      @Override
+      public ServiceEnvironment getServiceEnvironment() {
+        throw new UnsupportedOperationException();
+      }
+
+      @Override
+      public Collection<CompactionJob> getRunningCompactions() {
+        return compacting;
+      }
+
+      @Override
+      public double getRatio() {
+        return ratio;
+      }
+
+      @Override
+      public CompactionKind getKind() {
+        return kind;
+      }
+
+      @Override
+      public Map<String,String> getExecutionHints() {
+        return Map.of();
+      }
+
+      @Override
+      public Collection<CompactableFile> getCandidates() {
+        return candidates;
+      }
+
+      @Override
+      public Collection<CompactableFile> getAll() {
+        return all;
+      }
+
+      @Override
+      public Builder createPlanBuilder() {
+        return new CompactionPlanImpl.BuilderImpl(kind, all, candidates);
+      }
+    };
+  }
+
+  private static DefaultCompactionPlanner createPlanner(boolean withHugeExecutor) {
+    DefaultCompactionPlanner planner = new DefaultCompactionPlanner();
+    Configuration conf = EasyMock.createMock(Configuration.class);
+    EasyMock.expect(conf.isSet(EasyMock.anyString())).andReturn(false).anyTimes();
+
+    ServiceEnvironment senv = EasyMock.createMock(ServiceEnvironment.class);
+    EasyMock.expect(senv.getConfiguration()).andReturn(conf).anyTimes();
+
+    EasyMock.replay(conf, senv);
+
+    StringBuilder execBldr = new StringBuilder("[{'name':'small','maxSize':'32M','numThreads':1},"
+        + "{'name':'medium','maxSize':'128M','numThreads':2},"
+        + "{'name':'large','maxSize':'512M','numThreads':3}");
+
+    if (withHugeExecutor) {
+      execBldr.append(",{'name':'huge','numThreads':4}]");
+    } else {
+      execBldr.append("]");
+    }
+
+    String executors = execBldr.toString().replaceAll("'", "\"");
+
+    planner.init(new CompactionPlanner.InitParameters() {
+
+      @Override
+      public ServiceEnvironment getServiceEnvironment() {
+        return senv;
+      }
+
+      @Override
+      public Map<String,String> getOptions() {
+        return Map.of("executors", executors, "maxOpen", "15");
+      }
+
+      @Override
+      public String getFullyQualifiedOption(String key) {
+        assertEquals("maxOpen", key);
+        return Property.TSERV_COMPACTION_SERVICE_PREFIX.getKey() + "cs1.planner.opts." + key;
+      }
+
+      @Override
+      public ExecutorManager getExecutorManager() {
+        return new ExecutorManager() {
+          @Override
+          public CompactionExecutorId createExecutor(String name, int threads) {
+            switch (name) {
+              case "small":
+                assertEquals(1, threads);
+                break;
+              case "medium":
+                assertEquals(2, threads);
+                break;
+              case "large":
+                assertEquals(3, threads);
+                break;
+              case "huge":
+                assertEquals(4, threads);
+                break;
+              default:
+                fail("Unexpected name " + name);
+                break;
+            }
+            return CompactionExecutorId.of(name);
+          }
+        };
+      }
+    });
+
+    return planner;
+  }
+}
diff --git a/core/src/test/java/org/apache/accumulo/core/util/compaction/CompactionPrioritizerTest.java b/core/src/test/java/org/apache/accumulo/core/util/compaction/CompactionPrioritizerTest.java
new file mode 100644
index 0000000..fcc3d41
--- /dev/null
+++ b/core/src/test/java/org/apache/accumulo/core/util/compaction/CompactionPrioritizerTest.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.accumulo.core.util.compaction;
+
+import static org.junit.Assert.assertEquals;
+
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
+import org.apache.accumulo.core.spi.compaction.CompactionJob;
+import org.apache.accumulo.core.spi.compaction.CompactionKind;
+import org.junit.Test;
+
+public class CompactionPrioritizerTest {
+
+  public CompactionJob createJob(CompactionKind kind, String tablet, int numFiles, int totalFiles) {
+
+    Collection<CompactableFile> files = new ArrayList<>();
+    for (int i = 0; i < numFiles; i++) {
+      files.add(CompactableFile
+          .create(URI.create("hdfs://foonn/accumulo/tables/5/" + tablet + "/" + i + ".rf"), 4, 4));
+    }
+    return new CompactionJobImpl(CompactionJobPrioritizer.createPriority(kind, totalFiles),
+        CompactionExecutorId.of("test"), files, kind, false);
+  }
+
+  @Test
+  public void testCompactionJobComparator() {
+    var j1 = createJob(CompactionKind.USER, "t-009", 10, 20);
+    var j2 = createJob(CompactionKind.USER, "t-010", 11, 25);
+    var j3 = createJob(CompactionKind.USER, "t-011", 11, 20);
+    var j4 = createJob(CompactionKind.SYSTEM, "t-012", 11, 30);
+    var j5 = createJob(CompactionKind.SYSTEM, "t-013", 5, 10);
+    var j6 = createJob(CompactionKind.CHOP, "t-014", 5, 40);
+    var j7 = createJob(CompactionKind.CHOP, "t-015", 5, 7);
+    var j8 = createJob(CompactionKind.SELECTOR, "t-014", 5, 21);
+    var j9 = createJob(CompactionKind.SELECTOR, "t-015", 7, 21);
+
+    var expected = List.of(j2, j3, j1, j6, j7, j9, j8, j4, j5);
+
+    var shuffled = new ArrayList<>(expected);
+    Collections.shuffle(shuffled);
+    Collections.sort(shuffled, CompactionJobPrioritizer.JOB_COMPARATOR);
+
+    assertEquals(expected, shuffled);
+  }
+}
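
The test above pins down the ordering produced by CompactionJobPrioritizer.JOB_COMPARATOR. As a rough illustration of how that same comparator can be used outside the test, the sketch below feeds jobs built exactly like the test's createJob helper into a java.util.PriorityQueue; it is placed in the prioritizer's package only so the helper classes are visible, and the "test" executor name is a placeholder.

    package org.apache.accumulo.core.util.compaction;

    import java.net.URI;
    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.PriorityQueue;

    import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
    import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
    import org.apache.accumulo.core.spi.compaction.CompactionJob;
    import org.apache.accumulo.core.spi.compaction.CompactionKind;

    public class PrioritizedQueueSketch {

      // Same construction pattern as CompactionPrioritizerTest.createJob above.
      static CompactionJob job(CompactionKind kind, String tablet, int numFiles, int totalFiles) {
        Collection<CompactableFile> files = new ArrayList<>();
        for (int i = 0; i < numFiles; i++) {
          files.add(CompactableFile
              .create(URI.create("hdfs://foonn/accumulo/tables/5/" + tablet + "/" + i + ".rf"), 4, 4));
        }
        return new CompactionJobImpl(CompactionJobPrioritizer.createPriority(kind, totalFiles),
            CompactionExecutorId.of("test"), files, kind, false);
      }

      public static void main(String[] args) {
        CompactionJob userJob = job(CompactionKind.USER, "t-002", 11, 25);
        PriorityQueue<CompactionJob> queue =
            new PriorityQueue<>(CompactionJobPrioritizer.JOB_COMPARATOR);
        queue.add(job(CompactionKind.SYSTEM, "t-001", 5, 10));
        queue.add(userJob);
        // Per the expected ordering asserted in the test, USER jobs sort ahead of SYSTEM jobs,
        // so the user-initiated job is polled first.
        System.out.println(queue.poll() == userJob); // prints true
      }
    }
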
diff --git a/pom.xml b/pom.xml
index aa2c98d..6c5e05c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1597,6 +1597,7 @@
                   <sourceFileInclude>**/org/apache/accumulo/core/data/**/*.java</sourceFileInclude>
                   <sourceFileInclude>**/org/apache/accumulo/core/security/**/*.java</sourceFileInclude>
                   <sourceFileInclude>**/org/apache/accumulo/minicluster/**/*.java</sourceFileInclude>
+                  <sourceFileInclude>**/org/apache/accumulo/core/spi/**/*.java</sourceFileInclude>
                 </sourceFileIncludes>
               </configuration>
             </plugin>
diff --git a/server/base/src/main/java/org/apache/accumulo/server/ServiceEnvironmentImpl.java b/server/base/src/main/java/org/apache/accumulo/server/ServiceEnvironmentImpl.java
index 28732ea..cb69a83 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServiceEnvironmentImpl.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServiceEnvironmentImpl.java
@@ -55,6 +55,16 @@ public class ServiceEnvironmentImpl implements ServiceEnvironment {
     }
 
     @Override
+    public boolean isSet(String key) {
+      Property prop = Property.getPropertyByKey(key);
+      if (prop != null) {
+        return acfg.isPropertySet(prop, false);
+      } else {
+        return acfg.get(key) != null;
+      }
+    }
+
+    @Override
     public String get(String key) {
       // Get prop to check if sensitive, also looking up by prop may be more efficient.
       Property prop = Property.getPropertyByKey(key);
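
The new isSet method lets a plugin distinguish a property that was explicitly configured from one that is merely defaulted or absent. Below is a minimal sketch of that usage from a plugin's point of view, assuming the usual getConfiguration() accessor on ServiceEnvironment; the property key and fallback are hypothetical.

    import org.apache.accumulo.core.spi.common.ServiceEnvironment;

    public class IsSetSketch {
      // Returns the configured value only when the key was explicitly set, otherwise a fallback.
      static String getOrDefault(ServiceEnvironment env, String key, String fallback) {
        var conf = env.getConfiguration();
        return conf.isSet(key) ? conf.get(key) : fallback;
      }
    }
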
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java b/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java
index 491421c..3683e7d 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java
@@ -37,6 +37,7 @@ import org.apache.accumulo.core.data.TableId;
 import org.apache.accumulo.core.dataImpl.thrift.IterInfo;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 import org.apache.accumulo.core.spi.common.ServiceEnvironment;
+import org.apache.accumulo.core.spi.compaction.CompactionDispatcher;
 import org.apache.accumulo.core.spi.scan.ScanDispatcher;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
@@ -61,6 +62,7 @@ public class TableConfiguration extends AccumuloConfiguration {
   private final EnumMap<IteratorScope,Deriver<ParsedIteratorConfig>> iteratorConfig;
 
   private final Deriver<ScanDispatcher> scanDispatchDeriver;
+  private final Deriver<CompactionDispatcher> compactionDispatchDeriver;
 
   public TableConfiguration(ServerContext context, TableId tableId, NamespaceConfiguration parent) {
     this.context = requireNonNull(context);
@@ -79,6 +81,8 @@ public class TableConfiguration extends AccumuloConfiguration {
     }
 
     scanDispatchDeriver = newDeriver(conf -> createScanDispatcher(conf, context, tableId));
+    compactionDispatchDeriver =
+        newDeriver(conf -> createCompactionDispatcher(conf, context, tableId));
   }
 
   void setZooCacheFactory(ZooCacheFactory zcf) {
@@ -209,15 +213,38 @@ public class TableConfiguration extends AccumuloConfiguration {
     ScanDispatcher newDispatcher = Property.createTableInstanceFromPropertyName(conf,
         Property.TABLE_SCAN_DISPATCHER, ScanDispatcher.class, null);
 
-    var builder = ImmutableMap.<String,String>builder();
-    conf.getAllPropertiesWithPrefix(Property.TABLE_SCAN_DISPATCHER_OPTS).forEach((k, v) -> {
-      String optKey = k.substring(Property.TABLE_SCAN_DISPATCHER_OPTS.getKey().length());
-      builder.put(optKey, v);
+    Map<String,String> opts =
+        conf.getAllPropertiesWithPrefixStripped(Property.TABLE_SCAN_DISPATCHER_OPTS);
+
+    newDispatcher.init(new ScanDispatcher.InitParameters() {
+      @Override
+      public TableId getTableId() {
+        return tableId;
+      }
+
+      @Override
+      public Map<String,String> getOptions() {
+        return opts;
+      }
+
+      @Override
+      public ServiceEnvironment getServiceEnv() {
+        return new ServiceEnvironmentImpl(context);
+      }
     });
 
-    Map<String,String> opts = builder.build();
+    return newDispatcher;
+  }
 
-    newDispatcher.init(new ScanDispatcher.InitParameters() {
+  private static CompactionDispatcher createCompactionDispatcher(AccumuloConfiguration conf,
+      ServerContext context, TableId tableId) {
+    CompactionDispatcher newDispatcher = Property.createTableInstanceFromPropertyName(conf,
+        Property.TABLE_COMPACTION_DISPATCHER, CompactionDispatcher.class, null);
+
+    Map<String,String> opts =
+        conf.getAllPropertiesWithPrefixStripped(Property.TABLE_COMPACTION_DISPATCHER_OPTS);
+
+    newDispatcher.init(new CompactionDispatcher.InitParameters() {
       @Override
       public TableId getTableId() {
         return tableId;
@@ -235,9 +262,14 @@ public class TableConfiguration extends AccumuloConfiguration {
     });
 
     return newDispatcher;
+
   }
 
   public ScanDispatcher getScanDispatcher() {
     return scanDispatchDeriver.derive();
   }
+
+  public CompactionDispatcher getCompactionDispatcher() {
+    return compactionDispatchDeriver.derive();
+  }
 }
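
With the compaction dispatch deriver in place, a table opts into a compaction service by setting the dispatcher class plus a "service" option, the same pair of properties the initialization and upgrade code below applies to the root and metadata tables. Here is a sketch of the equivalent settings for an ordinary table; the service name "userService" is an arbitrary example, and the resulting map would be applied however table properties are normally set.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.accumulo.core.conf.Property;
    import org.apache.accumulo.core.spi.compaction.SimpleCompactionDispatcher;

    public class DispatcherPropsSketch {
      static Map<String,String> compactionDispatchProps() {
        Map<String,String> props = new HashMap<>();
        // Route this table's compactions through the simple dispatcher ...
        props.put(Property.TABLE_COMPACTION_DISPATCHER.getKey(),
            SimpleCompactionDispatcher.class.getName());
        // ... and point the dispatcher at a named compaction service.
        props.put(Property.TABLE_COMPACTION_DISPATCHER_OPTS.getKey() + "service", "userService");
        return props;
      }
    }
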
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 697f56a..3855892 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@ -78,6 +78,7 @@ import org.apache.accumulo.core.replication.ReplicationConstants;
 import org.apache.accumulo.core.replication.ReplicationSchema.StatusSection;
 import org.apache.accumulo.core.replication.ReplicationSchema.WorkSection;
 import org.apache.accumulo.core.replication.ReplicationTable;
+import org.apache.accumulo.core.spi.compaction.SimpleCompactionDispatcher;
 import org.apache.accumulo.core.spi.crypto.CryptoService;
 import org.apache.accumulo.core.util.ColumnFQ;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
@@ -167,46 +168,58 @@ public class Initialize implements KeywordExecutable {
     return zoo;
   }
 
-  private static HashMap<String,String> initialMetadataConf = new HashMap<>();
-  private static HashMap<String,String> initialMetadataCombinerConf = new HashMap<>();
+  // config only for root table
+  private static HashMap<String,String> initialRootConf = new HashMap<>();
+  // config for root and metadata table
+  private static HashMap<String,String> initialRootMetaConf = new HashMap<>();
+  // config only for metadata table
+  private static HashMap<String,String> initialMetaConf = new HashMap<>();
   private static HashMap<String,String> initialReplicationTableConf = new HashMap<>();
 
   static {
-    initialMetadataConf.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "32K");
-    initialMetadataConf.put(Property.TABLE_FILE_REPLICATION.getKey(), "5");
-    initialMetadataConf.put(Property.TABLE_DURABILITY.getKey(), "sync");
-    initialMetadataConf.put(Property.TABLE_MAJC_RATIO.getKey(), "1");
-    initialMetadataConf.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "64M");
-    initialMetadataConf.put(Property.TABLE_CONSTRAINT_PREFIX.getKey() + "1",
+    initialRootConf.put(Property.TABLE_COMPACTION_DISPATCHER.getKey(),
+        SimpleCompactionDispatcher.class.getName());
+    initialRootConf.put(Property.TABLE_COMPACTION_DISPATCHER_OPTS.getKey() + "service", "root");
+
+    initialRootMetaConf.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "32K");
+    initialRootMetaConf.put(Property.TABLE_FILE_REPLICATION.getKey(), "5");
+    initialRootMetaConf.put(Property.TABLE_DURABILITY.getKey(), "sync");
+    initialRootMetaConf.put(Property.TABLE_MAJC_RATIO.getKey(), "1");
+    initialRootMetaConf.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "64M");
+    initialRootMetaConf.put(Property.TABLE_CONSTRAINT_PREFIX.getKey() + "1",
         MetadataConstraints.class.getName());
-    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers",
+    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers",
         "10," + VersioningIterator.class.getName());
-    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers.opt.maxVersions",
+    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers.opt.maxVersions",
         "1");
-    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers",
+    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers",
         "10," + VersioningIterator.class.getName());
-    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers.opt.maxVersions",
+    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers.opt.maxVersions",
         "1");
-    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers",
+    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers",
         "10," + VersioningIterator.class.getName());
-    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers.opt.maxVersions",
+    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers.opt.maxVersions",
         "1");
-    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.bulkLoadFilter",
+    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.bulkLoadFilter",
         "20," + MetadataBulkLoadFilter.class.getName());
-    initialMetadataConf.put(Property.TABLE_FAILURES_IGNORE.getKey(), "false");
-    initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "tablet",
+    initialRootMetaConf.put(Property.TABLE_FAILURES_IGNORE.getKey(), "false");
+    initialRootMetaConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "tablet",
         String.format("%s,%s", TabletColumnFamily.NAME, CurrentLocationColumnFamily.NAME));
-    initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "server",
+    initialRootMetaConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "server",
         String.format("%s,%s,%s,%s", DataFileColumnFamily.NAME, LogColumnFamily.NAME,
             ServerColumnFamily.NAME, FutureLocationColumnFamily.NAME));
-    initialMetadataConf.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "tablet,server");
-    initialMetadataConf.put(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "");
-    initialMetadataConf.put(Property.TABLE_INDEXCACHE_ENABLED.getKey(), "true");
-    initialMetadataConf.put(Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "true");
+    initialRootMetaConf.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "tablet,server");
+    initialRootMetaConf.put(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "");
+    initialRootMetaConf.put(Property.TABLE_INDEXCACHE_ENABLED.getKey(), "true");
+    initialRootMetaConf.put(Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "true");
+
+    initialMetaConf.put(Property.TABLE_COMPACTION_DISPATCHER.getKey(),
+        SimpleCompactionDispatcher.class.getName());
+    initialMetaConf.put(Property.TABLE_COMPACTION_DISPATCHER_OPTS.getKey() + "service", "meta");
 
     // ACCUMULO-3077 Set the combiner on accumulo.metadata during init to reduce the likelihood of a
-    // race
-    // condition where a tserver compacts away Status updates because it didn't see the Combiner
+    // race condition where a tserver compacts away Status updates because it didn't see the
+    // Combiner
     // configured
     IteratorSetting setting =
         new IteratorSetting(9, ReplicationTableUtil.COMBINER_NAME, StatusCombiner.class);
@@ -215,10 +228,9 @@ public class Initialize implements KeywordExecutable {
       String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
           scope.name().toLowerCase(), setting.getName());
       for (Entry<String,String> prop : setting.getOptions().entrySet()) {
-        initialMetadataCombinerConf.put(root + ".opt." + prop.getKey(), prop.getValue());
+        initialMetaConf.put(root + ".opt." + prop.getKey(), prop.getValue());
       }
-      initialMetadataCombinerConf.put(root,
-          setting.getPriority() + "," + setting.getIteratorClass());
+      initialMetaConf.put(root, setting.getPriority() + "," + setting.getIteratorClass());
     }
 
     // add combiners to replication table
@@ -832,7 +844,15 @@ public class Initialize implements KeywordExecutable {
       if (min > 5) {
         setMetadataReplication(min, "min");
       }
-      for (Entry<String,String> entry : initialMetadataConf.entrySet()) {
+
+      for (Entry<String,String> entry : initialRootConf.entrySet()) {
+        if (!TablePropUtil.setTableProperty(zoo, zooKeeperRoot, RootTable.ID, entry.getKey(),
+            entry.getValue())) {
+          throw new IOException("Cannot create per-table property " + entry.getKey());
+        }
+      }
+
+      for (Entry<String,String> entry : initialRootMetaConf.entrySet()) {
         if (!TablePropUtil.setTableProperty(zoo, zooKeeperRoot, RootTable.ID, entry.getKey(),
             entry.getValue())) {
           throw new IOException("Cannot create per-table property " + entry.getKey());
@@ -842,8 +862,8 @@ public class Initialize implements KeywordExecutable {
           throw new IOException("Cannot create per-table property " + entry.getKey());
         }
       }
-      // Only add combiner config to accumulo.metadata table (ACCUMULO-3077)
-      for (Entry<String,String> entry : initialMetadataCombinerConf.entrySet()) {
+
+      for (Entry<String,String> entry : initialMetaConf.entrySet()) {
         if (!TablePropUtil.setTableProperty(zoo, zooKeeperRoot, MetadataTable.ID, entry.getKey(),
             entry.getValue())) {
           throw new IOException("Cannot create per-table property " + entry.getKey());
@@ -874,7 +894,7 @@ public class Initialize implements KeywordExecutable {
       // Lets make sure it's a number
       Integer.parseInt(rep);
     }
-    initialMetadataConf.put(Property.TABLE_FILE_REPLICATION.getKey(), rep);
+    initialRootMetaConf.put(Property.TABLE_FILE_REPLICATION.getKey(), rep);
   }
 
   public static boolean isInitialized(VolumeManager fs, SiteConfiguration siteConfig,
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/tableOps/UserCompactionConfig.java b/server/base/src/main/java/org/apache/accumulo/server/master/tableOps/UserCompactionConfig.java
deleted file mode 100644
index 0133508..0000000
--- a/server/base/src/main/java/org/apache/accumulo/server/master/tableOps/UserCompactionConfig.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.server.master.tableOps;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
-import org.apache.accumulo.core.clientImpl.CompactionStrategyConfigUtil;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-
-public class UserCompactionConfig implements Writable {
-  byte[] startRow;
-  byte[] endRow;
-  List<IteratorSetting> iterators;
-  private CompactionStrategyConfig compactionStrategy;
-
-  public UserCompactionConfig(byte[] startRow, byte[] endRow, List<IteratorSetting> iterators,
-      CompactionStrategyConfig csc) {
-    this.startRow = startRow;
-    this.endRow = endRow;
-    this.iterators = iterators;
-    this.compactionStrategy = csc;
-  }
-
-  public UserCompactionConfig() {
-    startRow = null;
-    endRow = null;
-    iterators = Collections.emptyList();
-    compactionStrategy = CompactionStrategyConfigUtil.DEFAULT_STRATEGY;
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    out.writeBoolean(startRow != null);
-    if (startRow != null) {
-      out.writeInt(startRow.length);
-      out.write(startRow);
-    }
-
-    out.writeBoolean(endRow != null);
-    if (endRow != null) {
-      out.writeInt(endRow.length);
-      out.write(endRow);
-    }
-
-    out.writeInt(iterators.size());
-    for (IteratorSetting is : iterators) {
-      is.write(out);
-    }
-
-    CompactionStrategyConfigUtil.encode(out, compactionStrategy);
-
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    if (in.readBoolean()) {
-      startRow = new byte[in.readInt()];
-      in.readFully(startRow);
-    } else {
-      startRow = null;
-    }
-
-    if (in.readBoolean()) {
-      endRow = new byte[in.readInt()];
-      in.readFully(endRow);
-    } else {
-      endRow = null;
-    }
-
-    int num = in.readInt();
-    iterators = new ArrayList<>(num);
-
-    for (int i = 0; i < num; i++) {
-      iterators.add(new IteratorSetting(in));
-    }
-
-    compactionStrategy = CompactionStrategyConfigUtil.decode(in);
-  }
-
-  public Text getEndRow() {
-    if (endRow == null)
-      return null;
-    return new Text(endRow);
-  }
-
-  public Text getStartRow() {
-    if (startRow == null)
-      return null;
-    return new Text(startRow);
-  }
-
-  public List<IteratorSetting> getIterators() {
-    return iterators;
-  }
-
-  public CompactionStrategyConfig getCompactionStrategy() {
-    return compactionStrategy;
-  }
-}
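
The Writable-based UserCompactionConfig removed above is replaced by serializing the public CompactionConfig through UserCompactionUtils, which is how FateServiceHandler and CompactRange below exchange the configuration. A small round-trip sketch using only the encode/decode calls introduced in this commit, assuming a fully default config encodes like any other:

    import org.apache.accumulo.core.client.admin.CompactionConfig;
    import org.apache.accumulo.core.clientImpl.UserCompactionUtils;

    public class CompactionConfigCodecSketch {
      public static void main(String[] args) {
        CompactionConfig config = new CompactionConfig(); // defaults: whole table, no iterators
        byte[] serialized = UserCompactionUtils.encode(config);   // what CompactRange persists
        CompactionConfig decoded =
            UserCompactionUtils.decodeCompactionConfig(serialized); // what FATE reads back
        System.out.println(decoded.getIterators().isEmpty());     // prints true for the defaults
      }
    }
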
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
index 1ef071a..976e909 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
@@ -217,8 +217,7 @@ public class MasterMetadataUtil {
    *
    */
   public static StoredTabletFile updateTabletDataFile(ServerContext context, KeyExtent extent,
-      TabletFile path, StoredTabletFile mergeFile, DataFileValue dfv, MetadataTime time,
-      Set<StoredTabletFile> filesInUseByScans, String address, ZooLock zooLock,
+      TabletFile path, DataFileValue dfv, MetadataTime time, String address, ZooLock zooLock,
       Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId) {
 
     TabletMutator tablet = context.getAmple().mutateTablet(extent);
@@ -239,12 +238,7 @@ public class MasterMetadataUtil {
     }
     tablet.putFlushId(flushId);
 
-    if (mergeFile != null) {
-      tablet.deleteFile(mergeFile);
-    }
-
     unusedWalLogs.forEach(tablet::deleteWal);
-    filesInUseByScans.forEach(tablet::putScan);
 
     tablet.putZooLock(zooLock);
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java b/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
index 9b4bd36..327e752 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
@@ -39,16 +39,15 @@ import java.util.Set;
 import java.util.stream.Collectors;
 
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.NamespaceNotFoundException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
+import org.apache.accumulo.core.client.admin.CompactionConfig;
 import org.apache.accumulo.core.client.admin.InitialTableState;
 import org.apache.accumulo.core.client.admin.TimeType;
-import org.apache.accumulo.core.clientImpl.CompactionStrategyConfigUtil;
 import org.apache.accumulo.core.clientImpl.Namespaces;
 import org.apache.accumulo.core.clientImpl.TableOperationsImpl;
 import org.apache.accumulo.core.clientImpl.Tables;
+import org.apache.accumulo.core.clientImpl.UserCompactionUtils;
 import org.apache.accumulo.core.clientImpl.thrift.SecurityErrorCode;
 import org.apache.accumulo.core.clientImpl.thrift.TableOperation;
 import org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType;
@@ -56,7 +55,6 @@ import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
 import org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException;
 import org.apache.accumulo.core.data.NamespaceId;
 import org.apache.accumulo.core.data.TableId;
-import org.apache.accumulo.core.iteratorsImpl.system.SystemIteratorUtil;
 import org.apache.accumulo.core.master.thrift.BulkImportState;
 import org.apache.accumulo.core.master.thrift.FateOperation;
 import org.apache.accumulo.core.master.thrift.FateService;
@@ -467,14 +465,10 @@ class FateServiceHandler implements FateService.Iface {
       }
       case TABLE_COMPACT: {
         TableOperation tableOp = TableOperation.COMPACT;
-        validateArgumentCount(arguments, tableOp, 5);
+        validateArgumentCount(arguments, tableOp, 2);
         TableId tableId = validateTableIdArgument(arguments.get(0), tableOp, null);
-        byte[] startRow = ByteBufferUtil.toBytes(arguments.get(1));
-        byte[] endRow = ByteBufferUtil.toBytes(arguments.get(2));
-        List<IteratorSetting> iterators =
-            SystemIteratorUtil.decodeIteratorSettings(ByteBufferUtil.toBytes(arguments.get(3)));
-        CompactionStrategyConfig compactionStrategy =
-            CompactionStrategyConfigUtil.decode(ByteBufferUtil.toBytes(arguments.get(4)));
+        CompactionConfig compactionConfig =
+            UserCompactionUtils.decodeCompactionConfig(ByteBufferUtil.toBytes(arguments.get(1)));
         NamespaceId namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
 
         final boolean canCompact;
@@ -488,8 +482,8 @@ class FateServiceHandler implements FateService.Iface {
         if (!canCompact)
           throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
 
-        master.fate.seedTransaction(opid, new TraceRepo<>(new CompactRange(namespaceId, tableId,
-            startRow, endRow, iterators, compactionStrategy)), autoCleanup);
+        master.fate.seedTransaction(opid,
+            new TraceRepo<>(new CompactRange(namespaceId, tableId, compactionConfig)), autoCleanup);
         break;
       }
       case TABLE_CANCEL_COMPACT: {
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/compact/CompactRange.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/compact/CompactRange.java
index 96934ea..9d9ff54 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/compact/CompactRange.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/compact/CompactRange.java
@@ -20,28 +20,27 @@ package org.apache.accumulo.master.tableOps.compact;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static java.util.Objects.requireNonNull;
+import static org.apache.accumulo.core.clientImpl.UserCompactionUtils.isDefault;
 
-import java.util.List;
+import java.util.Optional;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
+import org.apache.accumulo.core.client.admin.CompactionConfig;
 import org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException;
 import org.apache.accumulo.core.clientImpl.CompactionStrategyConfigUtil;
+import org.apache.accumulo.core.clientImpl.UserCompactionUtils;
 import org.apache.accumulo.core.clientImpl.thrift.TableOperation;
 import org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType;
 import org.apache.accumulo.core.data.NamespaceId;
 import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.ZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooReaderWriter.Mutator;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.master.tableOps.MasterRepo;
 import org.apache.accumulo.master.tableOps.Utils;
-import org.apache.accumulo.server.master.tableOps.UserCompactionConfig;
 import org.apache.commons.codec.binary.Hex;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableUtils;
 import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -56,34 +55,37 @@ public class CompactRange extends MasterRepo {
   private byte[] endRow;
   private byte[] config;
 
-  public CompactRange(NamespaceId namespaceId, TableId tableId, byte[] startRow, byte[] endRow,
-      List<IteratorSetting> iterators, CompactionStrategyConfig compactionStrategy)
+  public CompactRange(NamespaceId namespaceId, TableId tableId, CompactionConfig compactionConfig)
       throws AcceptableThriftTableOperationException {
 
     requireNonNull(namespaceId, "Invalid argument: null namespaceId");
     requireNonNull(tableId, "Invalid argument: null tableId");
-    requireNonNull(iterators, "Invalid argument: null iterator list");
-    requireNonNull(compactionStrategy, "Invalid argument: null compactionStrategy");
+    requireNonNull(compactionConfig, "Invalid argument: null compaction config");
 
     this.tableId = tableId;
     this.namespaceId = namespaceId;
-    this.startRow = startRow.length == 0 ? null : startRow;
-    this.endRow = endRow.length == 0 ? null : endRow;
 
-    if (!iterators.isEmpty()
-        || !compactionStrategy.equals(CompactionStrategyConfigUtil.DEFAULT_STRATEGY)) {
-      this.config = WritableUtils.toByteArray(
-          new UserCompactionConfig(this.startRow, this.endRow, iterators, compactionStrategy));
+    if (!compactionConfig.getIterators().isEmpty()
+        || !CompactionStrategyConfigUtil.isDefault(compactionConfig)
+        || !compactionConfig.getExecutionHints().isEmpty()
+        || !isDefault(compactionConfig.getConfigurer())
+        || !isDefault(compactionConfig.getSelector())) {
+      this.config = UserCompactionUtils.encode(compactionConfig);
     } else {
       log.debug(
           "Using default compaction strategy. No user iterators or compaction strategy provided.");
     }
 
-    if (this.startRow != null && this.endRow != null
-        && new Text(startRow).compareTo(new Text(endRow)) >= 0)
+    if (compactionConfig.getStartRow() != null && compactionConfig.getEndRow() != null
+        && compactionConfig.getStartRow().compareTo(compactionConfig.getEndRow()) >= 0)
       throw new AcceptableThriftTableOperationException(tableId.canonical(), null,
           TableOperation.COMPACT, TableOperationExceptionType.BAD_RANGE,
           "start row must be less than end row");
+
+    this.startRow =
+        Optional.ofNullable(compactionConfig.getStartRow()).map(TextUtil::getBytes).orElse(null);
+    this.endRow =
+        Optional.ofNullable(compactionConfig.getEndRow()).map(TextUtil::getBytes).orElse(null);
   }
 
   @Override
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/compact/cancel/CancelCompactions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/compact/cancel/CancelCompactions.java
index 93499f8..a2e8050 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/compact/cancel/CancelCompactions.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/compact/cancel/CancelCompactions.java
@@ -24,12 +24,15 @@ import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.clientImpl.thrift.TableOperation;
 import org.apache.accumulo.core.data.NamespaceId;
 import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.fate.FateTxId;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.ZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooReaderWriter.Mutator;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.master.tableOps.MasterRepo;
 import org.apache.accumulo.master.tableOps.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class CancelCompactions extends MasterRepo {
 
@@ -37,6 +40,8 @@ public class CancelCompactions extends MasterRepo {
   private TableId tableId;
   private NamespaceId namespaceId;
 
+  private static final Logger log = LoggerFactory.getLogger(CancelCompactions.class);
+
   public CancelCompactions(NamespaceId namespaceId, TableId tableId) {
     this.tableId = tableId;
     this.namespaceId = namespaceId;
@@ -68,10 +73,15 @@ public class CancelCompactions extends MasterRepo {
       public byte[] mutate(byte[] currentValue) {
         long cid = Long.parseLong(new String(currentValue, UTF_8));
 
-        if (cid < flushID)
+        if (cid < flushID) {
+          log.debug("{} setting cancel compaction id to {} for {}", FateTxId.formatTid(tid),
+              flushID, tableId);
           return Long.toString(flushID).getBytes(UTF_8);
-        else
+        } else {
+          log.debug("{} leaving cancel compaction id as {} for {}", FateTxId.formatTid(tid), cid,
+              tableId);
           return Long.toString(cid).getBytes(UTF_8);
+        }
       }
     });
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/upgrade/Upgrader9to10.java b/server/master/src/main/java/org/apache/accumulo/master/upgrade/Upgrader9to10.java
index fdc2015..7367b70 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/upgrade/Upgrader9to10.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/upgrade/Upgrader9to10.java
@@ -54,6 +54,7 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.dataImpl.KeyExtent;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVIterator;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.metadata.TabletFile;
 import org.apache.accumulo.core.metadata.schema.Ample;
@@ -63,6 +64,7 @@ import org.apache.accumulo.core.metadata.schema.MetadataTime;
 import org.apache.accumulo.core.metadata.schema.RootTabletMetadata;
 import org.apache.accumulo.core.metadata.schema.TabletMetadata.LocationType;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.spi.compaction.SimpleCompactionDispatcher;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
 import org.apache.accumulo.core.util.HostAndPort;
 import org.apache.accumulo.fate.zookeeper.ZooReaderWriter;
@@ -76,6 +78,7 @@ import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.metadata.RootGcCandidates;
 import org.apache.accumulo.server.metadata.ServerAmpleImpl;
 import org.apache.accumulo.server.metadata.TabletMutatorBase;
+import org.apache.accumulo.server.util.TablePropUtil;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -108,6 +111,7 @@ public class Upgrader9to10 implements Upgrader {
 
   @Override
   public void upgradeZookeeper(ServerContext ctx) {
+    setMetaTableProps(ctx);
     upgradeRootTabletMetadata(ctx);
   }
 
@@ -125,6 +129,24 @@ public class Upgrader9to10 implements Upgrader {
     upgradeFileDeletes(ctx, Ample.DataLevel.USER);
   }
 
+  private void setMetaTableProps(ServerContext ctx) {
+    try {
+      TablePropUtil.setTableProperty(ctx, RootTable.ID,
+          Property.TABLE_COMPACTION_DISPATCHER.getKey(),
+          SimpleCompactionDispatcher.class.getName());
+      TablePropUtil.setTableProperty(ctx, RootTable.ID,
+          Property.TABLE_COMPACTION_DISPATCHER_OPTS.getKey() + "service", "root");
+
+      TablePropUtil.setTableProperty(ctx, MetadataTable.ID,
+          Property.TABLE_COMPACTION_DISPATCHER.getKey(),
+          SimpleCompactionDispatcher.class.getName());
+      TablePropUtil.setTableProperty(ctx, MetadataTable.ID,
+          Property.TABLE_COMPACTION_DISPATCHER_OPTS.getKey() + "service", "meta");
+    } catch (KeeperException | InterruptedException e) {
+      throw new RuntimeException("Unable to set system table properties", e);
+    }
+  }
+
   private void upgradeRootTabletMetadata(ServerContext ctx) {
     String rootMetaSer = getFromZK(ctx, ZROOT_TABLET);
 
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletIteratorEnvironment.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletIteratorEnvironment.java
index de58ddd..47e35fa 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletIteratorEnvironment.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletIteratorEnvironment.java
@@ -38,11 +38,11 @@ import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.spi.common.ServiceEnvironment;
+import org.apache.accumulo.core.spi.compaction.CompactionKind;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.ServiceEnvironmentImpl;
 import org.apache.accumulo.server.iterators.SystemIteratorEnvironment;
 import org.apache.accumulo.tserver.FileManager.ScanFileManager;
-import org.apache.accumulo.tserver.compaction.MajorCompactionReason;
 import org.apache.hadoop.fs.Path;
 
 public class TabletIteratorEnvironment implements SystemIteratorEnvironment {
@@ -107,7 +107,7 @@ public class TabletIteratorEnvironment implements SystemIteratorEnvironment {
   }
 
   public TabletIteratorEnvironment(ServerContext context, IteratorScope scope, boolean fullMajC,
-      AccumuloConfiguration tableConfig, TableId tableId, MajorCompactionReason reason) {
+      AccumuloConfiguration tableConfig, TableId tableId, CompactionKind kind) {
     if (scope != IteratorScope.majc)
       throw new IllegalArgumentException(
           "Tried to set maj compaction type when scope was " + scope);
@@ -119,7 +119,7 @@ public class TabletIteratorEnvironment implements SystemIteratorEnvironment {
     this.tableConfig = tableConfig;
     this.tableId = tableId;
     this.fullMajorCompaction = fullMajC;
-    this.userCompaction = reason.equals(MajorCompactionReason.USER);
+    this.userCompaction = kind.equals(CompactionKind.USER);
     this.authorizations = Authorizations.EMPTY;
     this.topLevelIterators = new ArrayList<>();
   }
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
index 3681859..9959278 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
@@ -134,7 +134,8 @@ import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
 import org.apache.accumulo.start.classloader.vfs.ContextManager;
 import org.apache.accumulo.tserver.TabletServerResourceManager.TabletResourceManager;
 import org.apache.accumulo.tserver.TabletStatsKeeper.Operation;
-import org.apache.accumulo.tserver.compaction.MajorCompactionReason;
+import org.apache.accumulo.tserver.compactions.Compactable;
+import org.apache.accumulo.tserver.compactions.CompactionManager;
 import org.apache.accumulo.tserver.log.DfsLogger;
 import org.apache.accumulo.tserver.log.LogSorter;
 import org.apache.accumulo.tserver.log.MutationReceiver;
@@ -168,6 +169,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Iterators;
 
 public class TabletServer extends AbstractServer {
 
@@ -379,6 +381,7 @@ public class TabletServer extends AbstractServer {
   private final ReentrantLock recoveryLock = new ReentrantLock(true);
   private ThriftClientHandler clientHandler;
   private final ServerBulkImportStatus bulkImportStatus = new ServerBulkImportStatus();
+  private CompactionManager compactionManager;
 
   String getLockID() {
     return lockID;
@@ -446,10 +449,6 @@ public class TabletServer extends AbstractServer {
             }
 
             tablet.checkIfMinorCompactionNeededForLogs(closedCopy);
-
-            synchronized (tablet) {
-              tablet.initiateMajorCompaction(MajorCompactionReason.NORMAL);
-            }
           }
         } catch (Throwable t) {
           log.error("Unexpected exception in {}", Thread.currentThread().getName(), t);
@@ -461,14 +460,7 @@ public class TabletServer extends AbstractServer {
 
   private void splitTablet(Tablet tablet) {
     try {
-
-      TreeMap<KeyExtent,TabletData> tabletInfo = splitTablet(tablet, null);
-      if (tabletInfo == null) {
-        // either split or compact not both
-        // were not able to split... so see if a major compaction is
-        // needed
-        tablet.initiateMajorCompaction(MajorCompactionReason.NORMAL);
-      }
+      splitTablet(tablet, null);
     } catch (IOException e) {
       statsKeeper.updateTime(Operation.SPLIT, 0, true);
       log.error("split failed: {} for tablet {}", e.getMessage(), tablet.getExtent(), e);
@@ -733,6 +725,15 @@ public class TabletServer extends AbstractServer {
       }
     }
 
+    this.compactionManager = new CompactionManager(new Iterable<Compactable>() {
+      @Override
+      public Iterator<Compactable> iterator() {
+        return Iterators.transform(onlineTablets.snapshot().values().iterator(),
+            Tablet::asCompactable);
+      }
+    }, getContext(), this.resourceManager);
+    compactionManager.start();
+
     try {
       clientAddress = startTabletClientService();
     } catch (UnknownHostException e1) {
@@ -1069,12 +1070,15 @@ public class TabletServer extends AbstractServer {
       if (tablet.isMinorCompactionQueued()) {
         table.minors.queued++;
       }
+
       if (tablet.isMajorCompactionRunning()) {
         table.majors.running++;
       }
+
       if (tablet.isMajorCompactionQueued()) {
         table.majors.queued++;
       }
+
     });
 
     scanCounts.forEach((tableId, mapCounter) -> {
@@ -1375,4 +1379,8 @@ public class TabletServer extends AbstractServer {
   public final RateLimiter getMajorCompactionWriteLimiter() {
     return SharedRateLimiterFactory.getInstance().create(MAJC_WRITE_LIMITER_KEY, rateProvider);
   }
+
+  public CompactionManager getCompactionManager() {
+    return compactionManager;
+  }
 }
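
The CompactionManager above is handed an Iterable whose iterator() is re-evaluated on every call, so the manager always walks the tablets that are online at that moment rather than a stale snapshot. The sketch below illustrates just that pattern with plain types and Guava's Iterators.transform; the Compactable and Tablet types are deliberately not reproduced here.

    import java.util.Iterator;
    import java.util.Map;

    import com.google.common.collect.Iterators;

    public class LiveViewSketch {
      // Each call to iterator() re-reads the backing map, mirroring how the compaction
      // manager sees the current set of online tablets on every pass.
      static Iterable<String> liveView(Map<String,String> source) {
        return new Iterable<String>() {
          @Override
          public Iterator<String> iterator() {
            return Iterators.transform(source.values().iterator(), v -> "compactable:" + v);
          }
        };
      }
    }
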
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
index b84f07d..609c95e 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
@@ -32,7 +32,6 @@ import java.util.Map.Entry;
 import java.util.Optional;
 import java.util.OptionalInt;
 import java.util.Queue;
-import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
@@ -54,12 +53,11 @@ import org.apache.accumulo.core.dataImpl.KeyExtent;
 import org.apache.accumulo.core.file.blockfile.cache.impl.BlockCacheConfiguration;
 import org.apache.accumulo.core.file.blockfile.cache.impl.BlockCacheManagerFactory;
 import org.apache.accumulo.core.file.blockfile.impl.ScanCacheProvider;
-import org.apache.accumulo.core.metadata.StoredTabletFile;
-import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.apache.accumulo.core.spi.cache.BlockCache;
 import org.apache.accumulo.core.spi.cache.BlockCacheManager;
 import org.apache.accumulo.core.spi.cache.CacheType;
 import org.apache.accumulo.core.spi.common.ServiceEnvironment;
+import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
 import org.apache.accumulo.core.spi.scan.ScanDirectives;
 import org.apache.accumulo.core.spi.scan.ScanDispatcher;
 import org.apache.accumulo.core.spi.scan.ScanDispatcher.DispatchParameters;
@@ -78,10 +76,6 @@ import org.apache.accumulo.server.tabletserver.MemoryManager;
 import org.apache.accumulo.server.tabletserver.TabletState;
 import org.apache.accumulo.server.util.time.SimpleTimer;
 import org.apache.accumulo.tserver.FileManager.ScanFileManager;
-import org.apache.accumulo.tserver.compaction.CompactionStrategy;
-import org.apache.accumulo.tserver.compaction.DefaultCompactionStrategy;
-import org.apache.accumulo.tserver.compaction.MajorCompactionReason;
-import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
 import org.apache.accumulo.tserver.session.ScanSession;
 import org.apache.accumulo.tserver.tablet.Tablet;
 import org.apache.htrace.wrappers.TraceExecutorService;
@@ -104,9 +98,6 @@ public class TabletServerResourceManager {
   private static final Logger log = LoggerFactory.getLogger(TabletServerResourceManager.class);
 
   private final ExecutorService minorCompactionThreadPool;
-  private final ExecutorService majorCompactionThreadPool;
-  private final ExecutorService rootMajorCompactionThreadPool;
-  private final ExecutorService defaultMajorCompactionThreadPool;
   private final ExecutorService splitThreadPool;
   private final ExecutorService defaultSplitThreadPool;
   private final ExecutorService defaultMigrationPool;
@@ -328,6 +319,15 @@ public class TabletServerResourceManager {
     return builder.build();
   }
 
+  public ExecutorService createCompactionExecutor(CompactionExecutorId ceid, int numThreads,
+      BlockingQueue<Runnable> queue) {
+    String name = "compaction." + ceid;
+    ThreadPoolExecutor tp = new ThreadPoolExecutor(numThreads, numThreads, 60, TimeUnit.SECONDS,
+        queue, new NamingThreadFactory("compaction." + ceid));
+    tp.allowCoreThreadTimeOut(true);
+    return addEs(name, tp);
+  }
+
   @SuppressFBWarnings(value = "DM_GC",
       justification = "GC is run to get a good estimate of memory availability")
   public TabletServerResourceManager(ServerContext context) {
@@ -384,13 +384,6 @@ public class TabletServerResourceManager {
 
     minorCompactionThreadPool = createEs(Property.TSERV_MINC_MAXCONCURRENT, "minor compactor");
 
-    // make this thread pool have a priority queue... and execute tablets with the most
-    // files first!
-    majorCompactionThreadPool = createEs(Property.TSERV_MAJC_MAXCONCURRENT, "major compactor",
-        new CompactionQueue().asBlockingQueueOfRunnable());
-    rootMajorCompactionThreadPool = createEs(300, "md root major compactor");
-    defaultMajorCompactionThreadPool = createEs(300, "md major compactor");
-
     splitThreadPool = createEs();
     defaultSplitThreadPool = createEs(60, "md splitter");
 
@@ -733,8 +726,6 @@ public class TabletServerResourceManager {
 
   public class TabletResourceManager {
 
-    private final long creationTime = System.currentTimeMillis();
-
     private volatile boolean openFilesReserved = false;
 
     private volatile boolean closed = false;
@@ -820,52 +811,6 @@ public class TabletServerResourceManager {
 
     // END methods that Tablets call to manage memory
 
-    // BEGIN methods that Tablets call to make decisions about major compaction
-    // when too many files are open, we may want tablets to compact down
-    // to one map file
-    public boolean needsMajorCompaction(SortedMap<StoredTabletFile,DataFileValue> tabletFiles,
-        MajorCompactionReason reason) {
-      if (closed) {
-        return false;// throw new IOException("closed");
-      }
-
-      // int threshold;
-
-      if (reason == MajorCompactionReason.USER) {
-        return true;
-      }
-
-      if (reason == MajorCompactionReason.IDLE) {
-        // threshold = 1;
-        long idleTime;
-        if (lastReportedCommitTime == 0) {
-          // no commits, so compute how long the tablet has been assigned to the
-          // tablet server
-          idleTime = System.currentTimeMillis() - creationTime;
-        } else {
-          idleTime = System.currentTimeMillis() - lastReportedCommitTime;
-        }
-
-        if (idleTime < tableConf.getTimeInMillis(Property.TABLE_MAJC_COMPACTALL_IDLETIME)) {
-          return false;
-        }
-      }
-      CompactionStrategy strategy = Property.createTableInstanceFromPropertyName(tableConf,
-          Property.TABLE_COMPACTION_STRATEGY, CompactionStrategy.class,
-          new DefaultCompactionStrategy());
-      strategy.init(Property.getCompactionStrategyOptions(tableConf));
-      MajorCompactionRequest request =
-          new MajorCompactionRequest(extent, reason, tableConf, context);
-      request.setFiles(tabletFiles);
-      try {
-        return strategy.shouldCompact(request);
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    }
-
-    // END methods that Tablets call to make decisions about major compaction
-
     // tablets call this method to run minor compactions,
     // this allows us to control how many minor compactions
     // run concurrently in a tablet server
@@ -895,11 +840,6 @@ public class TabletServerResourceManager {
     public TabletServerResourceManager getTabletServerResourceManager() {
       return TabletServerResourceManager.this;
     }
-
-    public void executeMajorCompaction(KeyExtent tablet, Runnable compactionTask) {
-      TabletServerResourceManager.this.executeMajorCompaction(tablet, compactionTask);
-    }
-
   }
 
   public void executeSplit(KeyExtent tablet, Runnable splitTask) {
@@ -914,16 +854,6 @@ public class TabletServerResourceManager {
     }
   }
 
-  public void executeMajorCompaction(KeyExtent tablet, Runnable compactionTask) {
-    if (tablet.isRootTablet()) {
-      rootMajorCompactionThreadPool.execute(compactionTask);
-    } else if (tablet.isMeta()) {
-      defaultMajorCompactionThreadPool.execute(compactionTask);
-    } else {
-      majorCompactionThreadPool.execute(compactionTask);
-    }
-  }
-
   @SuppressWarnings("deprecation")
   private static abstract class DispatchParamsImpl implements DispatchParameters,
       org.apache.accumulo.core.spi.scan.ScanDispatcher.DispatchParmaters {
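
createCompactionExecutor above builds one fixed-size pool per compaction executor, with idle core threads allowed to time out so unused executors release their threads. Here is a standalone sketch of that pool configuration using only java.util.concurrent; the naming thread factory and the resource manager's executor registry are omitted, and the unbounded queue stands in for whatever queue the caller supplies.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class CompactionPoolSketch {
      static ThreadPoolExecutor newCompactionPool(String name, int numThreads) {
        // Core size == max size, 60 second keep-alive, and core threads may time out when idle.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(numThreads, numThreads, 60,
            TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
            r -> new Thread(r, "compaction." + name));
        pool.allowCoreThreadTimeOut(true);
        return pool;
      }
    }
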
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/ThriftClientHandler.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/ThriftClientHandler.java
index de18ded..b600e65 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/ThriftClientHandler.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/ThriftClientHandler.java
@@ -45,6 +45,7 @@ import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Durability;
 import org.apache.accumulo.core.client.SampleNotPresentException;
 import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.CompactionConfig;
 import org.apache.accumulo.core.clientImpl.CompressedIterators;
 import org.apache.accumulo.core.clientImpl.DurabilityImpl;
 import org.apache.accumulo.core.clientImpl.Tables;
@@ -120,7 +121,6 @@ import org.apache.accumulo.fate.zookeeper.ZooUtil;
 import org.apache.accumulo.server.client.ClientServiceHandler;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.data.ServerMutation;
-import org.apache.accumulo.server.master.tableOps.UserCompactionConfig;
 import org.apache.accumulo.server.rpc.TServerUtils;
 import org.apache.accumulo.server.util.Halt;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher;
@@ -1605,30 +1605,23 @@ class ThriftClientHandler extends ClientServiceHandler implements TabletClientSe
     KeyExtent ke = new KeyExtent(TableId.of(tableId), ByteBufferUtil.toText(endRow),
         ByteBufferUtil.toText(startRow));
 
-    ArrayList<Tablet> tabletsToCompact = new ArrayList<>();
+    Pair<Long,CompactionConfig> compactionInfo = null;
 
     for (Tablet tablet : server.getOnlineTablets().values()) {
       if (ke.overlaps(tablet.getExtent())) {
-        tabletsToCompact.add(tablet);
-      }
-    }
-
-    Pair<Long,UserCompactionConfig> compactionInfo = null;
-
-    for (Tablet tablet : tabletsToCompact) {
-      // all for the same table id, so only need to read
-      // compaction id once
-      if (compactionInfo == null) {
-        try {
-          compactionInfo = tablet.getCompactionID();
-        } catch (NoNodeException e) {
-          log.info("Asked to compact table with no compaction id {} {}", ke, e.getMessage());
-          return;
+        // all for the same table id, so only need to read
+        // compaction id once
+        if (compactionInfo == null) {
+          try {
+            compactionInfo = tablet.getCompactionID();
+          } catch (NoNodeException e) {
+            log.info("Asked to compact table with no compaction id {} {}", ke, e.getMessage());
+            return;
+          }
         }
+        tablet.compactAll(compactionInfo.getFirst(), compactionInfo.getSecond());
       }
-      tablet.compactAll(compactionInfo.getFirst(), compactionInfo.getSecond());
     }
-
   }
 
   @Override
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/CompactionPlan.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/CompactionPlan.java
index 8983310..c270644 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/CompactionPlan.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/CompactionPlan.java
@@ -33,6 +33,7 @@ import com.google.common.collect.Sets;
  * A plan for a compaction: the input files, the files that are *not* inputs to a compaction that
  * should simply be deleted, and the optional parameters used to create the resulting output file.
  */
+@Deprecated(forRemoval = true, since = "2.1.0")
 public class CompactionPlan {
   public final List<StoredTabletFile> inputFiles = new ArrayList<>();
   public final List<StoredTabletFile> deleteFiles = new ArrayList<>();
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/CompactionStrategy.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/CompactionStrategy.java
index b2430f0..d67dd5d 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/CompactionStrategy.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/CompactionStrategy.java
@@ -21,6 +21,10 @@ package org.apache.accumulo.tserver.compaction;
 import java.io.IOException;
 import java.util.Map;
 
+import org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer;
+import org.apache.accumulo.core.client.admin.compaction.CompactionSelector;
+import org.apache.accumulo.core.spi.compaction.CompactionPlanner;
+
 /**
  * The interface for customizing major compactions.
  * <p>
@@ -36,7 +40,14 @@ import java.util.Map;
  * <p>
  * <b>Note:</b> the strategy object used for the {@link #shouldCompact(MajorCompactionRequest)} call
  * is going to be different from the one used in the compaction thread.
+ *
+ * @deprecated since 2.1.0 use {@link CompactionSelector}, {@link CompactionConfigurer}, and
+ *             {@link CompactionPlanner} instead. See
+ *             {@link org.apache.accumulo.core.client.admin.CompactionStrategyConfig} for more
+ *             information about why this was deprecated.
+ * @see org.apache.accumulo.core.spi.compaction
  */
+@Deprecated(since = "2.1.0", forRemoval = true)
 public abstract class CompactionStrategy {
   /**
    * The settings for the compaction strategy pulled from zookeeper. The
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/DefaultCompactionStrategy.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/DefaultCompactionStrategy.java
index 45ef95e..270c387 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/DefaultCompactionStrategy.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/DefaultCompactionStrategy.java
@@ -32,6 +32,7 @@ import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
+@SuppressWarnings("removal")
 public class DefaultCompactionStrategy extends CompactionStrategy {
 
   /**
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java
index 4a1b5ba..bd476c8 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/EverythingCompactionStrategy.java
@@ -22,7 +22,7 @@ package org.apache.accumulo.tserver.compaction;
  * The default compaction strategy for user initiated compactions. This strategy will always select
  * all files.
  */
-
+@Deprecated(since = "2.1.0", forRemoval = true)
 public class EverythingCompactionStrategy extends CompactionStrategy {
 
   @Override
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java
index 792c0b6..7a3c4d4 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java
@@ -18,8 +18,10 @@
  */
 package org.apache.accumulo.tserver.compaction;
 
+@Deprecated(since = "2.1.0", forRemoval = true)
 public enum MajorCompactionReason {
   // do not change the order, the order of this enum determines the order
   // in which queued major compactions are executed
   USER, CHOP, NORMAL, IDLE
+
 }
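
The old compaction-strategy classes are annotated @Deprecated(since = "2.1.0", forRemoval = true), and subclasses the project still ships (for example DefaultCompactionStrategy above) suppress the resulting "removal" warning. A tiny self-contained illustration of that Java mechanism follows; the class names are placeholders, not Accumulo types.

    public class DeprecationSketch {

      @Deprecated(since = "2.1.0", forRemoval = true)
      abstract static class OldStrategy {
        abstract void plan();
      }

      // Extending a type that is deprecated for removal triggers a "removal" warning
      // unless it is suppressed, as done for the shipped strategy subclasses above.
      @SuppressWarnings("removal")
      static class LegacyStrategy extends OldStrategy {
        @Override
        void plan() {}
      }
    }
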
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionRequest.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionRequest.java
index 598df0a..d910805 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionRequest.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionRequest.java
@@ -60,6 +60,7 @@ import com.google.common.cache.Cache;
 /**
  * Information that can be used to determine how a tablet is to be major compacted, if needed.
  */
+@Deprecated(since = "2.1.0", forRemoval = true)
 public class MajorCompactionRequest implements Cloneable {
   private final KeyExtent extent;
   private final MajorCompactionReason reason;
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/SizeLimitCompactionStrategy.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/SizeLimitCompactionStrategy.java
index 79b4320..f563ae5 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/SizeLimitCompactionStrategy.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/SizeLimitCompactionStrategy.java
@@ -31,6 +31,7 @@ import org.apache.accumulo.tserver.compaction.strategies.BasicCompactionStrategy
 /**
  * {@link BasicCompactionStrategy} offers the same functionality as this class and more.
  */
+@Deprecated(since = "2.1.0", forRemoval = true)
 public class SizeLimitCompactionStrategy extends DefaultCompactionStrategy {
   public static final String SIZE_LIMIT_OPT = "sizeLimit";
 
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/WriteParameters.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/WriteParameters.java
index 42c4df9..3425ab0 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/WriteParameters.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/WriteParameters.java
@@ -20,6 +20,7 @@ package org.apache.accumulo.tserver.compaction;
 
 import static com.google.common.base.Preconditions.checkArgument;
 
+@Deprecated(forRemoval = true, since = "2.1.0")
 public class WriteParameters {
   private String compressType = null;
   private long hdfsBlockSize = 0;
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/BasicCompactionStrategy.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/BasicCompactionStrategy.java
index 4c5ce35..8550a6a 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/BasicCompactionStrategy.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/BasicCompactionStrategy.java
@@ -22,10 +22,12 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.accumulo.core.client.admin.compaction.CompressionConfigurer;
 import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.metadata.StoredTabletFile;
 import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.spi.compaction.DefaultCompactionPlanner;
 import org.apache.accumulo.tserver.compaction.CompactionPlan;
 import org.apache.accumulo.tserver.compaction.DefaultCompactionStrategy;
 import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
@@ -61,7 +63,12 @@ import org.slf4j.LoggerFactory;
  *
  * <p>
  * The options that take sizes are in bytes and the suffixes K,M,and G can be used.
+ *
+ * @deprecated since 2.1.0 see {@link CompressionConfigurer}. Also compaction planners introduced in
+ *             2.1.0 have the ability to avoid compacting files over a certain size. See
+ *             {@link DefaultCompactionPlanner}
  */
+@Deprecated(since = "2.1.0", forRemoval = true)
 public class BasicCompactionStrategy extends DefaultCompactionStrategy {
 
   private static final Logger log = LoggerFactory.getLogger(BasicCompactionStrategy.class);
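
The deprecation note above points at CompressionConfigurer for per-compaction compression settings. A rough sketch, not part of this commit, of attaching it to a user compaction; it assumes the CompactionConfig in this commit exposes setConfigurer(PluginConfig), and the option keys and values are illustrative only (consult the CompressionConfigurer javadoc added by this commit for the real names):

    import java.util.Map;

    import org.apache.accumulo.core.client.Accumulo;
    import org.apache.accumulo.core.client.AccumuloClient;
    import org.apache.accumulo.core.client.admin.CompactionConfig;
    import org.apache.accumulo.core.client.admin.PluginConfig;
    import org.apache.accumulo.core.client.admin.compaction.CompressionConfigurer;

    public class CompressLargeFilesExample {
      public static void main(String[] args) throws Exception {
        // option keys below are assumptions for illustration, not authoritative
        var configurer = new PluginConfig(CompressionConfigurer.class.getName(),
            Map.of("large.compress.type", "gz", "large.compress.threshold", "100M"));
        try (AccumuloClient client = Accumulo.newClient().from("client.properties").build()) {
          client.tableOperations().compact("myTable",
              new CompactionConfig().setWait(true).setConfigurer(configurer));
        }
      }
    }
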
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/ConfigurableCompactionStrategy.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/ConfigurableCompactionStrategy.java
index fc2efdf..41baec9 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/ConfigurableCompactionStrategy.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/ConfigurableCompactionStrategy.java
@@ -18,8 +18,6 @@
  */
 package org.apache.accumulo.tserver.compaction.strategies;
 
-import java.io.IOException;
-import java.io.UncheckedIOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -28,36 +26,32 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Optional;
 import java.util.Set;
 import java.util.regex.Pattern;
+import java.util.stream.Collectors;
 
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer;
+import org.apache.accumulo.core.client.admin.compaction.CompactionSelector;
 import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
 import org.apache.accumulo.core.client.summary.Summary;
 import org.apache.accumulo.core.compaction.CompactionSettings;
 import org.apache.accumulo.core.conf.ConfigurationCopy;
-import org.apache.accumulo.core.file.FileSKVIterator;
-import org.apache.accumulo.core.metadata.StoredTabletFile;
-import org.apache.accumulo.core.metadata.TabletFile;
-import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
-import org.apache.accumulo.tserver.compaction.CompactionPlan;
-import org.apache.accumulo.tserver.compaction.CompactionStrategy;
-import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
-import org.apache.accumulo.tserver.compaction.WriteParameters;
 import org.apache.hadoop.fs.Path;
 
 /**
  * The compaction strategy used by the shell compact command.
  */
-public class ConfigurableCompactionStrategy extends CompactionStrategy {
+public class ConfigurableCompactionStrategy implements CompactionSelector, CompactionConfigurer {
 
   private abstract static class Test {
-    // Do any work that blocks in this method. This method is not always called before
-    // shouldCompact(). See CompactionStrategy javadocs.
-    void gatherInformation(MajorCompactionRequest request) {}
-
-    abstract boolean shouldCompact(Entry<StoredTabletFile,DataFileValue> file,
-        MajorCompactionRequest request);
+    abstract Set<CompactableFile> getFilesToCompact(SelectionParameters params);
   }
 
   private static class SummaryTest extends Test {
@@ -65,125 +59,74 @@ public class ConfigurableCompactionStrategy extends CompactionStrategy {
     private boolean selectExtraSummary;
     private boolean selectNoSummary;
 
-    private boolean summaryConfigured = true;
-    private boolean gatherCalled = false;
-
-    // files that do not need compaction
-    private Set<TabletFile> okFiles = Collections.emptySet();
-
     public SummaryTest(boolean selectExtraSummary, boolean selectNoSummary) {
       this.selectExtraSummary = selectExtraSummary;
       this.selectNoSummary = selectNoSummary;
     }
 
     @Override
-    void gatherInformation(MajorCompactionRequest request) {
-      gatherCalled = true;
-      Collection<SummarizerConfiguration> configs =
-          SummarizerConfiguration.fromTableProperties(request.getTableProperties());
+    Set<CompactableFile> getFilesToCompact(SelectionParameters params) {
+
+      Collection<SummarizerConfiguration> configs = SummarizerConfiguration
+          .fromTableProperties(params.getEnvironment().getConfiguration(params.getTableId()));
+
       if (configs.isEmpty()) {
-        summaryConfigured = false;
+        return Set.of();
       } else {
+        Set<CompactableFile> filesToCompact = new HashSet<>();
         Set<SummarizerConfiguration> configsSet = configs instanceof Set
             ? (Set<SummarizerConfiguration>) configs : new HashSet<>(configs);
-        okFiles = new HashSet<>();
 
-        for (StoredTabletFile tabletFile : request.getFiles().keySet()) {
+        for (CompactableFile tabletFile : params.getAvailableFiles()) {
           Map<SummarizerConfiguration,Summary> sMap = new HashMap<>();
           Collection<Summary> summaries;
           summaries =
-              request.getSummaries(Collections.singletonList(tabletFile), configsSet::contains);
+              params.getSummaries(Collections.singletonList(tabletFile), configsSet::contains);
           for (Summary summary : summaries) {
             sMap.put(summary.getSummarizerConfiguration(), summary);
           }
 
-          boolean needsCompaction = false;
           for (SummarizerConfiguration sc : configs) {
             Summary summary = sMap.get(sc);
 
             if (summary == null && selectNoSummary) {
-              needsCompaction = true;
+              filesToCompact.add(tabletFile);
               break;
             }
 
             if (summary != null && summary.getFileStatistics().getExtra() > 0
                 && selectExtraSummary) {
-              needsCompaction = true;
+              filesToCompact.add(tabletFile);
               break;
             }
           }
-
-          if (!needsCompaction) {
-            okFiles.add(tabletFile);
-          }
         }
+        return filesToCompact;
       }
-
-    }
-
-    @Override
-    public boolean shouldCompact(Entry<StoredTabletFile,DataFileValue> file,
-        MajorCompactionRequest request) {
-
-      if (!gatherCalled) {
-        Collection<SummarizerConfiguration> configs =
-            SummarizerConfiguration.fromTableProperties(request.getTableProperties());
-        return !configs.isEmpty();
-      }
-
-      if (!summaryConfigured) {
-        return false;
-      }
-
-      // Its possible the set of files could change between gather and now. So this will default to
-      // compacting any files that are unknown.
-      return !okFiles.contains(file.getKey());
     }
   }
 
   private static class NoSampleTest extends Test {
 
-    private Set<TabletFile> filesWithSample = Collections.emptySet();
-    private boolean samplingConfigured = true;
-    private boolean gatherCalled = false;
-
     @Override
-    void gatherInformation(MajorCompactionRequest request) {
-      gatherCalled = true;
-
-      SamplerConfigurationImpl sc = SamplerConfigurationImpl
-          .newSamplerConfig(new ConfigurationCopy(request.getTableProperties()));
-      if (sc == null) {
-        samplingConfigured = false;
-      } else {
-        filesWithSample = new HashSet<>();
-        for (TabletFile file : request.getFiles().keySet()) {
-          try (FileSKVIterator reader = request.openReader(file)) {
-            if (reader.getSample(sc) != null) {
-              filesWithSample.add(file);
-            }
-          } catch (IOException e) {
-            throw new UncheckedIOException(e);
-          }
-        }
-      }
-    }
+    Set<CompactableFile> getFilesToCompact(SelectionParameters params) {
+      SamplerConfigurationImpl sc = SamplerConfigurationImpl.newSamplerConfig(
+          new ConfigurationCopy(params.getEnvironment().getConfiguration(params.getTableId())));
 
-    @Override
-    public boolean shouldCompact(Entry<StoredTabletFile,DataFileValue> file,
-        MajorCompactionRequest request) {
+      if (sc == null)
+        return Set.of();
 
-      if (!gatherCalled) {
-        SamplerConfigurationImpl sc = SamplerConfigurationImpl
-            .newSamplerConfig(new ConfigurationCopy(request.getTableProperties()));
-        return sc != null;
-      }
+      Set<CompactableFile> filesToCompact = new HashSet<>();
+      for (CompactableFile tabletFile : params.getAvailableFiles()) {
+        Optional<SortedKeyValueIterator<Key,Value>> sample =
+            params.getSample(tabletFile, sc.toSamplerConfiguration());
 
-      if (!samplingConfigured) {
-        return false;
+        if (sample.isEmpty()) {
+          filesToCompact.add(tabletFile);
+        }
       }
 
-      return !filesWithSample.contains(file.getKey());
+      return filesToCompact;
     }
   }
 
@@ -195,9 +138,9 @@ public class ConfigurableCompactionStrategy extends CompactionStrategy {
     }
 
     @Override
-    public boolean shouldCompact(Entry<StoredTabletFile,DataFileValue> file,
-        MajorCompactionRequest request) {
-      return shouldCompact(file.getValue().getSize(), esize);
+    Set<CompactableFile> getFilesToCompact(SelectionParameters params) {
+      return params.getAvailableFiles().stream()
+          .filter(cf -> shouldCompact(cf.getEstimatedSize(), esize)).collect(Collectors.toSet());
     }
 
     public abstract boolean shouldCompact(long fsize, long esize);
@@ -211,9 +154,10 @@ public class ConfigurableCompactionStrategy extends CompactionStrategy {
     }
 
     @Override
-    public boolean shouldCompact(Entry<StoredTabletFile,DataFileValue> file,
-        MajorCompactionRequest request) {
-      return pattern.matcher(getInput(file.getKey().getPath())).matches();
+    Set<CompactableFile> getFilesToCompact(SelectionParameters params) {
+      return params.getAvailableFiles().stream()
+          .filter(cf -> pattern.matcher(getInput(new Path(cf.getUri()))).matches())
+          .collect(Collectors.toSet());
     }
 
     public abstract String getInput(Path path);
@@ -223,15 +167,49 @@ public class ConfigurableCompactionStrategy extends CompactionStrategy {
   private List<Test> tests = new ArrayList<>();
   private boolean andTest = true;
   private int minFiles = 1;
-  private WriteParameters writeParams = new WriteParameters();
+  private Map<String,String> overrides = new HashMap<>();
+
+  @Override
+  public void init(
+      org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer.InitParamaters iparams) {
+    Set<Entry<String,String>> es = iparams.getOptions().entrySet();
+    for (Entry<String,String> entry : es) {
+
+      switch (CompactionSettings.valueOf(entry.getKey())) {
+        case OUTPUT_COMPRESSION_OPT:
+          overrides.put(Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), entry.getValue());
+          break;
+        case OUTPUT_BLOCK_SIZE_OPT:
+          overrides.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), entry.getValue());
+          break;
+        case OUTPUT_INDEX_BLOCK_SIZE_OPT:
+          overrides.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey(), entry.getValue());
+          break;
+        case OUTPUT_HDFS_BLOCK_SIZE_OPT:
+          overrides.put(Property.TABLE_FILE_BLOCK_SIZE.getKey(), entry.getValue());
+          break;
+        case OUTPUT_REPLICATION_OPT:
+          overrides.put(Property.TABLE_FILE_REPLICATION.getKey(), entry.getValue());
+          break;
+        default:
+          throw new IllegalArgumentException("Unknown option " + entry.getKey());
+      }
+    }
+
+  }
 
   @Override
-  public void init(Map<String,String> options) {
+  public Overrides override(InputParameters params) {
+    return new Overrides(overrides);
+  }
 
+  @Override
+  public void init(
+      org.apache.accumulo.core.client.admin.compaction.CompactionSelector.InitParamaters iparams) {
     boolean selectNoSummary = false;
     boolean selectExtraSummary = false;
 
-    Set<Entry<String,String>> es = options.entrySet();
+    Set<Entry<String,String>> es = iparams.getOptions().entrySet();
     for (Entry<String,String> entry : es) {
 
       switch (CompactionSettings.valueOf(entry.getKey())) {
@@ -279,21 +257,6 @@ public class ConfigurableCompactionStrategy extends CompactionStrategy {
         case MIN_FILES_OPT:
           minFiles = Integer.parseInt(entry.getValue());
           break;
-        case OUTPUT_COMPRESSION_OPT:
-          writeParams.setCompressType(entry.getValue());
-          break;
-        case OUTPUT_BLOCK_SIZE_OPT:
-          writeParams.setBlockSize(Long.parseLong(entry.getValue()));
-          break;
-        case OUTPUT_INDEX_BLOCK_SIZE_OPT:
-          writeParams.setIndexBlockSize(Long.parseLong(entry.getValue()));
-          break;
-        case OUTPUT_HDFS_BLOCK_SIZE_OPT:
-          writeParams.setHdfsBlockSize(Long.parseLong(entry.getValue()));
-          break;
-        case OUTPUT_REPLICATION_OPT:
-          writeParams.setReplication(Integer.parseInt(entry.getValue()));
-          break;
         default:
           throw new IllegalArgumentException("Unknown option " + entry.getKey());
       }
@@ -302,54 +265,30 @@ public class ConfigurableCompactionStrategy extends CompactionStrategy {
     if (selectExtraSummary || selectNoSummary) {
       tests.add(new SummaryTest(selectExtraSummary, selectNoSummary));
     }
-
-  }
-
-  private List<StoredTabletFile> getFilesToCompact(MajorCompactionRequest request) {
-    List<StoredTabletFile> filesToCompact = new ArrayList<>();
-
-    for (Entry<StoredTabletFile,DataFileValue> entry : request.getFiles().entrySet()) {
-      boolean compact = false;
-      for (Test test : tests) {
-        if (andTest) {
-          compact = test.shouldCompact(entry, request);
-          if (!compact)
-            break;
-        } else {
-          compact |= test.shouldCompact(entry, request);
-        }
-      }
-
-      if (compact || tests.isEmpty())
-        filesToCompact.add(entry.getKey());
-    }
-    return filesToCompact;
   }
 
   @Override
-  public boolean shouldCompact(MajorCompactionRequest request) {
-    return getFilesToCompact(request).size() >= minFiles;
-  }
+  public Selection select(SelectionParameters sparams) {
+
+    Set<CompactableFile> filesToCompact =
+        tests.isEmpty() ? new HashSet<>(sparams.getAvailableFiles()) : null;
 
-  @Override
-  public void gatherInformation(MajorCompactionRequest request) {
-    // Gather any information that requires blocking calls here. This is only called before
-    // getCompactionPlan() is called.
     for (Test test : tests) {
-      test.gatherInformation(request);
+      var files = test.getFilesToCompact(sparams);
+      if (filesToCompact == null) {
+        filesToCompact = files;
+      } else if (andTest) {
+        filesToCompact.retainAll(files);
+      } else {
+        filesToCompact.addAll(files);
+      }
     }
-  }
 
-  @Override
-  public CompactionPlan getCompactionPlan(MajorCompactionRequest request) {
-    List<StoredTabletFile> filesToCompact = getFilesToCompact(request);
-    if (filesToCompact.size() >= minFiles) {
-      CompactionPlan plan = new CompactionPlan();
-      plan.inputFiles.addAll(filesToCompact);
-      plan.writeParameters = writeParams;
-
-      return plan;
+    if (filesToCompact.size() < minFiles) {
+      return new Selection(Set.of());
     }
-    return null;
+
+    return new Selection(filesToCompact);
   }
+
 }
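
The rewrite above shows the shape of the new selection SPI: init reads options, select filters params.getAvailableFiles(), and the chosen files are wrapped in a Selection. A minimal sketch, not from this commit, of a selector that keeps only files above a size threshold; type and method names follow the spellings used in this diff and should be treated as assumptions:

    import java.util.Set;
    import java.util.stream.Collectors;

    import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
    import org.apache.accumulo.core.client.admin.compaction.CompactionSelector;

    public class MinSizeSelector implements CompactionSelector {

      private long minSize;

      @Override
      public void init(InitParamaters iparams) {
        // "minSize" is a hypothetical option name used only for this sketch
        minSize = Long.parseLong(iparams.getOptions().getOrDefault("minSize", "0"));
      }

      @Override
      public Selection select(SelectionParameters sparams) {
        // keep only files whose estimated size meets the threshold
        Set<CompactableFile> files = sparams.getAvailableFiles().stream()
            .filter(cf -> cf.getEstimatedSize() >= minSize).collect(Collectors.toSet());
        return new Selection(files);
      }
    }
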
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/TooManyDeletesCompactionStrategy.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/TooManyDeletesCompactionStrategy.java
index f9ca9bf..80f6c28 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/TooManyDeletesCompactionStrategy.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/strategies/TooManyDeletesCompactionStrategy.java
@@ -28,6 +28,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.function.Predicate;
 
+import org.apache.accumulo.core.client.admin.compaction.TooManyDeletesSelector;
 import org.apache.accumulo.core.client.rfile.RFile.WriterOptions;
 import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
 import org.apache.accumulo.core.client.summary.Summary;
@@ -78,7 +79,9 @@ import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
  * href=https://issues.apache.org/jira/browse/ACCUMULO-4573>ACCUMULO-4573</a>
  *
  * @since 2.0.0
+ * @deprecated since 2.1.0 use {@link TooManyDeletesSelector} instead
  */
+@Deprecated(since = "2.1.0", forRemoval = true)
 public class TooManyDeletesCompactionStrategy extends DefaultCompactionStrategy {
 
   private boolean shouldCompact = false;
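
Analogous to the CompressionConfigurer sketch earlier, the deprecated strategy's replacement is plugged in through the compaction config. A fragment reusing the same client setup as that sketch; it assumes the table has DeletesSummarizer configured, that CompactionConfig exposes setSelector(PluginConfig), and that the selector accepts a "threshold" option (all assumptions, not confirmed by this diff):

    // requires an import of org.apache.accumulo.core.client.admin.compaction.TooManyDeletesSelector;
    // the option name and value are illustrative only
    var selector = new PluginConfig(TooManyDeletesSelector.class.getName(),
        Map.of("threshold", "0.25"));
    client.tableOperations().compact("myTable",
        new CompactionConfig().setWait(true).setSelector(selector));
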
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/Compactable.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/Compactable.java
new file mode 100644
index 0000000..44c0022
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/Compactable.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.tserver.compactions;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.stream.Collectors;
+
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.dataImpl.KeyExtent;
+import org.apache.accumulo.core.metadata.CompactableFileImpl;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.spi.compaction.CompactionJob;
+import org.apache.accumulo.core.spi.compaction.CompactionKind;
+import org.apache.accumulo.core.spi.compaction.CompactionServiceId;
+
+/**
+ * Interface between compaction service and tablet.
+ */
+public interface Compactable {
+
+  public static class Files {
+
+    public final Set<CompactableFile> allFiles;
+    public final Set<CompactableFile> candidates;
+    public final Collection<CompactionJob> compacting;
+    public final Map<String,String> executionHints;
+
+    public Files(SortedMap<StoredTabletFile,DataFileValue> allFiles,
+        Set<StoredTabletFile> candidates, Collection<CompactionJob> running) {
+      this(allFiles, candidates, running, Map.of());
+    }
+
+    public Files(SortedMap<StoredTabletFile,DataFileValue> allFiles,
+        Set<StoredTabletFile> candidates, Collection<CompactionJob> running,
+        Map<String,String> executionHints) {
+
+      this.allFiles = Collections.unmodifiableSet(allFiles.entrySet().stream()
+          .map(entry -> new CompactableFileImpl(entry.getKey(), entry.getValue()))
+          .collect(Collectors.toSet()));
+      this.candidates = Collections.unmodifiableSet(candidates.stream()
+          .map(stf -> new CompactableFileImpl(stf, allFiles.get(stf))).collect(Collectors.toSet()));
+
+      this.compacting = Set.copyOf(running);
+      this.executionHints = executionHints;
+    }
+
+    @Override
+    public String toString() {
+      return "Files [allFiles=" + allFiles + ", candidates=" + candidates + ", compacting="
+          + compacting + ", hints=" + executionHints + "]";
+    }
+
+  }
+
+  TableId getTableId();
+
+  KeyExtent getExtent();
+
+  Optional<Files> getFiles(CompactionServiceId service, CompactionKind kind);
+
+  // void compact(CompactionJob compactionJob);
+
+  void compact(CompactionServiceId service, CompactionJob job);
+
+  CompactionServiceId getConfiguredService(CompactionKind kind);
+
+  double getCompactionRatio();
+}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/CompactionExecutor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/CompactionExecutor.java
new file mode 100644
index 0000000..f405070
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/CompactionExecutor.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.tserver.compactions;
+
+import java.util.Comparator;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.PriorityBlockingQueue;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
+
+import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
+import org.apache.accumulo.core.spi.compaction.CompactionJob;
+import org.apache.accumulo.core.spi.compaction.CompactionServiceId;
+import org.apache.accumulo.core.util.compaction.CompactionJobPrioritizer;
+import org.apache.accumulo.tserver.TabletServerResourceManager;
+import org.apache.htrace.wrappers.TraceRunnable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+public class CompactionExecutor {
+
+  private static final Logger log = LoggerFactory.getLogger(CompactionExecutor.class);
+
+  private PriorityBlockingQueue<Runnable> queue;
+  private ExecutorService executor;
+  private final CompactionExecutorId ceid;
+  private AtomicLong cancelCount = new AtomicLong();
+
+  private class CompactionTask extends SubmittedJob implements Runnable {
+
+    private AtomicReference<Status> status = new AtomicReference<>(Status.QUEUED);
+    private Compactable compactable;
+    private CompactionServiceId csid;
+    private Consumer<Compactable> completionCallback;
+
+    public CompactionTask(CompactionJob job, Compactable compactable, CompactionServiceId csid,
+        Consumer<Compactable> completionCallback) {
+      super(job);
+      this.compactable = compactable;
+      this.csid = csid;
+      this.completionCallback = completionCallback;
+    }
+
+    @Override
+    public void run() {
+
+      try {
+        if (status.compareAndSet(Status.QUEUED, Status.RUNNING)) {
+          compactable.compact(csid, getJob());
+          completionCallback.accept(compactable);
+        }
+      } catch (Exception e) {
+        log.warn("Compaction failed for {} on {}", compactable.getExtent(), getJob(), e);
+        status.compareAndSet(Status.RUNNING, Status.FAILED);
+      } finally {
+        status.compareAndSet(Status.RUNNING, Status.COMPLETE);
+      }
+    }
+
+    @Override
+    public Status getStatus() {
+      return status.get();
+    }
+
+    @Override
+    public boolean cancel(Status expectedStatus) {
+
+      boolean canceled = false;
+
+      if (expectedStatus == Status.QUEUED) {
+        canceled = status.compareAndSet(expectedStatus, Status.CANCELED);
+      }
+
+      if (canceled && cancelCount.incrementAndGet() % 1024 == 0) {
+        // Need to occasionally clean the queue; it could have canceled tasks with low priority
+        // that hang around. Avoid cleaning it every time something is canceled as that could be
+        // expensive.
+        queue.removeIf(runnable -> ((CompactionTask) runnable).getStatus() == Status.CANCELED);
+      }
+
+      return canceled;
+    }
+
+  }
+
+  private static CompactionJob getJob(Runnable r) {
+    if (r instanceof TraceRunnable) {
+      return getJob(((TraceRunnable) r).getRunnable());
+    }
+
+    if (r instanceof CompactionTask) {
+      return ((CompactionTask) r).getJob();
+    }
+
+    throw new IllegalArgumentException("Unknown runnable type " + r.getClass().getName());
+  }
+
+  CompactionExecutor(CompactionExecutorId ceid, int threads, TabletServerResourceManager tsrm) {
+    this.ceid = ceid;
+    var comparator =
+        Comparator.comparing(CompactionExecutor::getJob, CompactionJobPrioritizer.JOB_COMPARATOR);
+
+    queue = new PriorityBlockingQueue<Runnable>(100, comparator);
+
+    executor = tsrm.createCompactionExecutor(ceid, threads, queue);
+  }
+
+  public SubmittedJob submit(CompactionServiceId csid, CompactionJob job, Compactable compactable,
+      Consumer<Compactable> completionCallback) {
+    Preconditions.checkArgument(job.getExecutor().equals(ceid));
+    var ctask = new CompactionTask(job, compactable, csid, completionCallback);
+    executor.execute(ctask);
+    return ctask;
+  }
+}
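
The cancel() method above leaves canceled tasks in the priority queue and only sweeps them out once every 1024 cancellations, trading a little temporary queue bloat for cheap individual cancels. A generic, self-contained illustration of that amortized-sweep pattern (not from this commit):

    import java.util.Comparator;
    import java.util.concurrent.PriorityBlockingQueue;
    import java.util.concurrent.atomic.AtomicLong;
    import java.util.function.Predicate;

    class LazySweepQueue<T> {
      private final PriorityBlockingQueue<T> queue;
      private final AtomicLong cancelCount = new AtomicLong();
      private final Predicate<T> isCanceled;

      LazySweepQueue(Comparator<T> cmp, Predicate<T> isCanceled) {
        this.queue = new PriorityBlockingQueue<>(100, cmp);
        this.isCanceled = isCanceled;
      }

      void add(T task) {
        queue.add(task);
      }

      T poll() {
        return queue.poll();
      }

      // Call after marking a task canceled. The queue is only swept about once per 1024
      // cancellations, so each individual cancel stays cheap while stale entries are
      // still reclaimed eventually.
      void taskCanceled() {
        if (cancelCount.incrementAndGet() % 1024 == 0) {
          queue.removeIf(isCanceled);
        }
      }
    }
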
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/CompactionManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/CompactionManager.java
new file mode 100644
index 0000000..f19d983
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/CompactionManager.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.tserver.compactions;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.dataImpl.KeyExtent;
+import org.apache.accumulo.core.spi.compaction.CompactionKind;
+import org.apache.accumulo.core.spi.compaction.CompactionServiceId;
+import org.apache.accumulo.core.spi.compaction.CompactionServices;
+import org.apache.accumulo.core.spi.compaction.DefaultCompactionPlanner;
+import org.apache.accumulo.core.util.NamingThreadFactory;
+import org.apache.accumulo.fate.util.Retry;
+import org.apache.accumulo.server.ServerContext;
+import org.apache.accumulo.tserver.TabletServerResourceManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class CompactionManager {
+
+  private static final Logger log = LoggerFactory.getLogger(CompactionManager.class);
+
+  private Iterable<Compactable> compactables;
+  private Map<CompactionServiceId,CompactionService> services;
+
+  private LinkedBlockingQueue<Compactable> compactablesToCheck = new LinkedBlockingQueue<>();
+
+  private long maxTimeBetweenChecks;
+
+  private void mainLoop() {
+    long lastCheckAllTime = System.nanoTime();
+
+    long increment = Math.max(1, maxTimeBetweenChecks / 10);
+
+    var retryFactory = Retry.builder().infiniteRetries()
+        .retryAfter(increment, TimeUnit.MILLISECONDS).incrementBy(increment, TimeUnit.MILLISECONDS)
+        .maxWait(maxTimeBetweenChecks, TimeUnit.MILLISECONDS).backOffFactor(1.07)
+        .logInterval(1, TimeUnit.MINUTES).createFactory();
+    var retry = retryFactory.createRetry();
+    Compactable last = null;
+
+    while (true) {
+      try {
+        long passed = TimeUnit.MILLISECONDS.convert(System.nanoTime() - lastCheckAllTime,
+            TimeUnit.NANOSECONDS);
+        if (passed >= maxTimeBetweenChecks) {
+          for (Compactable compactable : compactables) {
+            last = compactable;
+            compact(compactable);
+          }
+          lastCheckAllTime = System.nanoTime();
+        } else {
+          var compactable =
+              compactablesToCheck.poll(maxTimeBetweenChecks - passed, TimeUnit.MILLISECONDS);
+          if (compactable != null) {
+            last = compactable;
+            compact(compactable);
+          }
+        }
+
+        last = null;
+        if (retry.hasRetried())
+          retry = retryFactory.createRetry();
+
+      } catch (Exception e) {
+        var extent = last == null ? null : last.getExtent();
+        log.warn("Failed to compact {}", extent, e);
+        retry.useRetry();
+        try {
+          retry.waitForNextAttempt();
+        } catch (InterruptedException e1) {
+          log.debug("Retry interrupted", e1);
+        }
+      }
+    }
+  }
+
+  private void compact(Compactable compactable) {
+    for (CompactionKind ctype : CompactionKind.values()) {
+      services.get(compactable.getConfiguredService(ctype)).compact(ctype, compactable,
+          compactablesToCheck::add);
+    }
+  }
+
+  public CompactionManager(Iterable<Compactable> compactables, ServerContext ctx,
+      TabletServerResourceManager resourceManager) {
+    this.compactables = compactables;
+
+    Map<String,String> configs =
+        ctx.getConfiguration().getAllPropertiesWithPrefix(Property.TSERV_COMPACTION_SERVICE_PREFIX);
+
+    Map<CompactionServiceId,CompactionService> tmpServices = new HashMap<>();
+
+    Map<String,String> planners = new HashMap<>();
+    Map<String,Map<String,String>> options = new HashMap<>();
+
+    configs.forEach((prop, val) -> {
+      var suffix = prop.substring(Property.TSERV_COMPACTION_SERVICE_PREFIX.getKey().length());
+      String[] tokens = suffix.split("\\.");
+      if (tokens.length == 4 && tokens[1].equals("planner") && tokens[2].equals("opts")) {
+        options.computeIfAbsent(tokens[0], k -> new HashMap<>()).put(tokens[3], val);
+      } else if (tokens.length == 2 && tokens[1].equals("planner")) {
+        planners.put(tokens[0], val);
+      } else {
+        throw new IllegalArgumentException("Malformed compaction service property " + prop);
+      }
+    });
+
+    options.forEach((serviceName, serviceOptions) -> {
+      tmpServices.put(CompactionServiceId.of(serviceName),
+          new CompactionService(serviceName,
+              planners.getOrDefault(serviceName, DefaultCompactionPlanner.class.getName()),
+              serviceOptions, ctx, resourceManager));
+    });
+
+    this.services = Map.copyOf(tmpServices);
+
+    this.maxTimeBetweenChecks = ctx.getConfiguration().getTimeInMillis(Property.TSERV_MAJC_DELAY);
+  }
+
+  public void compactableChanged(Compactable compactable) {
+    compactablesToCheck.add(compactable);
+  }
+
+  public void start() {
+    log.debug("Started compaction manager");
+    new NamingThreadFactory("Compaction Manager").newThread(() -> mainLoop()).start();
+  }
+
+  public CompactionServices getServices() {
+    return new CompactionServices() {
+      @Override
+      public Set<CompactionServiceId> getIds() {
+        return services.keySet();
+      }
+    };
+  }
+
+  public boolean isCompactionQueued(KeyExtent extent, Set<CompactionServiceId> servicesUsed) {
+    return servicesUsed.stream().map(services::get)
+        .anyMatch(compactionService -> compactionService.isCompactionQueued(extent));
+  }
+}
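
The constructor above discovers compaction services purely from property names: a key ending in ".planner" names the planner class for a service, and keys of the form ".planner.opts.<key>" supply that planner's options, with DefaultCompactionPlanner as the fallback class. A small sketch of property keys the parsing loop would accept; the service name "myservice" and option "someOption" are placeholders, and real option names depend on the planner in use:

    import java.util.Map;

    import org.apache.accumulo.core.conf.Property;
    import org.apache.accumulo.core.spi.compaction.DefaultCompactionPlanner;

    public class CompactionServicePropsExample {
      public static void main(String[] args) {
        String prefix = Property.TSERV_COMPACTION_SERVICE_PREFIX.getKey();
        // these keys match the two token shapes accepted by CompactionManager's parsing loop
        Map<String,String> props = Map.of(
            prefix + "myservice.planner", DefaultCompactionPlanner.class.getName(),
            prefix + "myservice.planner.opts.someOption", "someValue");
        props.forEach((k, v) -> System.out.println(k + "=" + v));
      }
    }
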
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/CompactionService.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/CompactionService.java
new file mode 100644
index 0000000..b8e3a46
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/CompactionService.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.tserver.compactions;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.function.Consumer;
+
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.dataImpl.KeyExtent;
+import org.apache.accumulo.core.spi.common.ServiceEnvironment;
+import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
+import org.apache.accumulo.core.spi.compaction.CompactionJob;
+import org.apache.accumulo.core.spi.compaction.CompactionKind;
+import org.apache.accumulo.core.spi.compaction.CompactionPlan;
+import org.apache.accumulo.core.spi.compaction.CompactionPlanner;
+import org.apache.accumulo.core.spi.compaction.CompactionPlanner.PlanningParameters;
+import org.apache.accumulo.core.spi.compaction.CompactionServiceId;
+import org.apache.accumulo.core.spi.compaction.ExecutorManager;
+import org.apache.accumulo.core.util.compaction.CompactionPlanImpl;
+import org.apache.accumulo.server.ServerContext;
+import org.apache.accumulo.server.ServiceEnvironmentImpl;
+import org.apache.accumulo.tserver.TabletServerResourceManager;
+import org.apache.accumulo.tserver.compactions.SubmittedJob.Status;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+public class CompactionService {
+  private final CompactionPlanner planner;
+  private final Map<CompactionExecutorId,CompactionExecutor> executors;
+  private final CompactionServiceId myId;
+  private Map<KeyExtent,Collection<SubmittedJob>> submittedJobs = new ConcurrentHashMap<>();
+  private ServerContext serverCtx;
+
+  private static final Logger log = LoggerFactory.getLogger(CompactionService.class);
+
+  public CompactionService(String serviceName, String plannerClass,
+      Map<String,String> serviceOptions, ServerContext sctx, TabletServerResourceManager tsrm) {
+
+    this.myId = CompactionServiceId.of(serviceName);
+    this.serverCtx = sctx;
+
+    try {
+      planner =
+          ConfigurationTypeHelper.getClassInstance(null, plannerClass, CompactionPlanner.class);
+    } catch (IOException | ReflectiveOperationException e) {
+      throw new RuntimeException(e);
+    }
+
+    Map<CompactionExecutorId,CompactionExecutor> tmpExecutors = new HashMap<>();
+
+    planner.init(new CompactionPlanner.InitParameters() {
+
+      @Override
+      public ServiceEnvironment getServiceEnvironment() {
+        return new ServiceEnvironmentImpl(sctx);
+      }
+
+      @Override
+      public Map<String,String> getOptions() {
+        return serviceOptions;
+      }
+
+      @Override
+      public ExecutorManager getExecutorManager() {
+        return new ExecutorManager() {
+          @Override
+          public CompactionExecutorId createExecutor(String executorName, int threads) {
+            var ceid = CompactionExecutorId.of(serviceName + "." + executorName);
+            Preconditions.checkState(!tmpExecutors.containsKey(ceid));
+            tmpExecutors.put(ceid, new CompactionExecutor(ceid, threads, tsrm));
+            return ceid;
+          }
+        };
+      }
+
+      @Override
+      public String getFullyQualifiedOption(String key) {
+        return Property.TSERV_COMPACTION_SERVICE_PREFIX.getKey() + serviceName + ".opts." + key;
+      }
+    });
+
+    this.executors = Map.copyOf(tmpExecutors);
+
+    log.debug("Created new compaction service id:{} executors:{}", myId, executors.keySet());
+  }
+
+  private boolean reconcile(Set<CompactionJob> jobs, Collection<SubmittedJob> submitted) {
+    for (SubmittedJob submittedJob : submitted) {
+      // only read status once to avoid race conditions since multiple compares are done
+      var status = submittedJob.getStatus();
+      if (status == Status.QUEUED) {
+        if (!jobs.remove(submittedJob.getJob())) {
+          if (!submittedJob.cancel(Status.QUEUED)) {
+            return false;
+          }
+        }
+      } else if (status == Status.RUNNING) {
+        for (CompactionJob job : jobs) {
+          if (!Collections.disjoint(submittedJob.getJob().getFiles(), job.getFiles())) {
+            return false;
+          }
+        }
+      }
+    }
+
+    return true;
+  }
+
+  public void compact(CompactionKind kind, Compactable compactable,
+      Consumer<Compactable> completionCallback) {
+    var files = compactable.getFiles(myId, kind);
+
+    if (files.isEmpty() || files.get().candidates.isEmpty()) {
+      log.trace("Compactable returned no files {} {} {}", compactable.getExtent(), kind, files);
+      return;
+    }
+
+    PlanningParameters params = new PlanningParameters() {
+
+      @Override
+      public TableId getTableId() {
+        return compactable.getTableId();
+      }
+
+      @Override
+      public ServiceEnvironment getServiceEnvironment() {
+        return new ServiceEnvironmentImpl(serverCtx);
+      }
+
+      @Override
+      public double getRatio() {
+        return compactable.getCompactionRatio();
+      }
+
+      @Override
+      public CompactionKind getKind() {
+        return kind;
+      }
+
+      @Override
+      public Collection<CompactionJob> getRunningCompactions() {
+        return files.get().compacting;
+      }
+
+      @Override
+      public Collection<CompactableFile> getCandidates() {
+        return files.get().candidates;
+      }
+
+      @Override
+      public Collection<CompactableFile> getAll() {
+        return files.get().allFiles;
+      }
+
+      @Override
+      public Map<String,String> getExecutionHints() {
+        if (kind == CompactionKind.USER)
+          return files.get().executionHints;
+        else
+          return Map.of();
+      }
+
+      @Override
+      public CompactionPlan.Builder createPlanBuilder() {
+        return new CompactionPlanImpl.BuilderImpl(kind, files.get().allFiles,
+            files.get().candidates);
+      }
+    };
+
+    log.trace("Planning compactions {} {} {} {}", planner.getClass().getName(),
+        compactable.getExtent(), kind, files);
+
+    CompactionPlan plan;
+    try {
+      plan = planner.makePlan(params);
+    } catch (RuntimeException e) {
+      log.debug("Planner failed {} {} {} {}", planner.getClass().getName(), compactable.getExtent(),
+          kind, files, e);
+      throw e;
+    }
+
+    plan = convertPlan(plan, kind, files.get().allFiles, files.get().candidates);
+
+    Set<CompactionJob> jobs = new HashSet<>(plan.getJobs());
+
+    Collection<SubmittedJob> submitted =
+        submittedJobs.getOrDefault(compactable.getExtent(), List.of());
+    if (!submitted.isEmpty()) {
+      submitted.removeIf(sj -> {
+        // to avoid race conditions, only read status once and use local var for the two compares
+        var status = sj.getStatus();
+        return status != Status.QUEUED && status != Status.RUNNING;
+      });
+    }
+
+    if (reconcile(jobs, submitted)) {
+      for (CompactionJob job : jobs) {
+        var sjob =
+            executors.get(job.getExecutor()).submit(myId, job, compactable, completionCallback);
+        // it's important that the collection created in computeIfAbsent supports concurrency
+        submittedJobs.computeIfAbsent(compactable.getExtent(), k -> new ConcurrentLinkedQueue<>())
+            .add(sjob);
+      }
+
+      if (!jobs.isEmpty()) {
+        log.trace("Submitted compaction plan {} id:{} files:{} plan:{}", compactable.getExtent(),
+            myId, files, plan);
+      }
+    } else {
+      log.trace("Did not submit compaction plan {} id:{} files:{} plan:{}", compactable.getExtent(),
+          myId, files, plan);
+    }
+  }
+
+  private CompactionPlan convertPlan(CompactionPlan plan, CompactionKind kind,
+      Set<CompactableFile> allFiles, Set<CompactableFile> candidates) {
+
+    if (plan.getClass().equals(CompactionPlanImpl.class))
+      return plan;
+
+    var builder = new CompactionPlanImpl.BuilderImpl(kind, allFiles, candidates);
+
+    for (var job : plan.getJobs()) {
+      Preconditions.checkArgument(job.getKind() == kind, "Unexpected compaction kind %s != %s",
+          job.getKind(), kind);
+      builder.addJob(job.getPriority(), job.getExecutor(), job.getFiles());
+    }
+
+    return builder.build();
+  }
+
+  public boolean isCompactionQueued(KeyExtent extent) {
+    return submittedJobs.getOrDefault(extent, List.of()).stream()
+        .anyMatch(job -> job.getStatus() == Status.QUEUED);
+  }
+}
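
CompactionService drives the planner SPI end to end: init hands the planner an ExecutorManager for creating named executors, and compact() builds PlanningParameters whose createPlanBuilder() yields the plan that is later reconciled and submitted. A minimal sketch, not from this commit, of a planner that registers one executor and returns an empty plan; a real planner would call addJob(priority, executor, files) on the builder, as convertPlan above does. The executor name and thread count are placeholders:

    import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
    import org.apache.accumulo.core.spi.compaction.CompactionPlan;
    import org.apache.accumulo.core.spi.compaction.CompactionPlanner;

    public class NoopPlanner implements CompactionPlanner {

      private CompactionExecutorId executor;

      @Override
      public void init(InitParameters params) {
        // "example" and 3 threads are placeholder values; a real planner would likely
        // read them from params.getOptions()
        executor = params.getExecutorManager().createExecutor("example", 3);
      }

      @Override
      public CompactionPlan makePlan(PlanningParameters params) {
        // a real planner would examine params.getCandidates() and
        // params.getRunningCompactions(), then add jobs targeting the executor created above
        return params.createPlanBuilder().build();
      }
    }
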
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/PrintableTable.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/PrintableTable.java
new file mode 100644
index 0000000..74c522a
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/PrintableTable.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.tserver.compactions;
+
+import java.util.Arrays;
+
+public class PrintableTable {
+  private String[] columns;
+  private String[] rows;
+  private int[][] data;
+
+  PrintableTable(String[] columns, String[] rows, int[][] data) {
+    this.columns = columns;
+    this.rows = rows;
+    this.data = data;
+  }
+
+  public String toString() {
+    int widestRow = Arrays.asList(rows).stream().mapToInt(String::length).max().getAsInt();
+
+    StringBuilder sb = new StringBuilder();
+
+    for (int i = 0; i < widestRow; i++)
+      sb.append(" ");
+
+    for (int i = 0; i < columns.length; i++) {
+      sb.append("  C");
+      sb.append(i + 1);
+      sb.append("  ");
+    }
+
+    sb.append("\n");
+
+    for (int i = 0; i < widestRow; i++)
+      sb.append("-");
+
+    for (int i = 0; i < columns.length; i++) {
+      sb.append(" ---- ");
+    }
+
+    sb.append("\n");
+
+    for (int r = 0; r < rows.length; r++) {
+      sb.append(String.format("%" + widestRow + "s", rows[r]));
+
+      int[] row = data[r];
+
+      for (int c = 0; c < row.length; c++) {
+        if (row[c] == 0)
+          sb.append("      ");
+        else
+          sb.append(String.format(" %4d ", row[c]));
+      }
+      sb.append("\n");
+    }
+
+    sb.append("\n");
+
+    for (int i = 0; i < columns.length; i++) {
+      sb.append(" C");
+      sb.append(i + 1);
+      sb.append("='");
+      sb.append(columns[i]);
+      sb.append("'");
+    }
+
+    sb.append("\n");
+
+    return sb.toString();
+  }
+
+  public static void main(String[] args) {
+    String[] columns = {"small running", "small queued", "medium running", "medium queued",
+        "large running", "large queued"};
+    String[] rows = {"tablet 1", "tablet 2", "tablet 3"};
+
+    int[][] data = {{0, 3, 1, 0, 0, 0}, {2, 0, 0, 0, 0, 0}, {2, 0, 4, 0, 0, 0}};
+
+    System.out.println(new PrintableTable(columns, rows, data).toString());
+  }
+}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/SubmittedJob.java
similarity index 63%
copy from server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java
copy to server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/SubmittedJob.java
index 792c0b6..1335e34 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/MajorCompactionReason.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/SubmittedJob.java
@@ -16,10 +16,26 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.accumulo.tserver.compaction;
+package org.apache.accumulo.tserver.compactions;
 
-public enum MajorCompactionReason {
-  // do not change the order, the order of this enum determines the order
-  // in which queued major compactions are executed
-  USER, CHOP, NORMAL, IDLE
+import org.apache.accumulo.core.spi.compaction.CompactionJob;
+
+public abstract class SubmittedJob {
+  private final CompactionJob job;
+
+  public enum Status {
+    RUNNING, QUEUED, COMPLETE, FAILED, CANCELED
+  }
+
+  public SubmittedJob(CompactionJob job) {
+    this.job = job;
+  }
+
+  public CompactionJob getJob() {
+    return job;
+  }
+
+  public abstract Status getStatus();
+
+  public abstract boolean cancel(Status status);
 }
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactableImpl.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactableImpl.java
new file mode 100644
index 0000000..ddfdc16
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactableImpl.java
@@ -0,0 +1,759 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.tserver.tablet;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.admin.CompactionConfig;
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.dataImpl.KeyExtent;
+import org.apache.accumulo.core.logging.TabletLogger;
+import org.apache.accumulo.core.master.thrift.TabletLoadState;
+import org.apache.accumulo.core.metadata.CompactableFileImpl;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.spi.common.ServiceEnvironment;
+import org.apache.accumulo.core.spi.compaction.CompactionDispatcher.DispatchParameters;
+import org.apache.accumulo.core.spi.compaction.CompactionJob;
+import org.apache.accumulo.core.spi.compaction.CompactionKind;
+import org.apache.accumulo.core.spi.compaction.CompactionServiceId;
+import org.apache.accumulo.core.spi.compaction.CompactionServices;
+import org.apache.accumulo.core.util.compaction.CompactionJobImpl;
+import org.apache.accumulo.server.ServiceEnvironmentImpl;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.accumulo.tserver.compactions.Compactable;
+import org.apache.accumulo.tserver.compactions.CompactionManager;
+import org.apache.accumulo.tserver.mastermessage.TabletStatusMessage;
+import org.apache.accumulo.tserver.tablet.Compactor.CompactionCanceledException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Suppliers;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Sets;
+
+/**
+ * This class exists between compaction services and tablets and tracks state related to compactions
+ * for a tablet. This class was written mainly to contain code related to tracking files, state, and
+ * synchronization. All other code was placed in {@link CompactableUtils} in order to make this class
+ * easier to analyze.
+ */
+public class CompactableImpl implements Compactable {
+
+  private static final Logger log = LoggerFactory.getLogger(CompactableImpl.class);
+
+  private final Tablet tablet;
+
+  private Set<StoredTabletFile> allCompactingFiles = new HashSet<>();
+  private Set<CompactionJob> runnningJobs = new HashSet<>();
+  private volatile boolean compactionRunning = false;
+
+  private Set<StoredTabletFile> selectedFiles = new HashSet<>();
+
+  private Set<StoredTabletFile> allFilesWhenChopStarted = new HashSet<>();
+
+  // track files produced by compactions of this tablet, those are considered chopped
+  private Set<StoredTabletFile> choppedFiles = new HashSet<>();
+  private SpecialStatus chopStatus = SpecialStatus.NOT_ACTIVE;
+
+  private Supplier<Set<CompactionServiceId>> servicesInUse;
+
+  // status of special compactions
+  private enum SpecialStatus {
+    NEW, SELECTING, SELECTED, NOT_ACTIVE, CANCELED
+  }
+
+  private SpecialStatus selectStatus = SpecialStatus.NOT_ACTIVE;
+  private CompactionKind selectKind = null;
+  private boolean selectedAll = false;
+  private CompactionHelper chelper = null;
+  private Long compactionId;
+  private CompactionConfig compactionConfig;
+
+  private CompactionManager manager;
+
+  AtomicLong lastSeenCompactionCancelId = new AtomicLong(Long.MIN_VALUE);
+
+  private volatile boolean closed = false;
+
+  // This interface exists for two purposes. First, it allows abstracting over the new and old
+  // implementations of user-pluggable file selection code. Second, it facilitates placing code
+  // outside of this class.
+  public static interface CompactionHelper {
+    Set<StoredTabletFile> selectFiles(SortedMap<StoredTabletFile,DataFileValue> allFiles);
+
+    Set<StoredTabletFile> getFilesToDrop();
+
+    AccumuloConfiguration override(AccumuloConfiguration conf, Set<CompactableFile> files);
+
+  }
+
+  public CompactableImpl(Tablet tablet, CompactionManager manager) {
+    this.tablet = tablet;
+    this.manager = manager;
+    this.servicesInUse = Suppliers.memoizeWithExpiration(() -> {
+      HashSet<CompactionServiceId> servicesIds = new HashSet<>();
+      for (CompactionKind kind : CompactionKind.values()) {
+        servicesIds.add(getConfiguredService(kind));
+      }
+      return Set.copyOf(servicesIds);
+    }, 2, TimeUnit.SECONDS);
+  }
+
+  void initiateChop() {
+
+    Set<StoredTabletFile> allFiles = tablet.getDatafiles().keySet();
+    Set<StoredTabletFile> filesToExamine = new HashSet<>(allFiles);
+
+    synchronized (this) {
+      if (chopStatus == SpecialStatus.NOT_ACTIVE) {
+        chopStatus = SpecialStatus.SELECTING;
+        filesToExamine.removeAll(choppedFiles);
+        filesToExamine.removeAll(allCompactingFiles);
+      } else {
+        return;
+      }
+    }
+
+    Set<StoredTabletFile> unchoppedFiles = selectChopFiles(filesToExamine);
+
+    synchronized (this) {
+      Preconditions.checkState(chopStatus == SpecialStatus.SELECTING);
+      choppedFiles.addAll(Sets.difference(filesToExamine, unchoppedFiles));
+      chopStatus = SpecialStatus.SELECTED;
+      this.allFilesWhenChopStarted.clear();
+      this.allFilesWhenChopStarted.addAll(allFiles);
+
+      var filesToChop = getFilesToChop(allFiles);
+      if (!filesToChop.isEmpty()) {
+        TabletLogger.selected(getExtent(), CompactionKind.CHOP, filesToChop);
+      }
+    }
+
+    checkifChopComplete(tablet.getDatafiles().keySet());
+  }
+
+  private synchronized Set<StoredTabletFile> getFilesToChop(Set<StoredTabletFile> allFiles) {
+    Preconditions.checkState(chopStatus == SpecialStatus.SELECTED);
+    var copy = new HashSet<>(allFilesWhenChopStarted);
+    copy.retainAll(allFiles);
+    copy.removeAll(choppedFiles);
+    return copy;
+  }
+
+  private void checkIfChopComplete(Set<StoredTabletFile> allFiles) {
+
+    boolean completed = false;
+
+    synchronized (this) {
+      if (chopStatus == SpecialStatus.SELECTED) {
+        if (getFilesToChop(allFiles).isEmpty()) {
+          chopStatus = SpecialStatus.NOT_ACTIVE;
+          completed = true;
+        }
+      }
+
+      choppedFiles.retainAll(allFiles);
+    }
+
+    if (completed) {
+      markChopped();
+      TabletLogger.selected(getExtent(), CompactionKind.CHOP, Set.of());
+    }
+  }
+
+  private void markChopped() {
+    MetadataTableUtil.chopped(tablet.getTabletServer().getContext(), getExtent(),
+        tablet.getTabletServer().getLock());
+    tablet.getTabletServer()
+        .enqueueMasterMessage(new TabletStatusMessage(TabletLoadState.CHOPPED, getExtent()));
+  }
+
+  private Set<StoredTabletFile> selectChopFiles(Set<StoredTabletFile> chopCandidates) {
+    try {
+      var firstAndLastKeys = CompactableUtils.getFirstAndLastKeys(tablet, chopCandidates);
+      return CompactableUtils.findChopFiles(getExtent(), firstAndLastKeys, chopCandidates);
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
+    }
+  }
+
+  /**
+   * Tablet can use this to signal files were added.
+   */
+  void filesAdded(boolean chopped, Collection<StoredTabletFile> files) {
+    if (chopped) {
+      synchronized (this) {
+        choppedFiles.addAll(files);
+      }
+    }
+
+    manager.compactableChanged(this);
+  }
+
+  /**
+   * Tablet calls this to signal that a user compaction should run.
+   */
+  void initiateUserCompaction(long compactionId, CompactionConfig compactionConfig) {
+    checkIfUserCompactionCanceled();
+    initiateSelection(CompactionKind.USER, compactionId, compactionConfig);
+  }
+
+  private void initiateSelection(CompactionKind kind) {
+    if (kind != CompactionKind.SELECTOR)
+      return;
+
+    initiateSelection(CompactionKind.SELECTOR, null, null);
+  }
+
+  private boolean noneRunning(CompactionKind kind) {
+    return runningJobs.stream().noneMatch(job -> job.getKind() == kind);
+  }
+
+  private void checkIfUserCompactionCanceled() {
+
+    synchronized (this) {
+      if (closed)
+        return;
+
+      if (selectStatus != SpecialStatus.SELECTED || selectKind != CompactionKind.USER) {
+        return;
+      }
+    }
+
+    var cancelId = tablet.getCompactionCancelID();
+
+    lastSeenCompactionCancelId.getAndUpdate(prev -> Long.max(prev, cancelId));
+
+    synchronized (this) {
+      if (selectStatus == SpecialStatus.SELECTED && selectKind == CompactionKind.USER) {
+        if (cancelId >= compactionId) {
+          if (noneRunning(CompactionKind.USER)) {
+            selectStatus = SpecialStatus.NOT_ACTIVE;
+            log.trace("Selected compaction status changed {} {}", getExtent(), selectStatus);
+          } else {
+            selectStatus = SpecialStatus.CANCELED;
+            log.trace("Selected compaction status changed {} {}", getExtent(), selectStatus);
+          }
+        }
+      }
+    }
+  }
+
+  private void initiateSelection(CompactionKind kind, Long compactionId,
+      CompactionConfig compactionConfig) {
+    Preconditions.checkArgument(kind == CompactionKind.USER || kind == CompactionKind.SELECTOR);
+
+    var localHelper = CompactableUtils.getHelper(kind, tablet, compactionId, compactionConfig);
+
+    if (localHelper == null)
+      return;
+
+    synchronized (this) {
+      if (closed)
+        return;
+
+      if (selectStatus == SpecialStatus.NOT_ACTIVE || (kind == CompactionKind.USER
+          && selectKind == CompactionKind.SELECTOR && noneRunning(CompactionKind.SELECTOR))) {
+        selectStatus = SpecialStatus.NEW;
+        selectKind = kind;
+        selectedFiles.clear();
+        selectedAll = false;
+        this.chelper = localHelper;
+        this.compactionId = compactionId;
+        this.compactionConfig = compactionConfig;
+        log.trace("Selected compaction status changed {} {} {} {}", getExtent(), selectStatus,
+            compactionId, compactionConfig);
+      } else {
+        return;
+      }
+    }
+
+    selectFiles();
+
+  }
+
+  private void selectFiles() {
+
+    CompactionHelper localHelper;
+
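+    // Selection only proceeds when the status is NEW and nothing is currently compacting, so the
+    // selected set is computed from a stable set of files.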
+    synchronized (this) {
+      if (selectStatus == SpecialStatus.NEW && allCompactingFiles.isEmpty()) {
+        selectedFiles.clear();
+        selectStatus = SpecialStatus.SELECTING;
+        localHelper = this.chelper;
+        log.trace("Selected compaction status changed {} {}", getExtent(), selectStatus);
+      } else {
+        return;
+      }
+    }
+
+    try {
+      var allFiles = tablet.getDatafiles();
+      Set<StoredTabletFile> selectingFiles = localHelper.selectFiles(allFiles);
+
+      if (selectingFiles.isEmpty()) {
+        synchronized (this) {
+          Preconditions.checkState(selectStatus == SpecialStatus.SELECTING);
+          selectStatus = SpecialStatus.NOT_ACTIVE;
+          log.trace("Selected compaction status changed {} {}", getExtent(), selectStatus);
+        }
+      } else {
+        var allSelected =
+            allFiles.keySet().equals(Sets.union(selectingFiles, localHelper.getFilesToDrop()));
+        synchronized (this) {
+          Preconditions.checkState(selectStatus == SpecialStatus.SELECTING);
+          selectStatus = SpecialStatus.SELECTED;
+          selectedFiles.addAll(selectingFiles);
+          selectedAll = allSelected;
+          log.trace("Selected compaction status changed {} {} {} {}", getExtent(), selectStatus,
+              selectedAll, asFileNames(selectedFiles));
+          TabletLogger.selected(getExtent(), selectKind, selectedFiles);
+        }
+
+        manager.compactableChanged(this);
+      }
+
+    } catch (Exception e) {
+      synchronized (this) {
+        if (selectStatus == SpecialStatus.SELECTING)
+          selectStatus = SpecialStatus.NOT_ACTIVE;
+        log.error("Failed to select user compaction files {}", getExtent(), e);
+        log.trace("Selected compaction status changed {} {}", getExtent(), selectStatus);
+        selectedFiles.clear();
+      }
+    }
+
+  }
+
+  private Collection<String> asFileNames(Set<StoredTabletFile> files) {
+    return Collections2.transform(files, StoredTabletFile::getFileName);
+  }
+
+  private synchronized void selectedCompactionCompleted(CompactionJob job,
+      Set<StoredTabletFile> jobFiles, StoredTabletFile newFile) {
+    Preconditions.checkArgument(
+        job.getKind() == CompactionKind.USER || job.getKind() == CompactionKind.SELECTOR);
+    Preconditions.checkState(selectedFiles.containsAll(jobFiles));
+    Preconditions.checkState(
+        (selectStatus == SpecialStatus.SELECTED || selectStatus == SpecialStatus.CANCELED)
+            && selectKind == job.getKind());
+
+    selectedFiles.removeAll(jobFiles);
+
+    if (selectedFiles.isEmpty()
+        || (selectStatus == SpecialStatus.CANCELED && noneRunning(selectKind))) {
+      selectStatus = SpecialStatus.NOT_ACTIVE;
+      log.trace("Selected compaction status changed {} {}", getExtent(), selectStatus);
+    } else if (selectStatus == SpecialStatus.SELECTED) {
+      selectedFiles.add(newFile);
+      log.trace("Compacted subset of selected files {} {} -> {}", getExtent(),
+          asFileNames(jobFiles), newFile.getFileName());
+    } else {
+      log.debug("Canceled selected compaction completed {} but others still running ", getExtent());
+    }
+
+    TabletLogger.selected(getExtent(), selectKind, selectedFiles);
+  }
+
+  @Override
+  public TableId getTableId() {
+    return getExtent().getTableId();
+  }
+
+  @Override
+  public KeyExtent getExtent() {
+    return tablet.getExtent();
+  }
+
+  @SuppressWarnings("removal")
+  private boolean isCompactionStratConfigured() {
+    return tablet.getTableConfiguration().isPropertySet(Property.TABLE_COMPACTION_STRATEGY, true);
+  }
+
+  @Override
+  public Optional<Files> getFiles(CompactionServiceId service, CompactionKind kind) {
+
+    if (!service.equals(getConfiguredService(kind)))
+      return Optional.empty();
+
+    var files = tablet.getDatafiles();
+
+    // It is very important to call the following outside of the lock, since it may need to
+    // acquire this object's lock itself.
+    initiateSelection(kind);
+
+    if (kind == CompactionKind.USER)
+      checkIfUserCompactionCanceled();
+
+    synchronized (this) {
+
+      if (closed)
+        return Optional.empty();
+
+      if (!files.keySet().containsAll(allCompactingFiles)) {
+        log.trace("Ignoring because compacting not a subset {}", getExtent());
+
+        // A compaction finished, so things are out of date. This can happen because this class and
+        // the tablet have separate locks; it is ok.
+        return Optional.of(new Compactable.Files(files, Set.of(), Set.of()));
+      }
+
+      var allCompactingCopy = Set.copyOf(allCompactingFiles);
+      var runningJobsCopy = Set.copyOf(runningJobs);
+
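+      // Compute candidate files based on the compaction kind and the current selection and chop
+      // state. Files that are compacting, or reserved by a selected compaction, are excluded from
+      // system compaction candidates.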
+      switch (kind) {
+        case SYSTEM: {
+          if (isCompactionStratConfigured())
+            return Optional.of(new Compactable.Files(files, Set.of(), runningJobsCopy));
+
+          switch (selectStatus) {
+            case NOT_ACTIVE:
+            case CANCELED:
+              return Optional.of(new Compactable.Files(files,
+                  Sets.difference(files.keySet(), allCompactingCopy), runningJobsCopy));
+            case NEW:
+            case SELECTING:
+              return Optional.of(new Compactable.Files(files, Set.of(), runningJobsCopy));
+            case SELECTED: {
+              Set<StoredTabletFile> candidates = new HashSet<>(files.keySet());
+              candidates.removeAll(allCompactingCopy);
+              candidates.removeAll(selectedFiles);
+              candidates = Collections.unmodifiableSet(candidates);
+              return Optional.of(new Compactable.Files(files, candidates, runningJobsCopy));
+            }
+            default:
+              throw new AssertionError();
+          }
+        }
+        case SELECTOR:
+          // intentional fall through
+        case USER:
+          switch (selectStatus) {
+            case NOT_ACTIVE:
+            case NEW:
+            case SELECTING:
+            case CANCELED:
+              return Optional.of(new Compactable.Files(files, Set.of(), runningJobsCopy));
+            case SELECTED: {
+              if (selectKind == kind) {
+                Set<StoredTabletFile> candidates = new HashSet<>(selectedFiles);
+                candidates.removeAll(allCompactingFiles);
+                candidates = Collections.unmodifiableSet(candidates);
+                Preconditions.checkState(files.keySet().containsAll(candidates),
+                    "selected files not in all files %s %s", candidates, files.keySet());
+                Map<String,String> hints = Map.of();
+                if (kind == CompactionKind.USER)
+                  hints = compactionConfig.getExecutionHints();
+                return Optional.of(new Compactable.Files(files, Set.copyOf(selectedFiles),
+                    runningJobsCopy, hints));
+              } else {
+                return Optional.of(new Compactable.Files(files, Set.of(), runningJobsCopy));
+              }
+            }
+            default:
+              throw new AssertionError();
+          }
+        case CHOP: {
+          switch (chopStatus) {
+            case NOT_ACTIVE:
+            case NEW:
+            case SELECTING:
+              return Optional.of(new Compactable.Files(files, Set.of(), runningJobsCopy));
+            case SELECTED: {
+              if (selectStatus == SpecialStatus.NEW || selectStatus == SpecialStatus.SELECTING)
+                return Optional.of(new Compactable.Files(files, Set.of(), runningJobsCopy));
+
+              var filesToChop = getFilesToChop(files.keySet());
+              filesToChop.removeAll(allCompactingFiles);
+              if (selectStatus == SpecialStatus.SELECTED)
+                filesToChop.removeAll(selectedFiles);
+              // only wrap after all removals; removeAll would throw on an unmodifiable view
+              filesToChop = Collections.unmodifiableSet(filesToChop);
+              return Optional.of(new Compactable.Files(files, filesToChop, runningJobsCopy));
+            }
+            case CANCELED: // intentional fall through, not expected status for chop
+            default:
+              throw new AssertionError();
+          }
+        }
+        default:
+          throw new AssertionError();
+      }
+    }
+  }
+
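+  // Running compactions use this to periodically re-check whether they should keep going. The
+  // result is memoized briefly because the check runs for every key/value the Compactor writes.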
+  class CompactionCheck {
+    private Supplier<Boolean> memoizedCheck;
+
+    public CompactionCheck(CompactionServiceId service, CompactionKind kind, Long compactionId) {
+      this.memoizedCheck = Suppliers.memoizeWithExpiration(() -> {
+        if (closed)
+          return false;
+        if (!service.equals(getConfiguredService(kind)))
+          return false;
+        if (kind == CompactionKind.USER && lastSeenCompactionCancelId.get() >= compactionId)
+          return false;
+
+        return true;
+      }, 100, TimeUnit.MILLISECONDS);
+    }
+
+    public boolean isCompactionEnabled(long entriesCompacted) {
+      return memoizedCheck.get();
+    }
+  }
+
+  @Override
+  public void compact(CompactionServiceId service, CompactionJob job) {
+
+    Set<StoredTabletFile> jobFiles = job.getFiles().stream()
+        .map(cf -> ((CompactableFileImpl) cf).getStortedTabletFile()).collect(Collectors.toSet());
+
+    Long compactionId = null;
+    Long checkCompactionId = null;
+    boolean propagateDeletes = true;
+    CompactionHelper localHelper;
+    List<IteratorSetting> iters = List.of();
+    CompactionConfig localCompactionCfg;
+
+    if (job.getKind() == CompactionKind.USER)
+      checkIfUserCompactionCanceled();
+
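+    // Under the lock, verify the job still applies to the current selection state and reserve its
+    // files before starting the compaction.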
+    synchronized (this) {
+      if (closed)
+        return;
+
+      if (!service.equals(getConfiguredService(job.getKind())))
+        return;
+
+      switch (selectStatus) {
+        case NEW:
+        case SELECTING:
+          log.trace(
+              "Ignoring compaction because files are being selected for user compaction {} {}",
+              getExtent(), job);
+          return;
+        case SELECTED: {
+          if (job.getKind() == CompactionKind.USER || job.getKind() == CompactionKind.SELECTOR) {
+            if (selectKind == job.getKind()) {
+              if (!selectedFiles.containsAll(jobFiles)) {
+                log.error("Ignoring {} compaction that does not contain selected files {} {} {}",
+                    job.getKind(), getExtent(), asFileNames(selectedFiles), asFileNames(jobFiles));
+                return;
+              }
+            } else {
+              log.trace("Ingoring {} compaction because not selected kind {}", job.getKind(),
+                  getExtent());
+              return;
+            }
+          } else if (!Collections.disjoint(selectedFiles, jobFiles)) {
+            log.trace("Ingoring compaction that overlaps with selected files {} {} {}", getExtent(),
+                job.getKind(), asFileNames(Sets.intersection(selectedFiles, jobFiles)));
+            return;
+          }
+          break;
+        }
+        case CANCELED:
+        case NOT_ACTIVE: {
+          if (job.getKind() == CompactionKind.USER || job.getKind() == CompactionKind.SELECTOR) {
+            log.trace("Ignoring {} compaction because selectStatus is {} for {}", job.getKind(),
+                selectStatus, getExtent());
+            return;
+          }
+          break;
+        }
+        default:
+          throw new AssertionError();
+      }
+
+      if (Collections.disjoint(allCompactingFiles, jobFiles)) {
+        allCompactingFiles.addAll(jobFiles);
+        runningJobs.add(job);
+      } else {
+        return;
+      }
+
+      compactionRunning = !allCompactingFiles.isEmpty();
+
+      switch (job.getKind()) {
+        case SELECTOR:
+        case USER:
+          Preconditions.checkState(selectStatus == SpecialStatus.SELECTED);
+          if (job.getKind() == selectKind && selectedAll && jobFiles.containsAll(selectedFiles)) {
+            propagateDeletes = false;
+          }
+          break;
+        default:
+          if (((CompactionJobImpl) job).selectedAll()) {
+            // At the time when the job was created all files were selected, so deletes can be
+            // dropped.
+            propagateDeletes = false;
+          }
+      }
+
+      if (job.getKind() == CompactionKind.USER && selectKind == job.getKind()
+          && selectedFiles.equals(jobFiles)) {
+        compactionId = this.compactionId;
+      }
+
+      if (job.getKind() == CompactionKind.USER) {
+        iters = compactionConfig.getIterators();
+        checkCompactionId = this.compactionId;
+      }
+
+      localHelper = this.chelper;
+      localCompactionCfg = this.compactionConfig;
+    }
+
+    StoredTabletFile metaFile = null;
+    try {
+
+      TabletLogger.compacting(getExtent(), job, localCompactionCfg);
+
+      metaFile = CompactableUtils.compact(tablet, job, jobFiles, compactionId, propagateDeletes,
+          localHelper, iters, new CompactionCheck(service, job.getKind(), checkCompactionId));
+
+      TabletLogger.compacted(getExtent(), job, metaFile);
+
+    } catch (CompactionCanceledException cce) {
+      log.debug("Compaction canceled {} ", getExtent());
+      metaFile = null;
+    } catch (Exception e) {
+      metaFile = null;
+      throw new RuntimeException(e);
+    } finally {
+      synchronized (this) {
+        Preconditions.checkState(allCompactingFiles.removeAll(jobFiles));
+        Preconditions.checkState(runningJobs.remove(job));
+        compactionRunning = !allCompactingFiles.isEmpty();
+
+        if (allCompactingFiles.isEmpty()) {
+          notifyAll();
+        }
+
+        if (metaFile != null) {
+          choppedFiles.add(metaFile);
+        }
+      }
+
+      checkIfChopComplete(tablet.getDatafiles().keySet());
+
+      if ((job.getKind() == CompactionKind.USER || job.getKind() == CompactionKind.SELECTOR)
+          && metaFile != null)
+        selectedCompactionCompleted(job, jobFiles, metaFile);
+      else
+        selectFiles();
+    }
+  }
+
+  @Override
+  public CompactionServiceId getConfiguredService(CompactionKind kind) {
+
+    var dispatcher = tablet.getTableConfiguration().getCompactionDispatcher();
+
+    Map<String,String> tmpHints = Map.of();
+
+    if (kind == CompactionKind.USER) {
+      synchronized (this) {
+        if (selectStatus != SpecialStatus.NOT_ACTIVE && selectStatus != SpecialStatus.CANCELED
+            && selectKind == CompactionKind.USER) {
+          tmpHints = compactionConfig.getExecutionHints();
+        }
+      }
+    }
+
+    var hints = tmpHints;
+
+    var directives = dispatcher.dispatch(new DispatchParameters() {
+
+      @Override
+      public ServiceEnvironment getServiceEnv() {
+        return new ServiceEnvironmentImpl(tablet.getContext());
+      }
+
+      @Override
+      public Map<String,String> getExecutionHints() {
+        return hints;
+      }
+
+      @Override
+      public CompactionKind getCompactionKind() {
+        return kind;
+      }
+
+      @Override
+      public CompactionServices getCompactionServices() {
+        return manager.getServices();
+      }
+    });
+
+    return directives.getService();
+  }
+
+  @Override
+  public double getCompactionRatio() {
+    return tablet.getTableConfiguration().getFraction(Property.TABLE_MAJC_RATIO);
+  }
+
+  public boolean isMajorCompactionRunning() {
+    // This method is intentionally not synchronized because it is called by stats code.
+    return compactionRunning;
+  }
+
+  public boolean isMajorCompactionQueued() {
+    return manager.isCompactionQueued(getExtent(), servicesInUse.get());
+  }
+
+  /**
+   * Signals any running compactions to stop and waits for them to finish. After this method
+   * returns, no compactions should be running and none should be able to start.
+   */
+  public synchronized void close() {
+    if (closed)
+      return;
+
+    closed = true;
+
+    while (!allCompactingFiles.isEmpty()) {
+      try {
+        wait(50);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        throw new RuntimeException(e);
+      }
+    }
+  }
+}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactableUtils.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactableUtils.java
new file mode 100644
index 0000000..2635dda
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactableUtils.java
@@ -0,0 +1,602 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.tserver.tablet;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.concurrent.ExecutionException;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.PluginEnvironment;
+import org.apache.accumulo.core.client.admin.CompactionConfig;
+import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
+import org.apache.accumulo.core.client.admin.PluginConfig;
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer;
+import org.apache.accumulo.core.client.admin.compaction.CompactionSelector;
+import org.apache.accumulo.core.client.admin.compaction.CompactionSelector.Selection;
+import org.apache.accumulo.core.client.sample.SamplerConfiguration;
+import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
+import org.apache.accumulo.core.client.summary.Summary;
+import org.apache.accumulo.core.clientImpl.CompactionStrategyConfigUtil;
+import org.apache.accumulo.core.clientImpl.UserCompactionUtils;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.ConfigurationCopy;
+import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.dataImpl.KeyExtent;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.core.file.FileSKVIterator;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.metadata.CompactableFileImpl;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
+import org.apache.accumulo.core.metadata.TabletFile;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
+import org.apache.accumulo.core.spi.cache.BlockCache;
+import org.apache.accumulo.core.spi.compaction.CompactionJob;
+import org.apache.accumulo.core.spi.compaction.CompactionKind;
+import org.apache.accumulo.core.summary.Gatherer;
+import org.apache.accumulo.core.summary.SummarizerFactory;
+import org.apache.accumulo.core.summary.SummaryCollection;
+import org.apache.accumulo.core.summary.SummaryReader;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.core.util.ratelimit.RateLimiter;
+import org.apache.accumulo.server.ServiceEnvironmentImpl;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.accumulo.tserver.compaction.CompactionPlan;
+import org.apache.accumulo.tserver.compaction.CompactionStrategy;
+import org.apache.accumulo.tserver.compaction.MajorCompactionReason;
+import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
+import org.apache.accumulo.tserver.compaction.WriteParameters;
+import org.apache.accumulo.tserver.tablet.CompactableImpl.CompactionCheck;
+import org.apache.accumulo.tserver.tablet.CompactableImpl.CompactionHelper;
+import org.apache.accumulo.tserver.tablet.Compactor.CompactionCanceledException;
+import org.apache.accumulo.tserver.tablet.Compactor.CompactionEnv;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.collect.Collections2;
+
+@SuppressWarnings("removal")
+public class CompactableUtils {
+
+  private static final Logger log = LoggerFactory.getLogger(CompactableUtils.class);
+
+  private final static Cache<TableId,Boolean> strategyWarningsCache =
+      CacheBuilder.newBuilder().maximumSize(1000).build();
+
+  public static Map<StoredTabletFile,Pair<Key,Key>> getFirstAndLastKeys(Tablet tablet,
+      Set<StoredTabletFile> allFiles) throws IOException {
+    final Map<StoredTabletFile,Pair<Key,Key>> result = new HashMap<>();
+    final FileOperations fileFactory = FileOperations.getInstance();
+    final VolumeManager fs = tablet.getTabletServer().getFileSystem();
+    for (StoredTabletFile file : allFiles) {
+      FileSystem ns = fs.getFileSystemByPath(file.getPath());
+      try (FileSKVIterator openReader = fileFactory.newReaderBuilder()
+          .forFile(file.getPathStr(), ns, ns.getConf(), tablet.getContext().getCryptoService())
+          .withTableConfiguration(tablet.getTableConfiguration()).seekToBeginning().build()) {
+        Key first = openReader.getFirstKey();
+        Key last = openReader.getLastKey();
+        result.put(file, new Pair<>(first, last));
+      }
+    }
+    return result;
+  }
+
+  public static Set<StoredTabletFile> findChopFiles(KeyExtent extent,
+      Map<StoredTabletFile,Pair<Key,Key>> firstAndLastKeys, Collection<StoredTabletFile> allFiles) {
+    Set<StoredTabletFile> result = new HashSet<>();
+
+    for (StoredTabletFile file : allFiles) {
+      Pair<Key,Key> pair = firstAndLastKeys.get(file);
+      Key first = pair.getFirst();
+      Key last = pair.getSecond();
+      // If first and last are null, it's an empty file. Add it to the compact set so it goes
+      // away.
+      if ((first == null && last == null) || (first != null && !extent.contains(first.getRow()))
+          || (last != null && !extent.contains(last.getRow()))) {
+        result.add(file);
+      }
+
+    }
+    return result;
+  }
+
+  static CompactionPlan selectFiles(CompactionKind kind, Tablet tablet,
+      SortedMap<StoredTabletFile,DataFileValue> datafiles, CompactionStrategyConfig csc) {
+
+    var trsm = tablet.getTabletResources().getTabletServerResourceManager();
+
+    BlockCache sc = trsm.getSummaryCache();
+    BlockCache ic = trsm.getIndexCache();
+    Cache<String,Long> fileLenCache = trsm.getFileLenCache();
+    MajorCompactionRequest request = new MajorCompactionRequest(tablet.getExtent(),
+        CompactableUtils.from(kind), tablet.getTabletServer().getFileSystem(),
+        tablet.getTableConfiguration(), sc, ic, fileLenCache, tablet.getContext());
+
+    request.setFiles(datafiles);
+
+    CompactionStrategy strategy = CompactableUtils.newInstance(tablet.getTableConfiguration(),
+        csc.getClassName(), CompactionStrategy.class);
+    strategy.init(csc.getOptions());
+
+    try {
+      if (strategy.shouldCompact(request)) {
+        strategy.gatherInformation(request);
+        var plan = strategy.getCompactionPlan(request);
+
+        if (plan == null)
+          return new CompactionPlan();
+
+        log.debug("Selected files using compaction strategy {} {} {} {}",
+            strategy.getClass().getSimpleName(), csc.getOptions(), plan.inputFiles,
+            plan.deleteFiles);
+
+        plan.validate(datafiles.keySet());
+
+        return plan;
+      }
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
+    }
+    return new CompactionPlan();
+  }
+
+  static AccumuloConfiguration createCompactionConfiguration(AccumuloConfiguration base,
+      WriteParameters p) {
+    if (p == null)
+      return base;
+
+    ConfigurationCopy result = new ConfigurationCopy(base);
+    if (p.getHdfsBlockSize() > 0) {
+      result.set(Property.TABLE_FILE_BLOCK_SIZE, "" + p.getHdfsBlockSize());
+    }
+    if (p.getBlockSize() > 0) {
+      result.set(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE, "" + p.getBlockSize());
+    }
+    if (p.getIndexBlockSize() > 0) {
+      result.set(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX, "" + p.getIndexBlockSize());
+    }
+    if (p.getCompressType() != null) {
+      result.set(Property.TABLE_FILE_COMPRESSION_TYPE, p.getCompressType());
+    }
+    if (p.getReplication() != 0) {
+      result.set(Property.TABLE_FILE_REPLICATION, "" + p.getReplication());
+    }
+    return result;
+  }
+
+  static AccumuloConfiguration createCompactionConfiguration(Tablet tablet,
+      Set<CompactableFile> files) {
+    var tconf = tablet.getTableConfiguration();
+
+    var configurorClass = tconf.get(Property.TABLE_COMPACTION_CONFIGURER);
+    if (configurorClass == null || configurorClass.isBlank()) {
+      return tconf;
+    }
+
+    var opts = tconf.getAllPropertiesWithPrefixStripped(Property.TABLE_COMPACTION_CONFIGURER_OPTS);
+
+    return createCompactionConfiguration(tablet, files, new PluginConfig(configurorClass, opts));
+  }
+
+  static AccumuloConfiguration createCompactionConfiguration(Tablet tablet,
+      Set<CompactableFile> files, PluginConfig cfg) {
+    CompactionConfigurer configurer = CompactableUtils.newInstance(tablet.getTableConfiguration(),
+        cfg.getClassName(), CompactionConfigurer.class);
+
+    configurer.init(new CompactionConfigurer.InitParamaters() {
+      @Override
+      public Map<String,String> getOptions() {
+        return cfg.getOptions();
+      }
+
+      @Override
+      public PluginEnvironment getEnvironment() {
+        return new ServiceEnvironmentImpl(tablet.getContext());
+      }
+
+      @Override
+      public TableId getTableId() {
+        return tablet.getExtent().getTableId();
+      }
+    });
+
+    var overrides = configurer.override(new CompactionConfigurer.InputParameters() {
+      @Override
+      public Collection<CompactableFile> getInputFiles() {
+        return files;
+      }
+
+      @Override
+      public PluginEnvironment getEnvironment() {
+        return new ServiceEnvironmentImpl(tablet.getContext());
+      }
+
+      @Override
+      public TableId getTableId() {
+        return tablet.getExtent().getTableId();
+      }
+    });
+
+    if (overrides.getOverrides().isEmpty()) {
+      return tablet.getTableConfiguration();
+    }
+
+    ConfigurationCopy result = new ConfigurationCopy(tablet.getTableConfiguration());
+    overrides.getOverrides().forEach(result::set);
+    return result;
+  }
+
+  static <T> T newInstance(AccumuloConfiguration tableConfig, String className,
+      Class<T> baseClass) {
+    String context = tableConfig.get(Property.TABLE_CLASSPATH);
+    try {
+      return ConfigurationTypeHelper.getClassInstance(context, className, baseClass);
+    } catch (IOException | ReflectiveOperationException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  static Set<StoredTabletFile> selectFiles(Tablet tablet,
+      SortedMap<StoredTabletFile,DataFileValue> datafiles, PluginConfig selectorConfig) {
+
+    CompactionSelector selector = newInstance(tablet.getTableConfiguration(),
+        selectorConfig.getClassName(), CompactionSelector.class);
+    selector.init(new CompactionSelector.InitParamaters() {
+
+      @Override
+      public Map<String,String> getOptions() {
+        return selectorConfig.getOptions();
+      }
+
+      @Override
+      public PluginEnvironment getEnvironment() {
+        return new ServiceEnvironmentImpl(tablet.getContext());
+      }
+
+      @Override
+      public TableId getTableId() {
+        return tablet.getExtent().getTableId();
+      }
+    });
+
+    Selection selection = selector.select(new CompactionSelector.SelectionParameters() {
+
+      @Override
+      public PluginEnvironment getEnvironment() {
+        return new ServiceEnvironmentImpl(tablet.getContext());
+      }
+
+      @Override
+      public Collection<CompactableFile> getAvailableFiles() {
+        return Collections2.transform(datafiles.entrySet(),
+            e -> new CompactableFileImpl(e.getKey(), e.getValue()));
+      }
+
+      @Override
+      public Collection<Summary> getSummaries(Collection<CompactableFile> files,
+          Predicate<SummarizerConfiguration> summarySelector) {
+
+        var context = tablet.getContext();
+        var tsrm = tablet.getTabletResources().getTabletServerResourceManager();
+
+        SummaryCollection sc = new SummaryCollection();
+        SummarizerFactory factory = new SummarizerFactory(tablet.getTableConfiguration());
+        for (CompactableFile cf : files) {
+          var file = CompactableFileImpl.toStoredTabletFile(cf);
+          FileSystem fs = context.getVolumeManager().getFileSystemByPath(file.getPath());
+          Configuration conf = context.getHadoopConf();
+          SummaryCollection fsc = SummaryReader
+              .load(fs, conf, factory, file.getPath(), summarySelector, tsrm.getSummaryCache(),
+                  tsrm.getIndexCache(), tsrm.getFileLenCache(), context.getCryptoService())
+              .getSummaries(Collections.singletonList(new Gatherer.RowRange(tablet.getExtent())));
+          sc.merge(fsc, factory);
+        }
+
+        return sc.getSummaries();
+      }
+
+      @Override
+      public TableId getTableId() {
+        return tablet.getExtent().getTableId();
+      }
+
+      @Override
+      public Optional<SortedKeyValueIterator<Key,Value>> getSample(CompactableFile file,
+          SamplerConfiguration sc) {
+        try {
+          FileOperations fileFactory = FileOperations.getInstance();
+          Path path = new Path(file.getUri());
+          FileSystem ns = tablet.getTabletServer().getFileSystem().getFileSystemByPath(path);
+          var fiter = fileFactory.newReaderBuilder()
+              .forFile(path.toString(), ns, ns.getConf(), tablet.getContext().getCryptoService())
+              .withTableConfiguration(tablet.getTableConfiguration()).seekToBeginning().build();
+          return Optional.ofNullable(fiter.getSample(new SamplerConfigurationImpl(sc)));
+        } catch (IOException e) {
+          throw new UncheckedIOException(e);
+        }
+      }
+    });
+
+    return selection.getFilesToCompact().stream().map(CompactableFileImpl::toStoredTabletFile)
+        .collect(Collectors.toSet());
+  }
+
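+  // Adapts a table-configured compaction selector, or a deprecated compaction strategy, to the
+  // CompactionHelper interface used by CompactableImpl.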
+  private static final class TableCompactionHelper implements CompactionHelper {
+    private final PluginConfig cselCfg2;
+    private final CompactionStrategyConfig stratCfg2;
+    private final Tablet tablet;
+    private WriteParameters wp;
+    private Set<StoredTabletFile> filesToDrop;
+
+    private TableCompactionHelper(PluginConfig cselCfg2, CompactionStrategyConfig stratCfg2,
+        Tablet tablet) {
+      this.cselCfg2 = cselCfg2;
+      this.stratCfg2 = stratCfg2;
+      this.tablet = tablet;
+    }
+
+    @Override
+    public Set<StoredTabletFile> selectFiles(SortedMap<StoredTabletFile,DataFileValue> allFiles) {
+      if (cselCfg2 != null) {
+        filesToDrop = Set.of();
+        return CompactableUtils.selectFiles(tablet, allFiles, cselCfg2);
+      } else {
+        var plan =
+            CompactableUtils.selectFiles(CompactionKind.SELECTOR, tablet, allFiles, stratCfg2);
+        this.wp = plan.writeParameters;
+        filesToDrop = Set.copyOf(plan.deleteFiles);
+        return Set.copyOf(plan.inputFiles);
+      }
+    }
+
+    @Override
+    public AccumuloConfiguration override(AccumuloConfiguration conf, Set<CompactableFile> files) {
+      if (wp != null) {
+        return createCompactionConfiguration(conf, wp);
+      }
+
+      return null;
+    }
+
+    @Override
+    public Set<StoredTabletFile> getFilesToDrop() {
+      Preconditions.checkState(filesToDrop != null);
+      return filesToDrop;
+    }
+  }
+
+  private static final class UserCompactionHelper implements CompactionHelper {
+    private final CompactionConfig compactionConfig;
+    private final Tablet tablet;
+    private final Long compactionId;
+    private WriteParameters wp;
+    private Set<StoredTabletFile> filesToDrop;
+
+    private UserCompactionHelper(CompactionConfig compactionConfig, Tablet tablet,
+        Long compactionId) {
+      this.compactionConfig = compactionConfig;
+      this.tablet = tablet;
+      this.compactionId = compactionId;
+    }
+
+    @Override
+    public Set<StoredTabletFile> selectFiles(SortedMap<StoredTabletFile,DataFileValue> allFiles) {
+
+      Set<StoredTabletFile> selectedFiles;
+
+      if (!CompactionStrategyConfigUtil.isDefault(compactionConfig.getCompactionStrategy())) {
+        var plan = CompactableUtils.selectFiles(CompactionKind.USER, tablet, allFiles,
+            compactionConfig.getCompactionStrategy());
+        this.wp = plan.writeParameters;
+        selectedFiles = Set.copyOf(plan.inputFiles);
+        filesToDrop = Set.copyOf(plan.deleteFiles);
+      } else if (!UserCompactionUtils.isDefault(compactionConfig.getSelector())) {
+        selectedFiles =
+            CompactableUtils.selectFiles(tablet, allFiles, compactionConfig.getSelector());
+        filesToDrop = Set.of();
+      } else {
+        selectedFiles = allFiles.keySet();
+        filesToDrop = Set.of();
+      }
+
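+      // When nothing is selected, record the compaction ID immediately so the user compaction is
+      // treated as complete for this tablet.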
+      if (selectedFiles.isEmpty()) {
+        tablet.setLastCompactionID(compactionId);
+
+        MetadataTableUtil.updateTabletCompactID(tablet.getExtent(), compactionId,
+            tablet.getTabletServer().getContext(), tablet.getTabletServer().getLock());
+      }
+
+      return selectedFiles;
+    }
+
+    @Override
+    public AccumuloConfiguration override(AccumuloConfiguration conf, Set<CompactableFile> files) {
+      if (!UserCompactionUtils.isDefault(compactionConfig.getConfigurer())) {
+        return createCompactionConfiguration(tablet, files, compactionConfig.getConfigurer());
+      } else if (!CompactionStrategyConfigUtil.isDefault(compactionConfig.getCompactionStrategy())
+          && wp != null) {
+        return createCompactionConfiguration(conf, wp);
+      }
+
+      return null;
+    }
+
+    @Override
+    public Set<StoredTabletFile> getFilesToDrop() {
+      Preconditions.checkState(filesToDrop != null);
+      return filesToDrop;
+    }
+  }
+
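+  // Creates the file selection helper for USER and SELECTOR compactions. Returns null for other
+  // kinds, or for SELECTOR compactions when no selector or deprecated strategy is configured.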
+  public static CompactionHelper getHelper(CompactionKind kind, Tablet tablet, Long compactionId,
+      CompactionConfig compactionConfig) {
+    if (kind == CompactionKind.USER) {
+      return new UserCompactionHelper(compactionConfig, tablet, compactionId);
+    } else if (kind == CompactionKind.SELECTOR) {
+      var tconf = tablet.getTableConfiguration();
+      var selectorClassName = tconf.get(Property.TABLE_COMPACTION_SELECTOR);
+
+      PluginConfig cselCfg = null;
+
+      if (selectorClassName != null && !selectorClassName.isBlank()) {
+        var opts =
+            tconf.getAllPropertiesWithPrefixStripped(Property.TABLE_COMPACTION_SELECTOR_OPTS);
+        cselCfg = new PluginConfig(selectorClassName, opts);
+      }
+
+      CompactionStrategyConfig stratCfg = null;
+
+      if (cselCfg == null && tconf.isPropertySet(Property.TABLE_COMPACTION_STRATEGY, true)) {
+        var stratClassName = tconf.get(Property.TABLE_COMPACTION_STRATEGY);
+
+        try {
+          strategyWarningsCache.get(tablet.getExtent().getTableId(), () -> {
+            log.warn(
+                "Table id {} set {} to {}.  Compaction strategies are deprecated.  See the Javadoc"
+                    + " for class {} for more details.",
+                tablet.getExtent().getTableId(), Property.TABLE_COMPACTION_STRATEGY.getKey(),
+                stratClassName, CompactionStrategyConfig.class.getName());
+            return true;
+          });
+        } catch (ExecutionException e) {
+          throw new RuntimeException(e);
+        }
+
+        var opts =
+            tconf.getAllPropertiesWithPrefixStripped(Property.TABLE_COMPACTION_STRATEGY_PREFIX);
+
+        stratCfg = new CompactionStrategyConfig(stratClassName).setOptions(opts);
+      }
+
+      if (cselCfg != null || stratCfg != null) {
+        return new TableCompactionHelper(cselCfg, stratCfg, tablet);
+      }
+    }
+
+    return null;
+  }
+
+  public static AccumuloConfiguration getCompactionConfig(CompactionKind kind, Tablet tablet,
+      CompactionHelper driver, Set<CompactableFile> files) {
+    if (kind == CompactionKind.USER || kind == CompactionKind.SELECTOR) {
+      var oconf = driver.override(tablet.getTableConfiguration(), files);
+      if (oconf != null)
+        return oconf;
+    }
+
+    return createCompactionConfiguration(tablet, files);
+  }
+
+  static StoredTabletFile compact(Tablet tablet, CompactionJob job, Set<StoredTabletFile> jobFiles,
+      Long compactionId, boolean propagateDeletes, CompactableImpl.CompactionHelper helper,
+      List<IteratorSetting> iters, CompactionCheck compactionCheck)
+      throws IOException, CompactionCanceledException {
+    StoredTabletFile metaFile;
+    CompactionEnv cenv = new CompactionEnv() {
+      @Override
+      public boolean isCompactionEnabled(long entriesCompacted) {
+        return compactionCheck.isCompactionEnabled(entriesCompacted);
+      }
+
+      @Override
+      public IteratorScope getIteratorScope() {
+        return IteratorScope.majc;
+      }
+
+      @Override
+      public RateLimiter getReadLimiter() {
+        return tablet.getTabletServer().getMajorCompactionReadLimiter();
+      }
+
+      @Override
+      public RateLimiter getWriteLimiter() {
+        return tablet.getTabletServer().getMajorCompactionWriteLimiter();
+      }
+    };
+
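+    // The Compactor still takes an int reason; pass the kind's ordinal, which Compactor maps back
+    // via CompactionKind.values() in getMajorCompactionReason().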
+    int reason = job.getKind().ordinal();
+
+    AccumuloConfiguration tableConfig =
+        getCompactionConfig(job.getKind(), tablet, helper, job.getFiles());
+
+    SortedMap<StoredTabletFile,DataFileValue> allFiles = tablet.getDatafiles();
+    HashMap<StoredTabletFile,DataFileValue> compactFiles = new HashMap<>();
+    jobFiles.forEach(file -> compactFiles.put(file, allFiles.get(file)));
+
+    TabletFile newFile = tablet.getNextMapFilename(!propagateDeletes ? "A" : "C");
+    TabletFile compactTmpName = new TabletFile(new Path(newFile.getMetaInsert() + "_tmp"));
+
+    Compactor compactor = new Compactor(tablet.getContext(), tablet, compactFiles, null,
+        compactTmpName, propagateDeletes, cenv, iters, reason, tableConfig);
+
+    var mcs = compactor.call();
+
+    if (job.getKind() == CompactionKind.USER || job.getKind() == CompactionKind.SELECTOR) {
+      helper.getFilesToDrop().forEach(f -> {
+        if (allFiles.containsKey(f)) {
+          compactFiles.put(f, allFiles.get(f));
+        }
+      });
+    }
+
+    metaFile = tablet.getDatafileManager().bringMajorCompactionOnline(compactFiles.keySet(),
+        compactTmpName, newFile, compactionId,
+        new DataFileValue(mcs.getFileSize(), mcs.getEntriesWritten()));
+    return metaFile;
+  }
+
+  public static MajorCompactionReason from(CompactionKind ck) {
+    switch (ck) {
+      case CHOP:
+        return MajorCompactionReason.CHOP;
+      case SYSTEM:
+      case SELECTOR:
+        return MajorCompactionReason.NORMAL;
+      case USER:
+        return MajorCompactionReason.USER;
+      default:
+        throw new IllegalArgumentException("Unknown kind " + ck);
+    }
+  }
+}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactionInfo.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactionInfo.java
index c07845a..0c6a50d 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactionInfo.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactionInfo.java
@@ -107,10 +107,8 @@ public class CompactionInfo {
         case CHOP:
           reason = CompactionReason.CHOP;
           break;
-        case IDLE:
-          reason = CompactionReason.IDLE;
-          break;
-        case NORMAL:
+        case SELECTOR:
+        case SYSTEM:
         default:
           reason = CompactionReason.SYSTEM;
           break;
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactionRunner.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactionRunner.java
deleted file mode 100644
index 9da266a..0000000
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactionRunner.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.tserver.tablet;
-
-import java.util.Objects;
-
-import org.apache.accumulo.tserver.compaction.MajorCompactionReason;
-
-final class CompactionRunner implements Runnable, Comparable<CompactionRunner> {
-
-  private final Tablet tablet;
-  private final MajorCompactionReason reason;
-  private final long queued;
-
-  public CompactionRunner(Tablet tablet, MajorCompactionReason reason) {
-    this.tablet = tablet;
-    queued = System.currentTimeMillis();
-    this.reason = reason;
-  }
-
-  @Override
-  public void run() {
-    CompactionStats stats = tablet.majorCompact(reason, queued);
-
-    // Some compaction strategies may always return true for shouldCompact() because they need to
-    // make blocking calls to gather information. Without the following check these strategies would
-    // endlessly requeue. So only check if a subsequent compaction is needed if the previous
-    // compaction actually did something.
-    if (stats != null && stats.getEntriesRead() > 0) {
-      // if there is more work to be done, queue another major compaction
-      synchronized (tablet) {
-        if (reason == MajorCompactionReason.NORMAL && tablet.needsMajorCompaction(reason))
-          tablet.initiateMajorCompaction(reason);
-      }
-    }
-  }
-
-  // We used to synchronize on the Tablet before fetching this information,
-  // but this method is called by the compaction queue thread to re-order the compactions.
-  // The compaction queue holds a lock during this sort.
-  // A tablet lock can be held while putting itself on the queue, so we can't lock the tablet
-  // while pulling information used to sort the tablets in the queue, or we may get deadlocked.
-  // See ACCUMULO-1110.
-  private int getNumFiles() {
-    return tablet.getDatafileManager().getNumFiles();
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hashCode(reason) + Objects.hashCode(queued) + getNumFiles();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    return this == obj || (obj != null && obj instanceof CompactionRunner
-        && compareTo((CompactionRunner) obj) == 0);
-  }
-
-  @Override
-  public int compareTo(CompactionRunner o) {
-    int cmp = reason.compareTo(o.reason);
-    if (cmp != 0)
-      return cmp;
-
-    if (reason == MajorCompactionReason.USER || reason == MajorCompactionReason.CHOP) {
-      // for these types of compactions want to do the oldest first
-      cmp = (int) (queued - o.queued);
-      if (cmp != 0)
-        return cmp;
-    }
-
-    return o.getNumFiles() - this.getNumFiles();
-  }
-}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
index fd6df84..d5aaef0 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
@@ -52,6 +52,7 @@ import org.apache.accumulo.core.iteratorsImpl.system.TimeSettingIterator;
 import org.apache.accumulo.core.metadata.StoredTabletFile;
 import org.apache.accumulo.core.metadata.TabletFile;
 import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.spi.compaction.CompactionKind;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
 import org.apache.accumulo.core.util.LocalityGroupUtil.LocalityGroupConfigurationError;
 import org.apache.accumulo.core.util.ratelimit.RateLimiter;
@@ -64,7 +65,6 @@ import org.apache.accumulo.server.problems.ProblemType;
 import org.apache.accumulo.tserver.InMemoryMap;
 import org.apache.accumulo.tserver.MinorCompactionReason;
 import org.apache.accumulo.tserver.TabletIteratorEnvironment;
-import org.apache.accumulo.tserver.compaction.MajorCompactionReason;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
@@ -81,7 +81,7 @@ public class Compactor implements Callable<CompactionStats> {
 
   public interface CompactionEnv {
 
-    boolean isCompactionEnabled();
+    boolean isCompactionEnabled(long entriesCompacted);
 
     IteratorScope getIteratorScope();
 
@@ -177,8 +177,8 @@ public class Compactor implements Callable<CompactionStats> {
     return outputFile.toString();
   }
 
-  MajorCompactionReason getMajorCompactionReason() {
-    return MajorCompactionReason.values()[reason];
+  CompactionKind getMajorCompactionReason() {
+    return CompactionKind.values()[reason];
   }
 
   protected Map<String,Set<ByteSequence>> getLocalityGroups(AccumuloConfiguration acuTableConf)
@@ -377,7 +377,7 @@ public class Compactor implements Callable<CompactionStats> {
       }
 
       try (TraceScope write = Trace.startSpan("write")) {
-        while (itr.hasTop() && env.isCompactionEnabled()) {
+        while (itr.hasTop() && env.isCompactionEnabled(entriesCompacted)) {
           mfw.append(itr.getTopKey(), itr.getTopValue());
           itr.next();
           entriesCompacted++;
@@ -388,7 +388,7 @@ public class Compactor implements Callable<CompactionStats> {
           }
         }
 
-        if (itr.hasTop() && !env.isCompactionEnabled()) {
+        if (itr.hasTop() && !env.isCompactionEnabled(entriesCompacted)) {
           // cancel major compaction operation
           try {
             try {
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
index 0639610..3937d1e 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
@@ -33,7 +33,6 @@ import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.dataImpl.KeyExtent;
 import org.apache.accumulo.core.logging.TabletLogger;
@@ -56,13 +55,14 @@ import org.apache.htrace.TraceScope;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Preconditions;
+
 class DatafileManager {
   private final Logger log = LoggerFactory.getLogger(DatafileManager.class);
   // access to datafilesizes needs to be synchronized: see CompactionRunner#getNumFiles
   private final Map<StoredTabletFile,DataFileValue> datafileSizes =
       Collections.synchronizedMap(new TreeMap<>());
   private final Tablet tablet;
-  private Long maxMergingMinorCompactionFileSize;
 
   // ensure we only have one reader/writer of our bulk file notes at at time
   private final Object bulkFileImportLock = new Object();
@@ -74,15 +74,12 @@ class DatafileManager {
     this.tablet = tablet;
   }
 
-  private TabletFile mergingMinorCompactionFile = null;
   private final Set<TabletFile> filesToDeleteAfterScan = new HashSet<>();
   private final Map<Long,Set<StoredTabletFile>> scanFileReservations = new HashMap<>();
   private final MapCounter<StoredTabletFile> fileScanReferenceCounts = new MapCounter<>();
   private long nextScanReservationId = 0;
   private boolean reservationsBlocked = false;
 
-  private final Set<TabletFile> majorCompactingFiles = new HashSet<>();
-
   static void rename(VolumeManager fs, Path src, Path dst) throws IOException {
     if (!fs.rename(src, dst)) {
       throw new IOException("Rename " + src + " to " + dst + " returned false ");
@@ -198,8 +195,8 @@ class DatafileManager {
     return inUse;
   }
 
-  public void importMapFiles(long tid, Map<TabletFile,DataFileValue> paths, boolean setTime)
-      throws IOException {
+  public Collection<StoredTabletFile> importMapFiles(long tid, Map<TabletFile,DataFileValue> paths,
+      boolean setTime) throws IOException {
 
     String bulkDir = null;
     // once tablet files are inserted into the metadata they will become StoredTabletFiles
@@ -264,68 +261,12 @@ class DatafileManager {
     for (Entry<StoredTabletFile,DataFileValue> entry : newFiles.entrySet()) {
       TabletLogger.bulkImported(tablet.getExtent(), entry.getKey());
     }
-  }
-
-  StoredTabletFile reserveMergingMinorCompactionFile() {
-    if (mergingMinorCompactionFile != null)
-      throw new IllegalStateException(
-          "Tried to reserve merging minor compaction file when already reserved  : "
-              + mergingMinorCompactionFile);
-
-    if (tablet.getExtent().isRootTablet())
-      return null;
-
-    int maxFiles = tablet.getTableConfiguration().getMaxFilesPerTablet();
-
-    // when a major compaction is running and we are at max files, write out
-    // one extra file... want to avoid the case where major compaction is
-    // compacting everything except for the largest file, and therefore the
-    // largest file is returned for merging.. the following check mostly
-    // avoids this case, except for the case where major compactions fail or
-    // are canceled
-    if (!majorCompactingFiles.isEmpty() && datafileSizes.size() == maxFiles)
-      return null;
-
-    if (datafileSizes.size() >= maxFiles) {
-      // find the smallest file
-
-      long maxFileSize = Long.MAX_VALUE;
-      maxMergingMinorCompactionFileSize = ConfigurationTypeHelper.getFixedMemoryAsBytes(
-          tablet.getTableConfiguration().get(Property.TABLE_MINC_MAX_MERGE_FILE_SIZE));
-      if (maxMergingMinorCompactionFileSize > 0) {
-        maxFileSize = maxMergingMinorCompactionFileSize;
-      }
-      long min = maxFileSize;
-      StoredTabletFile minName = null;
-
-      for (Entry<StoredTabletFile,DataFileValue> entry : datafileSizes.entrySet()) {
-        if (entry.getValue().getSize() <= min && !majorCompactingFiles.contains(entry.getKey())) {
-          min = entry.getValue().getSize();
-          minName = entry.getKey();
-        }
-      }
-
-      if (minName == null)
-        return null;
-
-      mergingMinorCompactionFile = minName;
-      return minName;
-    }
 
-    return null;
+    return newFiles.keySet();
   }
 
-  void unreserveMergingMinorCompactionFile(TabletFile file) {
-    if ((file == null && mergingMinorCompactionFile != null)
-        || (file != null && mergingMinorCompactionFile == null) || (file != null
-            && mergingMinorCompactionFile != null && !file.equals(mergingMinorCompactionFile)))
-      throw new IllegalStateException("Disagreement " + file + " " + mergingMinorCompactionFile);
-
-    mergingMinorCompactionFile = null;
-  }
-
-  void bringMinorCompactionOnline(TabletFile tmpDatafile, TabletFile newDatafile,
-      StoredTabletFile absMergeFile, DataFileValue dfv, CommitSession commitSession, long flushId) {
+  StoredTabletFile bringMinorCompactionOnline(TabletFile tmpDatafile, TabletFile newDatafile,
+      DataFileValue dfv, CommitSession commitSession, long flushId) {
     StoredTabletFile newFile;
     // rename before putting in metadata table, so files in metadata table should
     // always exist
@@ -362,25 +303,6 @@ class DatafileManager {
 
     long t1, t2;
 
-    // the code below always assumes merged files are in use by scans... this must be done
-    // because the in memory list of files is not updated until after the metadata table
-    // therefore the file is available to scans until memory is updated, but want to ensure
-    // the file is not available for garbage collection... if memory were updated
-    // before this point (like major compactions do), then the following code could wait
-    // for scans to finish like major compactions do.... used to wait for scans to finish
-    // here, but that was incorrect because a scan could start after waiting but before
-    // memory was updated... assuming the file is always in use by scans leads to
-    // one unneeded metadata update when it was not actually in use
-    Set<StoredTabletFile> filesInUseByScans = Collections.emptySet();
-    if (absMergeFile != null)
-      filesInUseByScans = Collections.singleton(absMergeFile);
-
-    // very important to write delete entries outside of log lock, because
-    // this metadata write does not go up... it goes sideways or to itself
-    if (absMergeFile != null)
-      MetadataTableUtil.addDeleteEntries(tablet.getExtent(),
-          Collections.singleton(absMergeFile.getMetaUpdateDelete()), tablet.getContext());
-
     Set<String> unusedWalLogs = tablet.beginClearingUnusedLogs();
     boolean replicate =
         ReplicationConfigurationUtil.isEnabled(tablet.getExtent(), tablet.getTableConfiguration());
@@ -407,8 +329,8 @@ class DatafileManager {
       // following metadata
       // write is made
 
-      newFile = tablet.updateTabletDataFile(commitSession.getMaxCommittedTime(), newDatafile,
-          absMergeFile, dfv, unusedWalLogs, filesInUseByScans, flushId);
+      newFile = tablet.updateTabletDataFile(commitSession.getMaxCommittedTime(), newDatafile, dfv,
+          unusedWalLogs, flushId);
 
       // Mark that we have data we want to replicate
       // This WAL could still be in use by other Tablets *from the same table*, so we can only mark
@@ -456,21 +378,12 @@ class DatafileManager {
         datafileSizes.put(newFile, dfv);
       }
 
-      if (absMergeFile != null) {
-        datafileSizes.remove(absMergeFile);
-      }
-
-      unreserveMergingMinorCompactionFile(absMergeFile);
-
       tablet.flushComplete(flushId);
 
       t2 = System.currentTimeMillis();
     }
 
-    // must do this after list of files in memory is updated above
-    removeFilesAfterScan(filesInUseByScans);
-
-    TabletLogger.flushed(tablet.getExtent(), absMergeFile, newDatafile);
+    TabletLogger.flushed(tablet.getExtent(), newDatafile);
 
     if (log.isTraceEnabled()) {
       log.trace(String.format("MinC finish lock %.2f secs %s", (t2 - t1) / 1000.0,
@@ -481,22 +394,8 @@ class DatafileManager {
       log.debug(String.format("Minor Compaction wrote out file larger than split threshold."
           + " split threshold = %,d  file size = %,d", splitSize, dfv.getSize()));
     }
-  }
-
-  public void reserveMajorCompactingFiles(Collection<StoredTabletFile> files) {
-    if (!majorCompactingFiles.isEmpty())
-      throw new IllegalStateException("Major compacting files not empty " + majorCompactingFiles);
 
-    if (mergingMinorCompactionFile != null && files.contains(mergingMinorCompactionFile))
-      throw new IllegalStateException(
-          "Major compaction tried to resrve file in use by minor compaction "
-              + mergingMinorCompactionFile);
-
-    majorCompactingFiles.addAll(files);
-  }
-
-  public void clearMajorCompactingFile() {
-    majorCompactingFiles.clear();
+    return newFile;
   }
 
   StoredTabletFile bringMajorCompactionOnline(Set<StoredTabletFile> oldDatafiles,
@@ -525,24 +424,22 @@ class DatafileManager {
     synchronized (tablet) {
       t1 = System.currentTimeMillis();
 
+      Preconditions.checkState(datafileSizes.keySet().containsAll(oldDatafiles),
+          "Compacted files %s are not a subset of tablet files %s", oldDatafiles,
+          datafileSizes.keySet());
+      if (dfv.getNumEntries() > 0) {
+        Preconditions.checkState(!datafileSizes.containsKey(newFile),
+            "New compaction file %s already exist in tablet files %s", newFile,
+            datafileSizes.keySet());
+      }
+
       tablet.incrementDataSourceDeletions();
 
-      // atomically remove old files and add new file
-      for (StoredTabletFile oldDatafile : oldDatafiles) {
-        if (!datafileSizes.containsKey(oldDatafile)) {
-          log.error("file does not exist in set {}", oldDatafile);
-        }
-        datafileSizes.remove(oldDatafile);
-        majorCompactingFiles.remove(oldDatafile);
-      }
+      datafileSizes.keySet().removeAll(oldDatafiles);
 
       if (dfv.getNumEntries() > 0) {
-        if (datafileSizes.containsKey(newFile)) {
-          log.error("Adding file that is already in set {}", newFile);
-        }
         datafileSizes.put(newFile, dfv);
         // could be used by a follow on compaction in a multipass compaction
-        majorCompactingFiles.add(newFile);
       }
 
       tablet.computeNumEntries();
@@ -567,7 +464,6 @@ class DatafileManager {
       log.trace(String.format("MajC finish lock %.2f secs", (t2 - t1) / 1000.0));
     }
 
-    TabletLogger.compacted(extent, oldDatafiles, newFile);
     return newFile;
   }
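
A minimal, self-contained sketch of the fail-fast invariant pattern that bringMajorCompactionOnline now uses above, with String/Long maps standing in for StoredTabletFile and DataFileValue; the class and parameter names are placeholders and are not part of the commit.

    import java.util.Map;
    import java.util.Set;

    import com.google.common.base.Preconditions;

    class MajorCompactionSwapSketch {
      // Atomically replaces the compaction input files with the compaction output,
      // failing fast instead of logging and continuing when the tablet's file set
      // does not look the way the caller expects.
      static void swap(Map<String,Long> datafileSizes, Set<String> oldDatafiles,
          String newFile, long entriesWritten, long fileSize) {
        Preconditions.checkState(datafileSizes.keySet().containsAll(oldDatafiles),
            "Compacted files %s are not a subset of tablet files %s", oldDatafiles,
            datafileSizes.keySet());
        if (entriesWritten > 0) {
          Preconditions.checkState(!datafileSizes.containsKey(newFile),
              "New compaction file %s already exists in tablet files %s", newFile,
              datafileSizes.keySet());
        }

        // remove the inputs; only add the output if it actually contains entries
        datafileSizes.keySet().removeAll(oldDatafiles);
        if (entriesWritten > 0) {
          datafileSizes.put(newFile, fileSize);
        }
      }
    }

When the compaction writes no entries, the output file is never added, mirroring the dfv.getNumEntries() > 0 guard in the hunk above.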
 
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactionTask.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactionTask.java
index 4750b5f..5209fe2 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactionTask.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactionTask.java
@@ -20,12 +20,10 @@ package org.apache.accumulo.tserver.tablet;
 
 import java.io.IOException;
 
-import org.apache.accumulo.core.metadata.StoredTabletFile;
 import org.apache.accumulo.core.metadata.TabletFile;
 import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.apache.accumulo.core.trace.TraceUtil;
 import org.apache.accumulo.tserver.MinorCompactionReason;
-import org.apache.accumulo.tserver.compaction.MajorCompactionReason;
 import org.apache.hadoop.fs.Path;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
@@ -41,18 +39,16 @@ class MinorCompactionTask implements Runnable {
   private long queued;
   private CommitSession commitSession;
   private DataFileValue stats;
-  private StoredTabletFile mergeFile;
   private long flushId;
   private MinorCompactionReason mincReason;
   private double tracePercent;
 
-  MinorCompactionTask(Tablet tablet, StoredTabletFile mergeFile, CommitSession commitSession,
-      long flushId, MinorCompactionReason mincReason, double tracePercent) {
+  MinorCompactionTask(Tablet tablet, CommitSession commitSession, long flushId,
+      MinorCompactionReason mincReason, double tracePercent) {
     this.tablet = tablet;
     queued = System.currentTimeMillis();
     tablet.minorCompactionWaitingToStart();
     this.commitSession = commitSession;
-    this.mergeFile = mergeFile;
     this.flushId = flushId;
     this.mincReason = mincReason;
     this.tracePercent = tracePercent;
@@ -64,7 +60,7 @@ class MinorCompactionTask implements Runnable {
     ProbabilitySampler sampler = TraceUtil.probabilitySampler(tracePercent);
     try {
       try (TraceScope minorCompaction = Trace.startSpan("minorCompaction", sampler)) {
-        TabletFile newFile = tablet.getNextMapFilename(mergeFile == null ? "F" : "M");
+        TabletFile newFile = tablet.getNextMapFilename("F");
         TabletFile tmpFile = new TabletFile(new Path(newFile.getPathStr() + "_tmp"));
         try (TraceScope span = Trace.startSpan("waitForCommits")) {
           synchronized (tablet) {
@@ -92,7 +88,7 @@ class MinorCompactionTask implements Runnable {
         }
         try (TraceScope span = Trace.startSpan("compact")) {
           this.stats = tablet.minorCompact(tablet.getTabletMemory().getMinCMemTable(), tmpFile,
-              newFile, mergeFile, queued, commitSession, flushId, mincReason);
+              newFile, queued, commitSession, flushId, mincReason);
         }
 
         if (minorCompaction.getSpan() != null) {
@@ -105,8 +101,6 @@ class MinorCompactionTask implements Runnable {
 
       if (tablet.needsSplit()) {
         tablet.getTabletServer().executeSplit(tablet);
-      } else {
-        tablet.initiateMajorCompaction(MajorCompactionReason.NORMAL);
       }
     } catch (Throwable t) {
       log.error("Unknown error during minor compaction for extent: " + tablet.getExtent(), t);
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java
index be90f33..9657e5b 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java
@@ -33,9 +33,7 @@ import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.StoredTabletFile;
 import org.apache.accumulo.core.metadata.TabletFile;
-import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
 import org.apache.accumulo.core.util.ratelimit.RateLimiter;
 import org.apache.accumulo.server.conf.TableConfiguration;
@@ -53,26 +51,15 @@ public class MinorCompactor extends Compactor {
 
   private static final Logger log = LoggerFactory.getLogger(MinorCompactor.class);
 
-  private static final Map<StoredTabletFile,DataFileValue> EMPTY_MAP = Collections.emptyMap();
-
-  private static Map<StoredTabletFile,DataFileValue> toFileMap(StoredTabletFile mergeFile,
-      DataFileValue dfv) {
-    if (mergeFile == null)
-      return EMPTY_MAP;
-
-    return Collections.singletonMap(mergeFile, dfv);
-  }
-
   private final TabletServer tabletServer;
 
   public MinorCompactor(TabletServer tabletServer, Tablet tablet, InMemoryMap imm,
-      StoredTabletFile mergeFile, DataFileValue dfv, TabletFile outputFile,
-      MinorCompactionReason mincReason, TableConfiguration tableConfig) {
-    super(tabletServer.getContext(), tablet, toFileMap(mergeFile, dfv), imm, outputFile, true,
+      TabletFile outputFile, MinorCompactionReason mincReason, TableConfiguration tableConfig) {
+    super(tabletServer.getContext(), tablet, Collections.emptyMap(), imm, outputFile, true,
         new CompactionEnv() {
 
           @Override
-          public boolean isCompactionEnabled() {
+          public boolean isCompactionEnabled(long entriesCompacted) {
             return true;
           }
 
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
index 48eb6b5..d0f2050 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
@@ -27,17 +27,14 @@ import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.UncheckedIOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.PriorityQueue;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
@@ -50,13 +47,11 @@ import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Durability;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
+import org.apache.accumulo.core.client.admin.CompactionConfig;
 import org.apache.accumulo.core.clientImpl.DurabilityImpl;
 import org.apache.accumulo.core.clientImpl.Tables;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.clientImpl.UserCompactionUtils;
 import org.apache.accumulo.core.conf.AccumuloConfiguration.Deriver;
-import org.apache.accumulo.core.conf.ConfigurationCopy;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.constraints.Violations;
 import org.apache.accumulo.core.data.ByteSequence;
@@ -68,15 +63,12 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.dataImpl.KeyExtent;
 import org.apache.accumulo.core.dataImpl.thrift.MapFileInfo;
 import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.core.iterators.IterationInterruptedException;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.YieldCallback;
 import org.apache.accumulo.core.iteratorsImpl.system.SourceSwitchingIterator;
 import org.apache.accumulo.core.logging.TabletLogger;
 import org.apache.accumulo.core.master.thrift.BulkImportState;
-import org.apache.accumulo.core.master.thrift.TabletLoadState;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.StoredTabletFile;
 import org.apache.accumulo.core.metadata.TabletFile;
@@ -87,26 +79,21 @@ import org.apache.accumulo.core.protobuf.ProtobufUtil;
 import org.apache.accumulo.core.replication.ReplicationConfigurationUtil;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.spi.cache.BlockCache;
 import org.apache.accumulo.core.spi.scan.ScanDirectives;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
-import org.apache.accumulo.core.trace.TraceUtil;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.ShutdownUtil;
-import org.apache.accumulo.core.util.ratelimit.RateLimiter;
 import org.apache.accumulo.core.volume.Volume;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
 import org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl;
-import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeUtil;
 import org.apache.accumulo.server.fs.VolumeUtil.TabletFiles;
 import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.master.tableOps.UserCompactionConfig;
 import org.apache.accumulo.server.problems.ProblemReport;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.problems.ProblemType;
@@ -128,35 +115,24 @@ import org.apache.accumulo.tserver.TabletStatsKeeper;
 import org.apache.accumulo.tserver.TabletStatsKeeper.Operation;
 import org.apache.accumulo.tserver.TooManyFilesException;
 import org.apache.accumulo.tserver.TservConstraintEnv;
-import org.apache.accumulo.tserver.compaction.CompactionPlan;
-import org.apache.accumulo.tserver.compaction.CompactionStrategy;
-import org.apache.accumulo.tserver.compaction.DefaultCompactionStrategy;
-import org.apache.accumulo.tserver.compaction.MajorCompactionReason;
-import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
-import org.apache.accumulo.tserver.compaction.WriteParameters;
+import org.apache.accumulo.tserver.compactions.Compactable;
 import org.apache.accumulo.tserver.constraints.ConstraintChecker;
 import org.apache.accumulo.tserver.log.DfsLogger;
-import org.apache.accumulo.tserver.mastermessage.TabletStatusMessage;
 import org.apache.accumulo.tserver.metrics.TabletServerMinCMetrics;
 import org.apache.accumulo.tserver.scan.ScanParameters;
-import org.apache.accumulo.tserver.tablet.Compactor.CompactionCanceledException;
-import org.apache.accumulo.tserver.tablet.Compactor.CompactionEnv;
 import org.apache.commons.codec.DecoderException;
 import org.apache.commons.codec.binary.Hex;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
-import org.apache.htrace.impl.ProbabilitySampler;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
-import com.google.common.cache.Cache;
 import com.google.common.collect.ImmutableSet;
 
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
@@ -218,11 +194,9 @@ public class Tablet {
     WAITING_TO_START, IN_PROGRESS
   }
 
-  private volatile CompactionState minorCompactionState = null;
-  private volatile CompactionState majorCompactionState = null;
+  private CompactableImpl compactable;
 
-  private final Set<MajorCompactionReason> majorCompactionQueued =
-      Collections.synchronizedSet(EnumSet.noneOf(MajorCompactionReason.class));
+  private volatile CompactionState minorCompactionState = null;
 
   private final Deriver<ConstraintChecker> constraintChecker;
 
@@ -451,10 +425,12 @@ public class Tablet {
     getDatafileManager().removeFilesAfterScan(data.getScanFiles());
 
     // look for hints of a failure on the previous tablet server
-    if (!logEntries.isEmpty() || needsMajorCompaction(MajorCompactionReason.NORMAL)) {
+    if (!logEntries.isEmpty()) {
       // look for any temp files hanging around
       removeOldTemporaryFiles();
     }
+
+    this.compactable = new CompactableImpl(this, tabletServer.getCompactionManager());
   }
 
   public ServerContext getContext() {
@@ -795,8 +771,7 @@ public class Tablet {
   }
 
   DataFileValue minorCompact(InMemoryMap memTable, TabletFile tmpDatafile, TabletFile newDatafile,
-      StoredTabletFile mergeFile, long queued, CommitSession commitSession, long flushId,
-      MinorCompactionReason mincReason) {
+      long queued, CommitSession commitSession, long flushId, MinorCompactionReason mincReason) {
     boolean failed = false;
     long start = System.currentTimeMillis();
     timer.incrementStatusMinor();
@@ -810,21 +785,18 @@ public class Tablet {
       try (TraceScope span = Trace.startSpan("write")) {
         count = memTable.getNumEntries();
 
-        DataFileValue dfv = null;
-        if (mergeFile != null) {
-          dfv = getDatafileManager().getDatafileSizes().get(mergeFile);
-        }
-
-        MinorCompactor compactor = new MinorCompactor(tabletServer, this, memTable, mergeFile, dfv,
-            tmpDatafile, mincReason, tableConfiguration);
+        MinorCompactor compactor = new MinorCompactor(tabletServer, this, memTable, tmpDatafile,
+            mincReason, tableConfiguration);
         stats = compactor.call();
       }
 
       try (TraceScope span = Trace.startSpan("bringOnline")) {
-        getDatafileManager().bringMinorCompactionOnline(tmpDatafile, newDatafile, mergeFile,
+        var storedFile = getDatafileManager().bringMinorCompactionOnline(tmpDatafile, newDatafile,
             new DataFileValue(stats.getFileSize(), stats.getEntriesWritten()), commitSession,
             flushId);
+        compactable.filesAdded(true, List.of(storedFile));
       }
+
       return new DataFileValue(stats.getFileSize(), stats.getEntriesWritten());
     } catch (Exception | Error e) {
       failed = true;
@@ -855,16 +827,10 @@ public class Tablet {
     otherLogs = currentLogs;
     currentLogs = new HashSet<>();
 
-    StoredTabletFile mergeFile = null;
-    if (mincReason != MinorCompactionReason.RECOVERY) {
-      mergeFile = getDatafileManager().reserveMergingMinorCompactionFile();
-    }
-
     double tracePercent =
         tabletServer.getConfiguration().getFraction(Property.TSERV_MINC_TRACE_PERCENT);
 
-    return new MinorCompactionTask(this, mergeFile, oldCommitSession, flushId, mincReason,
-        tracePercent);
+    return new MinorCompactionTask(this, oldCommitSession, flushId, mincReason, tracePercent);
 
   }
 
@@ -973,15 +939,13 @@ public class Tablet {
       synchronized (this) {
         t1 = System.currentTimeMillis();
 
-        if (isClosing() || isClosed() || majorCompactionState == CompactionState.WAITING_TO_START
-            || getTabletMemory().memoryReservedForMinC()
+        if (isClosing() || isClosed() || getTabletMemory().memoryReservedForMinC()
             || getTabletMemory().getMemTable().getNumEntries() == 0 || updatingFlushID) {
 
           logMessage = new StringBuilder();
 
           logMessage.append(extent);
           logMessage.append(" closeState " + closeState);
-          logMessage.append(" majorCompactionState " + majorCompactionState);
           if (getTabletMemory() != null) {
             logMessage.append(" tabletMemory.memoryReservedForMinC() "
                 + getTabletMemory().memoryReservedForMinC());
@@ -1029,16 +993,11 @@ public class Tablet {
   long getCompactionCancelID() {
     String zTablePath = Constants.ZROOT + "/" + tabletServer.getInstanceID() + Constants.ZTABLES
         + "/" + extent.getTableId() + Constants.ZTABLE_COMPACT_CANCEL_ID;
-
-    try {
-      String id = new String(context.getZooReaderWriter().getData(zTablePath, null), UTF_8);
-      return Long.parseLong(id);
-    } catch (KeeperException | InterruptedException e) {
-      throw new RuntimeException("Exception on " + extent + " getting compact cancel ID", e);
-    }
+    String id = new String(context.getZooCache().get(zTablePath), UTF_8);
+    return Long.parseLong(id);
   }
 
-  public Pair<Long,UserCompactionConfig> getCompactionID() throws NoNodeException {
+  public Pair<Long,CompactionConfig> getCompactionID() throws NoNodeException {
     try {
       String zTablePath = Constants.ZROOT + "/" + tabletServer.getInstanceID() + Constants.ZTABLES
           + "/" + extent.getTableId() + Constants.ZTABLE_COMPACT_ID;
@@ -1047,7 +1006,7 @@ public class Tablet {
           new String(context.getZooReaderWriter().getData(zTablePath, null), UTF_8).split(",");
       long compactID = Long.parseLong(tokens[0]);
 
-      UserCompactionConfig compactionConfig = new UserCompactionConfig();
+      CompactionConfig overlappingConfig = null;
 
       if (tokens.length > 1) {
         Hex hex = new Hex();
@@ -1055,19 +1014,21 @@ public class Tablet {
             new ByteArrayInputStream(hex.decode(tokens[1].split("=")[1].getBytes(UTF_8)));
         DataInputStream dis = new DataInputStream(bais);
 
-        compactionConfig.readFields(dis);
+        var compactionConfig = UserCompactionUtils.decodeCompactionConfig(dis);
 
         KeyExtent ke = new KeyExtent(extent.getTableId(), compactionConfig.getEndRow(),
             compactionConfig.getStartRow());
 
-        if (!ke.overlaps(extent)) {
-          // only use iterators if compaction range overlaps
-          compactionConfig = new UserCompactionConfig();
+        if (ke.overlaps(extent)) {
+          overlappingConfig = compactionConfig;
         }
       }
 
-      return new Pair<>(compactID, compactionConfig);
-    } catch (IOException | InterruptedException | DecoderException | NumberFormatException e) {
+      if (overlappingConfig == null)
+        overlappingConfig = new CompactionConfig(); // no config present, set to default
+
+      return new Pair<>(compactID, overlappingConfig);
+    } catch (InterruptedException | DecoderException | NumberFormatException e) {
       throw new RuntimeException("Exception on " + extent + " getting compaction ID", e);
     } catch (KeeperException ke) {
       if (ke instanceof NoNodeException) {
@@ -1228,27 +1189,25 @@ public class Tablet {
     log.trace("initiateClose(saveState={}) {}", saveState, getExtent());
 
     MinorCompactionTask mct = null;
-
     synchronized (this) {
       if (isClosed() || isClosing()) {
         String msg = "Tablet " + getExtent() + " already " + closeState;
         throw new IllegalStateException(msg);
       }
 
-      // enter the closing state, no splits, minor, or major compactions can start
-      // should cause running major compactions to stop
+      // enter the closing state, no splits or minor compactions can start
       closeState = CloseState.CLOSING;
       this.notifyAll();
+    }
 
-      // wait for major compactions to finish, setting closing to
-      // true should cause any running major compactions to abort
-      while (isMajorCompactionRunning()) {
-        try {
-          this.wait(50);
-        } catch (InterruptedException e) {
-          log.error(e.toString());
-        }
-      }
+    // Cancel any running compactions and prevent future ones from starting. This is very important
+    // because background compactions may update the metadata table. These metadata updates cannot
+    // be allowed after a tablet closes. Compactable has its own lock and calls tablet code, so do
+    // not hold tablet lock while calling it.
+    compactable.close();
+
+    synchronized (this) {
+      Preconditions.checkState(closeState == CloseState.CLOSING);
 
       while (updatingFlushID) {
         try {
@@ -1258,12 +1217,20 @@ public class Tablet {
         }
       }
 
+      // calling this.wait() releases the lock, ensure things are as expected when the lock is
+      // obtained again
+      Preconditions.checkState(closeState == CloseState.CLOSING);
+
       if (!saveState || getTabletMemory().getMemTable().getNumEntries() == 0) {
         return;
       }
 
       getTabletMemory().waitForMinC();
 
+      // calling this.wait() in waitForMinC() releases the lock, ensure things are as expected when
+      // the lock is obtained again
+      Preconditions.checkState(closeState == CloseState.CLOSING);
+
       try {
         mct = prepareForMinC(getFlushID(), MinorCompactionReason.CLOSE);
       } catch (NoNodeException e) {
@@ -1404,40 +1371,6 @@ public class Tablet {
     // TODO check lastFlushID and lostCompactID - ACCUMULO-1290
   }
 
-  public synchronized void initiateMajorCompaction(MajorCompactionReason reason) {
-
-    if (isClosing() || isClosed() || !needsMajorCompaction(reason) || isMajorCompactionRunning()
-        || majorCompactionQueued.contains(reason)) {
-      return;
-    }
-
-    majorCompactionQueued.add(reason);
-
-    try {
-      getTabletResources().executeMajorCompaction(getExtent(), new CompactionRunner(this, reason));
-    } catch (RuntimeException t) {
-      log.debug("removing {} because we encountered an exception enqueing the CompactionRunner",
-          reason, t);
-      majorCompactionQueued.remove(reason);
-      throw t;
-    }
-  }
-
-  /**
-   * Returns true if a major compaction should be performed on the tablet.
-   *
-   */
-  public boolean needsMajorCompaction(MajorCompactionReason reason) {
-    if (isMajorCompactionRunning()) {
-      return false;
-    }
-    if (reason == MajorCompactionReason.CHOP || reason == MajorCompactionReason.USER) {
-      return true;
-    }
-    return getTabletResources().needsMajorCompaction(getDatafileManager().getDatafileSizes(),
-        reason);
-  }
-
   /**
    * Returns an int representing the total block size of the files served by this tablet.
    *
@@ -1603,56 +1536,6 @@ public class Tablet {
     return common;
   }
 
-  private Map<StoredTabletFile,Pair<Key,Key>>
-      getFirstAndLastKeys(SortedMap<StoredTabletFile,DataFileValue> allFiles) throws IOException {
-    final Map<StoredTabletFile,Pair<Key,Key>> result = new HashMap<>();
-    final FileOperations fileFactory = FileOperations.getInstance();
-    final VolumeManager fs = getTabletServer().getFileSystem();
-    for (Entry<StoredTabletFile,DataFileValue> entry : allFiles.entrySet()) {
-      StoredTabletFile file = entry.getKey();
-      FileSystem ns = fs.getFileSystemByPath(file.getPath());
-      try (FileSKVIterator openReader = fileFactory.newReaderBuilder()
-          .forFile(file.getPathStr(), ns, ns.getConf(), context.getCryptoService())
-          .withTableConfiguration(this.getTableConfiguration()).seekToBeginning().build()) {
-        Key first = openReader.getFirstKey();
-        Key last = openReader.getLastKey();
-        result.put(file, new Pair<>(first, last));
-      }
-    }
-    return result;
-  }
-
-  List<StoredTabletFile> findChopFiles(KeyExtent extent,
-      Map<StoredTabletFile,Pair<Key,Key>> firstAndLastKeys, Collection<StoredTabletFile> allFiles) {
-    List<StoredTabletFile> result = new ArrayList<>();
-    if (firstAndLastKeys == null) {
-      result.addAll(allFiles);
-      return result;
-    }
-
-    for (StoredTabletFile file : allFiles) {
-      Pair<Key,Key> pair = firstAndLastKeys.get(file);
-      if (pair == null) {
-        // file was created or imported after we obtained the first and last keys... there
-        // are a few options here... throw an exception which will cause the compaction to
-        // retry and also cause ugly error message that the admin has to ignore... could
-        // go get the first and last key, but this code is called while the tablet lock
-        // is held... or just compact the file....
-        result.add(file);
-      } else {
-        Key first = pair.getFirst();
-        Key last = pair.getSecond();
-        // If first and last are null, it's an empty file. Add it to the compact set so it goes
-        // away.
-        if ((first == null && last == null) || (first != null && !extent.contains(first.getRow()))
-            || (last != null && !extent.contains(last.getRow()))) {
-          result.add(file);
-        }
-      }
-    }
-    return result;
-  }
-
   /**
    * Returns true if this tablet needs to be split
    *
@@ -1664,389 +1547,6 @@ public class Tablet {
     return findSplitRow(getDatafileManager().getFiles()) != null;
   }
 
-  // BEGIN PRIVATE METHODS RELATED TO MAJOR COMPACTION
-  private CompactionStats _majorCompact(MajorCompactionReason reason)
-      throws IOException, CompactionCanceledException {
-
-    long t1, t2, t3;
-
-    Pair<Long,UserCompactionConfig> compactionId = null;
-    CompactionStrategy strategy = null;
-    Map<StoredTabletFile,Pair<Key,Key>> firstAndLastKeys = null;
-
-    if (reason == MajorCompactionReason.USER) {
-      try {
-        compactionId = getCompactionID();
-        strategy = createCompactionStrategy(compactionId.getSecond().getCompactionStrategy());
-      } catch (NoNodeException e) {
-        throw new RuntimeException("Exception on " + extent + " during MajC", e);
-      }
-    } else if (reason == MajorCompactionReason.NORMAL || reason == MajorCompactionReason.IDLE) {
-      strategy = Property.createTableInstanceFromPropertyName(tableConfiguration,
-          Property.TABLE_COMPACTION_STRATEGY, CompactionStrategy.class,
-          new DefaultCompactionStrategy());
-      strategy.init(Property.getCompactionStrategyOptions(tableConfiguration));
-    } else if (reason == MajorCompactionReason.CHOP) {
-      firstAndLastKeys = getFirstAndLastKeys(getDatafileManager().getDatafileSizes());
-    } else {
-      throw new IllegalArgumentException(
-          "Unknown compaction reason " + reason + " during MajC on " + extent);
-    }
-
-    if (strategy != null) {
-      BlockCache sc = tabletResources.getTabletServerResourceManager().getSummaryCache();
-      BlockCache ic = tabletResources.getTabletServerResourceManager().getIndexCache();
-      Cache<String,Long> fileLenCache =
-          tabletResources.getTabletServerResourceManager().getFileLenCache();
-      MajorCompactionRequest request = new MajorCompactionRequest(extent, reason,
-          getTabletServer().getFileSystem(), tableConfiguration, sc, ic, fileLenCache, context);
-      request.setFiles(getDatafileManager().getDatafileSizes());
-      strategy.gatherInformation(request);
-    }
-
-    Map<StoredTabletFile,DataFileValue> filesToCompact = null;
-
-    int maxFilesToCompact = tableConfiguration.getCount(Property.TSERV_MAJC_THREAD_MAXOPEN);
-
-    CompactionStats majCStats = new CompactionStats();
-    CompactionPlan plan = null;
-
-    boolean propogateDeletes = false;
-    boolean updateCompactionID = false;
-
-    synchronized (this) {
-      // plan all that work that needs to be done in the sync block... then do the actual work
-      // outside the sync block
-
-      t1 = System.currentTimeMillis();
-
-      majorCompactionState = CompactionState.WAITING_TO_START;
-
-      getTabletMemory().waitForMinC();
-
-      t2 = System.currentTimeMillis();
-
-      majorCompactionState = CompactionState.IN_PROGRESS;
-      notifyAll();
-
-      SortedMap<StoredTabletFile,DataFileValue> allFiles = getDatafileManager().getDatafileSizes();
-      List<StoredTabletFile> inputFiles = new ArrayList<>();
-      if (reason == MajorCompactionReason.CHOP) {
-        // enforce rules: files with keys outside our range need to be compacted
-        inputFiles.addAll(findChopFiles(extent, firstAndLastKeys, allFiles.keySet()));
-      } else {
-        MajorCompactionRequest request =
-            new MajorCompactionRequest(extent, reason, tableConfiguration, context);
-        request.setFiles(allFiles);
-        plan = strategy.getCompactionPlan(request);
-        if (plan != null) {
-          plan.validate(allFiles.keySet());
-          inputFiles.addAll(plan.inputFiles);
-        }
-      }
-
-      if (inputFiles.isEmpty()) {
-        if (reason == MajorCompactionReason.USER) {
-          if (compactionId.getSecond().getIterators().isEmpty()) {
-            log.debug(
-                "No-op major compaction by USER on 0 input files because no iterators present.");
-            lastCompactID = compactionId.getFirst();
-            updateCompactionID = true;
-          } else {
-            log.debug("Major compaction by USER on 0 input files with iterators.");
-            filesToCompact = new HashMap<>();
-          }
-        } else {
-          return majCStats;
-        }
-      } else {
-        // If no original files will exist at the end of the compaction, we do not have to propagate
-        // deletes
-        Set<StoredTabletFile> droppedFiles = new HashSet<>();
-        droppedFiles.addAll(inputFiles);
-        if (plan != null) {
-          droppedFiles.addAll(plan.deleteFiles);
-        }
-        propogateDeletes = !(droppedFiles.equals(allFiles.keySet()));
-        log.debug("Major compaction plan: {} propagate deletes : {}", plan, propogateDeletes);
-        filesToCompact = new HashMap<>(allFiles);
-        filesToCompact.keySet().retainAll(inputFiles);
-
-        getDatafileManager().reserveMajorCompactingFiles(filesToCompact.keySet());
-      }
-
-      t3 = System.currentTimeMillis();
-    }
-
-    try {
-
-      log.trace(String.format("MajC initiate lock %.2f secs, wait %.2f secs", (t3 - t2) / 1000.0,
-          (t2 - t1) / 1000.0));
-
-      if (updateCompactionID) {
-        MetadataTableUtil.updateTabletCompactID(extent, compactionId.getFirst(), context,
-            getTabletServer().getLock());
-        return majCStats;
-      }
-
-      if (!propogateDeletes && compactionId == null) {
-        // compacting everything, so update the compaction id in metadata
-        try {
-          compactionId = getCompactionID();
-          if (compactionId.getSecond().getCompactionStrategy() != null) {
-            compactionId = null;
-            // TODO maybe return unless chop?
-          }
-
-        } catch (NoNodeException e) {
-          throw new RuntimeException("Exception on " + extent + " during MajC", e);
-        }
-      }
-
-      List<IteratorSetting> compactionIterators = new ArrayList<>();
-      if (compactionId != null) {
-        if (reason == MajorCompactionReason.USER) {
-          if (getCompactionCancelID() >= compactionId.getFirst()) {
-            // compaction was canceled
-            return majCStats;
-          }
-          compactionIterators = compactionId.getSecond().getIterators();
-
-          synchronized (this) {
-            if (lastCompactID >= compactionId.getFirst()) {
-              // already compacted
-              return majCStats;
-            }
-          }
-        }
-
-      }
-
-      // need to handle case where only one file is being major compacted
-      // ACCUMULO-3645 run loop at least once, even if filesToCompact.isEmpty()
-      do {
-        int numToCompact = maxFilesToCompact;
-
-        if (filesToCompact.size() > maxFilesToCompact
-            && filesToCompact.size() < 2 * maxFilesToCompact) {
-          // on the second to last compaction pass, compact the minimum amount of files possible
-          numToCompact = filesToCompact.size() - maxFilesToCompact + 1;
-        }
-
-        Set<StoredTabletFile> smallestFiles = removeSmallest(filesToCompact, numToCompact);
-
-        TabletFile newFile =
-            getNextMapFilename((filesToCompact.isEmpty() && !propogateDeletes) ? "A" : "C");
-        TabletFile compactTmpName = new TabletFile(new Path(newFile.getMetaInsert() + "_tmp"));
-
-        AccumuloConfiguration tableConf = createCompactionConfiguration(tableConfiguration, plan);
-
-        try (TraceScope span = Trace.startSpan("compactFiles")) {
-          CompactionEnv cenv = new CompactionEnv() {
-            @Override
-            public boolean isCompactionEnabled() {
-              return !isClosing();
-            }
-
-            @Override
-            public IteratorScope getIteratorScope() {
-              return IteratorScope.majc;
-            }
-
-            @Override
-            public RateLimiter getReadLimiter() {
-              return getTabletServer().getMajorCompactionReadLimiter();
-            }
-
-            @Override
-            public RateLimiter getWriteLimiter() {
-              return getTabletServer().getMajorCompactionWriteLimiter();
-            }
-
-          };
-
-          HashMap<StoredTabletFile,DataFileValue> copy =
-              new HashMap<>(getDatafileManager().getDatafileSizes());
-          if (!copy.keySet().containsAll(smallestFiles)) {
-            throw new IllegalStateException("Cannot find data file values for " + smallestFiles
-                + " on " + extent + " during MajC");
-          }
-
-          copy.keySet().retainAll(smallestFiles);
-
-          log.debug("Starting MajC {} ({}) {} --> {} {}", extent, reason, copy.keySet(),
-              compactTmpName, compactionIterators);
-
-          // always propagate deletes, unless last batch
-          boolean lastBatch = filesToCompact.isEmpty();
-          Compactor compactor = new Compactor(context, this, copy, null, compactTmpName,
-              lastBatch ? propogateDeletes : true, cenv, compactionIterators, reason.ordinal(),
-              tableConf);
-
-          CompactionStats mcs = compactor.call();
-
-          if (span.getSpan() != null) {
-            span.getSpan().addKVAnnotation("files", ("" + smallestFiles.size()));
-            span.getSpan().addKVAnnotation("read", ("" + mcs.getEntriesRead()));
-            span.getSpan().addKVAnnotation("written", ("" + mcs.getEntriesWritten()));
-          }
-          majCStats.add(mcs);
-
-          if (lastBatch && plan != null && plan.deleteFiles != null) {
-            smallestFiles.addAll(plan.deleteFiles);
-          }
-          StoredTabletFile newTabletFile = getDatafileManager().bringMajorCompactionOnline(
-              smallestFiles, compactTmpName, newFile,
-              filesToCompact.isEmpty() && compactionId != null ? compactionId.getFirst() : null,
-              new DataFileValue(mcs.getFileSize(), mcs.getEntriesWritten()));
-
-          // when major compaction produces a file w/ zero entries, it will be deleted... do not
-          // want to add the deleted file
-          if (!filesToCompact.isEmpty() && mcs.getEntriesWritten() > 0) {
-            filesToCompact.put(newTabletFile,
-                new DataFileValue(mcs.getFileSize(), mcs.getEntriesWritten()));
-          }
-        }
-
-      } while (!filesToCompact.isEmpty());
-      return majCStats;
-    } finally {
-      synchronized (Tablet.this) {
-        getDatafileManager().clearMajorCompactingFile();
-      }
-    }
-  }
-
-  protected static AccumuloConfiguration createCompactionConfiguration(TableConfiguration base,
-      CompactionPlan plan) {
-    if (plan == null || plan.writeParameters == null) {
-      return base;
-    }
-    WriteParameters p = plan.writeParameters;
-    ConfigurationCopy result = new ConfigurationCopy(base);
-    if (p.getHdfsBlockSize() > 0) {
-      result.set(Property.TABLE_FILE_BLOCK_SIZE, "" + p.getHdfsBlockSize());
-    }
-    if (p.getBlockSize() > 0) {
-      result.set(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE, "" + p.getBlockSize());
-    }
-    if (p.getIndexBlockSize() > 0) {
-      result.set(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX, "" + p.getIndexBlockSize());
-    }
-    if (p.getCompressType() != null) {
-      result.set(Property.TABLE_FILE_COMPRESSION_TYPE, p.getCompressType());
-    }
-    if (p.getReplication() != 0) {
-      result.set(Property.TABLE_FILE_REPLICATION, "" + p.getReplication());
-    }
-    return result;
-  }
-
-  private Set<StoredTabletFile> removeSmallest(Map<StoredTabletFile,DataFileValue> filesToCompact,
-      int maxFilesToCompact) {
-    // ensure this method works properly when multiple files have the same size
-
-    // short-circuit; also handles zero files case
-    if (filesToCompact.size() <= maxFilesToCompact) {
-      Set<StoredTabletFile> smallestFiles = new HashSet<>(filesToCompact.keySet());
-      filesToCompact.clear();
-      return smallestFiles;
-    }
-
-    PriorityQueue<Pair<StoredTabletFile,Long>> fileHeap =
-        new PriorityQueue<>(filesToCompact.size(), (o1, o2) -> {
-          if (o1.getSecond().equals(o2.getSecond())) {
-            return o1.getFirst().compareTo(o2.getFirst());
-          }
-          if (o1.getSecond() < o2.getSecond()) {
-            return -1;
-          }
-          return 1;
-        });
-
-    for (Entry<StoredTabletFile,DataFileValue> entry : filesToCompact.entrySet()) {
-      fileHeap.add(new Pair<>(entry.getKey(), entry.getValue().getSize()));
-    }
-
-    Set<StoredTabletFile> smallestFiles = new HashSet<>();
-    while (smallestFiles.size() < maxFilesToCompact && !fileHeap.isEmpty()) {
-      Pair<StoredTabletFile,Long> pair = fileHeap.remove();
-      filesToCompact.remove(pair.getFirst());
-      smallestFiles.add(pair.getFirst());
-    }
-
-    return smallestFiles;
-  }
-
-  // END PRIVATE METHODS RELATED TO MAJOR COMPACTION
-
-  /**
-   * Performs a major compaction on the tablet. If needsSplit() returns true, the tablet is split
-   * and a reference to the new tablet is returned.
-   */
-
-  CompactionStats majorCompact(MajorCompactionReason reason, long queued) {
-    CompactionStats majCStats = null;
-    boolean success = false;
-    long start = System.currentTimeMillis();
-
-    timer.incrementStatusMajor();
-
-    synchronized (this) {
-      // check that compaction is still needed - defer to splitting
-      majorCompactionQueued.remove(reason);
-
-      if (isClosing() || isClosed() || !needsMajorCompaction(reason) || isMajorCompactionRunning()
-          || needsSplit()) {
-        return null;
-      }
-
-      majorCompactionState = CompactionState.WAITING_TO_START;
-    }
-
-    double tracePercent =
-        tabletServer.getConfiguration().getFraction(Property.TSERV_MAJC_TRACE_PERCENT);
-    ProbabilitySampler sampler = TraceUtil.probabilitySampler(tracePercent);
-    try (TraceScope span = Trace.startSpan("majorCompaction", sampler)) {
-
-      majCStats = _majorCompact(reason);
-      if (reason == MajorCompactionReason.CHOP) {
-        MetadataTableUtil.chopped(getTabletServer().getContext(), getExtent(),
-            this.getTabletServer().getLock());
-        getTabletServer()
-            .enqueueMasterMessage(new TabletStatusMessage(TabletLoadState.CHOPPED, extent));
-      }
-      if (span.getSpan() != null) {
-        span.getSpan().addKVAnnotation("extent", ("" + getExtent()));
-        if (majCStats != null) {
-          span.getSpan().addKVAnnotation("read", ("" + majCStats.getEntriesRead()));
-          span.getSpan().addKVAnnotation("written", ("" + majCStats.getEntriesWritten()));
-        }
-      }
-      success = true;
-    } catch (CompactionCanceledException cce) {
-      log.debug("Major compaction canceled, extent = {}", getExtent());
-    } catch (IOException ioe) {
-      log.error("MajC Failed, extent = " + getExtent(), ioe);
-    } catch (RuntimeException e) {
-      log.error("MajC Unexpected exception, extent = " + getExtent(), e);
-    } finally {
-      // ensure we always reset boolean, even
-      // when an exception is thrown
-      synchronized (this) {
-        majorCompactionState = null;
-        this.notifyAll();
-      }
-    }
-    long count = 0;
-    if (majCStats != null) {
-      count = majCStats.getEntriesRead();
-    }
-    timer.updateTime(Operation.MAJOR, queued, start, count, !success);
-
-    return majCStats;
-  }
-
   public KeyExtent getExtent() {
     return extent;
   }
@@ -2091,7 +1591,11 @@ public class Tablet {
   }
 
   public boolean isMajorCompactionRunning() {
-    return majorCompactionState != null;
+    return compactable.isMajorCompactionRunning();
+  }
+
+  public boolean isMajorCompactionQueued() {
+    return compactable.isMajorCompactionQueued();
   }
 
   public boolean isMinorCompactionQueued() {
@@ -2102,10 +1606,6 @@ public class Tablet {
     return minorCompactionState == CompactionState.IN_PROGRESS;
   }
 
-  public boolean isMajorCompactionQueued() {
-    return !majorCompactionQueued.isEmpty();
-  }
-
   public TreeMap<KeyExtent,TabletData> split(byte[] sp) throws IOException {
 
     if (sp != null && extent.getEndRow() != null && extent.getEndRow().equals(new Text(sp))) {
@@ -2311,13 +1811,13 @@ public class Tablet {
     try {
       tabletServer.updateBulkImportState(files, BulkImportState.LOADING);
 
-      getDatafileManager().importMapFiles(tid, entries, setTime);
+      var storedTabletFile = getDatafileManager().importMapFiles(tid, entries, setTime);
       lastMapFileImportTime = System.currentTimeMillis();
 
       if (needsSplit()) {
         getTabletServer().executeSplit(this);
       } else {
-        initiateMajorCompaction(MajorCompactionReason.NORMAL);
+        compactable.filesAdded(false, storedTabletFile);
       }
     } finally {
       synchronized (this) {
@@ -2552,31 +2052,13 @@ public class Tablet {
     logLock.unlock();
   }
 
-  public synchronized void chopFiles() {
-    initiateMajorCompaction(MajorCompactionReason.CHOP);
+  public void chopFiles() {
+    compactable.initiateChop();
   }
 
-  private CompactionStrategy createCompactionStrategy(CompactionStrategyConfig strategyConfig) {
-    String context = tableConfiguration.get(Property.TABLE_CLASSPATH);
-    String clazzName = strategyConfig.getClassName();
-    try {
-      Class<? extends CompactionStrategy> clazz;
-      if (context != null && !context.equals("")) {
-        clazz = AccumuloVFSClassLoader.getContextManager().loadClass(context, clazzName,
-            CompactionStrategy.class);
-      } else {
-        clazz = AccumuloVFSClassLoader.loadClass(clazzName, CompactionStrategy.class);
-      }
-      CompactionStrategy strategy = clazz.getDeclaredConstructor().newInstance();
-      strategy.init(strategyConfig.getOptions());
-      return strategy;
-    } catch (Exception e) {
-      throw new RuntimeException("Error creating compaction strategy on " + extent, e);
-    }
-  }
+  public void compactAll(long compactionId, CompactionConfig compactionConfig) {
 
-  public void compactAll(long compactionId, UserCompactionConfig compactionConfig) {
-    boolean updateMetadata = false;
+    boolean shouldInitiate = false;
 
     synchronized (this) {
       if (lastCompactID >= compactionId) {
@@ -2596,43 +2078,16 @@ public class Tablet {
         }
       }
 
-      if (isClosing() || isClosed() || majorCompactionQueued.contains(MajorCompactionReason.USER)
-          || isMajorCompactionRunning()) {
+      if (isClosing() || isClosed()) {
         return;
       }
 
-      CompactionStrategyConfig strategyConfig = compactionConfig.getCompactionStrategy();
-      CompactionStrategy strategy = createCompactionStrategy(strategyConfig);
-
-      MajorCompactionRequest request = new MajorCompactionRequest(extent,
-          MajorCompactionReason.USER, tableConfiguration, context);
-      request.setFiles(getDatafileManager().getDatafileSizes());
+      shouldInitiate = true;
 
-      try {
-        if (strategy.shouldCompact(request)) {
-          initiateMajorCompaction(MajorCompactionReason.USER);
-        } else {
-          majorCompactionState = CompactionState.IN_PROGRESS;
-          updateMetadata = true;
-          lastCompactID = compactionId;
-        }
-      } catch (IOException e) {
-        throw new UncheckedIOException("IOException on " + extent + " during compact all", e);
-      }
     }
 
-    if (updateMetadata) {
-      try {
-        // if multiple threads were allowed to update this outside of a sync block, then it would be
-        // a race condition
-        MetadataTableUtil.updateTabletCompactID(extent, compactionId,
-            getTabletServer().getContext(), getTabletServer().getLock());
-      } finally {
-        synchronized (this) {
-          majorCompactionState = null;
-          this.notifyAll();
-        }
-      }
+    if (shouldInitiate) {
+      compactable.initiateUserCompaction(compactionId, compactionConfig);
     }
   }
 
@@ -2676,17 +2131,16 @@ public class Tablet {
   }
 
   public StoredTabletFile updateTabletDataFile(long maxCommittedTime, TabletFile newDatafile,
-      StoredTabletFile absMergeFile, DataFileValue dfv, Set<String> unusedWalLogs,
-      Set<StoredTabletFile> filesInUseByScans, long flushId) {
+      DataFileValue dfv, Set<String> unusedWalLogs, long flushId) {
     synchronized (timeLock) {
       if (maxCommittedTime > persistedTime) {
         persistedTime = maxCommittedTime;
       }
 
       return MasterMetadataUtil.updateTabletDataFile(getTabletServer().getContext(), extent,
-          newDatafile, absMergeFile, dfv, tabletTime.getMetadataTime(persistedTime),
-          filesInUseByScans, tabletServer.getClientAddressString(), tabletServer.getLock(),
-          unusedWalLogs, lastLocation, flushId);
+          newDatafile, dfv, tabletTime.getMetadataTime(persistedTime),
+          tabletServer.getClientAddressString(), tabletServer.getLock(), unusedWalLogs,
+          lastLocation, flushId);
     }
 
   }
@@ -2780,4 +2234,8 @@ public class Tablet {
   public String getDirName() {
     return dirName;
   }
+
+  public Compactable asCompactable() {
+    return compactable;
+  }
 }
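
The new initiateClose() comment earlier in this file captures the key locking rule of this change: cancel compactions via compactable.close() without holding the tablet lock, because the compaction code has its own lock and calls back into tablet code. A small standalone sketch of that ordering (placeholder names, no Accumulo dependencies, not part of the commit):

    class LockOrderingSketch {
      private final Object tabletLock = new Object();
      private volatile boolean closing = false;

      // Stand-in for CompactableImpl: it synchronizes internally and may call
      // back into tablet methods that also synchronize on the tablet.
      interface CompactionHandle {
        void close();
      }

      void initiateClose(CompactionHandle compactable) {
        synchronized (tabletLock) {
          closing = true; // enter the closing state under the tablet lock
        }

        // Call into the compaction code with no tablet lock held. Holding both
        // locks here, while the compaction code acquires them in the opposite
        // order, could deadlock.
        compactable.close();

        synchronized (tabletLock) {
          // The lock was released above, so re-verify state before continuing.
          if (!closing) {
            throw new IllegalStateException("unexpected state change during close");
          }
        }
      }
    }

The same re-check pattern appears after every this.wait() call in the real method, since waiting also releases the tablet monitor.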
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/TabletServerResourceManagerDynamicCompactionPoolTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/TabletServerResourceManagerDynamicCompactionPoolTest.java
deleted file mode 100644
index 78a5dce..0000000
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/TabletServerResourceManagerDynamicCompactionPoolTest.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.tserver;
-
-import static org.apache.accumulo.fate.util.UtilWaitThread.sleep;
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.junit.Assert.assertTrue;
-
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.accumulo.core.conf.ConfigurationCopy;
-import org.apache.accumulo.core.conf.DefaultConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.TableId;
-import org.apache.accumulo.core.dataImpl.KeyExtent;
-import org.apache.accumulo.server.ServerContext;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
-import org.junit.After;
-import org.junit.Test;
-
-public class TabletServerResourceManagerDynamicCompactionPoolTest {
-
-  private final AtomicBoolean keepRunning = new AtomicBoolean(true);
-  private final AtomicInteger numRunning = new AtomicInteger(0);
-  private final AtomicInteger maxRan = new AtomicInteger(0);
-
-  private class FakeCompaction implements Runnable, Comparable<Runnable> {
-    private final TableId id;
-
-    public FakeCompaction(TableId id) {
-      this.id = id;
-    }
-
-    @Override
-    public void run() {
-      numRunning.addAndGet(1);
-      while (keepRunning.get()) {
-        sleep(5);
-      }
-      numRunning.decrementAndGet();
-      maxRan.addAndGet(1);
-    }
-
-    @Override
-    public int compareTo(Runnable o) {
-      if (o instanceof FakeCompaction) {
-        return id.canonical().compareTo(((FakeCompaction) o).id.canonical());
-      }
-      return 1;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      return super.equals(obj);
-    }
-
-    @Override
-    public int hashCode() {
-      return super.hashCode();
-    }
-  }
-
-  @After
-  public void stopRunningThreads() {
-    keepRunning.set(false);
-  }
-
-  /*
-   * Ensure the TabletServerResourceManager increases the thread pool size dynamically
-   */
-  @Test(timeout = 20_000)
-  public void testDynamicThreadPoolUpdates() {
-    // create a mock config that substitutes for the system configuration in ZK
-    ConfigurationCopy config = new ConfigurationCopy(DefaultConfiguration.getInstance()) {
-      @Override
-      public boolean isPropertySet(Property prop, boolean cacheAndWatch) {
-        return false;
-      }
-    };
-    config.set(Property.TSERV_NATIVEMAP_ENABLED, "false");
-    config.set(Property.TSERV_MAJC_DELAY, "100ms");
-    config.set(Property.TSERV_MAJC_MAXCONCURRENT, "3");
-
-    ServerConfigurationFactory serverConfFactory = createMock(ServerConfigurationFactory.class);
-    expect(serverConfFactory.getSystemConfiguration()).andReturn(config).anyTimes();
-    ServerContext context = createMock(ServerContext.class);
-    expect(context.getConfiguration()).andReturn(config).anyTimes();
-    expect(context.getVolumeManager()).andReturn(null).anyTimes();
-    expect(context.getServerConfFactory()).andReturn(serverConfFactory).anyTimes();
-    replay(context, serverConfFactory);
-
-    // create a resource manager to test
-    keepRunning.set(true);
-    TabletServerResourceManager manager = new TabletServerResourceManager(context);
-
-    // start first batch and ensure it runs at most 3 at a time (initial configuration)
-    for (int i = 0; i < 10; i++) {
-      TableId id = TableId.of("userTableBatch1_" + i);
-      manager.executeMajorCompaction(new KeyExtent(id, null, null), new FakeCompaction(id));
-    }
-    waitForNumRunningToReach(3);
-
-    // increase the number of concurrent threads to 5 and wait for it to eventually reach 5
-    config.set(Property.TSERV_MAJC_MAXCONCURRENT, "5");
-    waitForNumRunningToReach(5);
-
-    // shut down the first batch (this will run all the remaining queued)
-    keepRunning.set(false);
-    waitForNumRunningToReach(0);
-
-    // make sure all 10 in the first batch ran, and reset it for the second batch
-    assertTrue(maxRan.compareAndSet(10, 0));
-
-    // decrease to 2, but need to wait for it to propagate, or else the running compactions will
-    // block the decrease in the threadpool size; the scheduler updates this every 10 seconds, so
-    // we'll give it 12 to be sure it updated before we execute any new tasks
-    config.set(Property.TSERV_MAJC_MAXCONCURRENT, "2");
-    sleep(12_000);
-
-    // start the second batch of 10 tasks, and make sure it stops at 2
-    keepRunning.set(true);
-    for (int i = 0; i < 10; i++) {
-      TableId id = TableId.of("userTableBatch2_" + i);
-      manager.executeMajorCompaction(new KeyExtent(id, null, null), new FakeCompaction(id));
-    }
-    waitForNumRunningToReach(2);
-
-    // shut down second batch (this will run out all the remaining queued)
-    keepRunning.set(false);
-    waitForNumRunningToReach(0);
-
-    // make sure all 10 in the second batch ran, and reset it
-    assertTrue(maxRan.compareAndSet(10, 0));
-  }
-
-  private void waitForNumRunningToReach(int expected) {
-    while (numRunning.get() != expected) {
-      sleep(10);
-    }
-  }
-
-}
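The deleted test above exercised the behavior the new compaction executors replace: the tserver's major-compaction thread pool resizing itself when Property.TSERV_MAJC_MAXCONCURRENT changes at runtime. As a minimal, self-contained sketch of that resizing technique using only java.util.concurrent (illustrative code, not Accumulo's TabletServerResourceManager; the class name is made up):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Illustrative stand-in for a dynamically resizable compaction pool.
public class ResizableCompactionPool {

  private final ThreadPoolExecutor pool =
      new ThreadPoolExecutor(3, 3, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());

  // Apply a new concurrency limit, ordering the calls so core <= max always holds.
  public synchronized void resize(int newLimit) {
    if (newLimit > pool.getMaximumPoolSize()) {
      pool.setMaximumPoolSize(newLimit); // raise the ceiling before the floor
      pool.setCorePoolSize(newLimit);
    } else {
      pool.setCorePoolSize(newLimit);    // lower the floor before the ceiling
      pool.setMaximumPoolSize(newLimit);
    }
  }

  public void submit(Runnable compaction) {
    pool.execute(compaction);
  }
}

With an unbounded work queue a ThreadPoolExecutor never grows past its core size, so keeping core and max equal is what bounds concurrency; shrinking only takes effect as running tasks finish and threads go idle, which is why the deleted test slept before submitting its second batch.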
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/CompactionPlanTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/CompactionPlanTest.java
index 4a7383e..9df184a 100644
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/CompactionPlanTest.java
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/CompactionPlanTest.java
@@ -25,6 +25,7 @@ import java.util.Set;
 import org.apache.accumulo.core.metadata.StoredTabletFile;
 import org.junit.Test;
 
+@SuppressWarnings("removal")
 public class CompactionPlanTest {
 
   @Test
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/DefaultCompactionStrategyTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/DefaultCompactionStrategyTest.java
index 7421bda..07728a3 100644
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/DefaultCompactionStrategyTest.java
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/DefaultCompactionStrategyTest.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.io.Text;
 import org.easymock.EasyMock;
 import org.junit.Test;
 
+@SuppressWarnings("removal")
 public class DefaultCompactionStrategyTest {
 
   private static Pair<Key,Key> keys(String firstString, String secondString) {
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/SizeLimitCompactionStrategyTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/SizeLimitCompactionStrategyTest.java
index 258c078..f9c81dd 100644
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/SizeLimitCompactionStrategyTest.java
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/SizeLimitCompactionStrategyTest.java
@@ -36,6 +36,7 @@ import org.apache.accumulo.core.metadata.StoredTabletFile;
 import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.junit.Test;
 
+@SuppressWarnings("removal")
 public class SizeLimitCompactionStrategyTest {
 
   private static Map<StoredTabletFile,DataFileValue> nfl(String... sa) {
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/strategies/BasicCompactionStrategyTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/strategies/BasicCompactionStrategyTest.java
index b8b4625..a1162ff 100644
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/strategies/BasicCompactionStrategyTest.java
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/strategies/BasicCompactionStrategyTest.java
@@ -47,6 +47,7 @@ import org.junit.Test;
 /**
  * Tests org.apache.accumulo.tserver.compaction.BasicCompactionStrategy
  */
+@SuppressWarnings("removal")
 public class BasicCompactionStrategyTest {
   private String largeCompressionType = "gz";
   private BasicCompactionStrategy ttcs = null;
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/strategies/ConfigurableCompactionStrategyTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/strategies/ConfigurableCompactionStrategyTest.java
index bbfcec1..b83581b 100644
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/strategies/ConfigurableCompactionStrategyTest.java
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/strategies/ConfigurableCompactionStrategyTest.java
@@ -18,22 +18,24 @@
  */
 package org.apache.accumulo.tserver.compaction.strategies;
 
-import static org.apache.accumulo.tserver.compaction.DefaultCompactionStrategyTest.getServerContext;
+import static org.apache.accumulo.core.conf.ConfigurationTypeHelper.getFixedMemoryAsBytes;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Set;
 
+import org.apache.accumulo.core.client.PluginEnvironment;
+import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer;
+import org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer.Overrides;
 import org.apache.accumulo.core.compaction.CompactionSettings;
-import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
+import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.TableId;
-import org.apache.accumulo.core.dataImpl.KeyExtent;
-import org.apache.accumulo.core.metadata.StoredTabletFile;
-import org.apache.accumulo.core.metadata.schema.DataFileValue;
-import org.apache.accumulo.tserver.compaction.CompactionPlan;
-import org.apache.accumulo.tserver.compaction.MajorCompactionReason;
-import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
 import org.junit.Test;
 
 public class ConfigurableCompactionStrategyTest {
@@ -41,51 +43,79 @@ public class ConfigurableCompactionStrategyTest {
   // file selection options are adequately tested by ShellServerIT
 
   @Test
-  public void testOutputOptions() {
-    MajorCompactionRequest mcr =
-        new MajorCompactionRequest(new KeyExtent(TableId.of("1"), null, null),
-            MajorCompactionReason.USER, null, getServerContext());
+  public void testOutputOptions() throws URISyntaxException {
 
-    Map<StoredTabletFile,DataFileValue> files = new HashMap<>();
-    files.put(new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-009/F00001.rf"),
-        new DataFileValue(50000, 400));
-    mcr.setFiles(files);
+    Collection<CompactableFile> files = Set.of(CompactableFile
+        .create(new URI("hdfs://nn1/accumulo/tables/1/t-009/F00001.rf"), 50000, 400));
 
     // test setting no output options
     ConfigurableCompactionStrategy ccs = new ConfigurableCompactionStrategy();
 
     Map<String,String> opts = new HashMap<>();
-    ccs.init(opts);
 
-    CompactionPlan plan = ccs.getCompactionPlan(mcr);
+    var initParams = new CompactionConfigurer.InitParamaters() {
 
-    assertEquals(0, plan.writeParameters.getBlockSize());
-    assertEquals(0, plan.writeParameters.getHdfsBlockSize());
-    assertEquals(0, plan.writeParameters.getIndexBlockSize());
-    assertEquals(0, plan.writeParameters.getReplication());
-    assertNull(plan.writeParameters.getCompressType());
+      @Override
+      public TableId getTableId() {
+        return TableId.of("1");
+      }
+
+      @Override
+      public Map<String,String> getOptions() {
+        return opts;
+      }
+
+      @Override
+      public PluginEnvironment getEnvironment() {
+        return null;
+      }
+    };
+
+    ccs.init(initParams);
+
+    var inputParams = new CompactionConfigurer.InputParameters() {
+
+      @Override
+      public TableId getTableId() {
+        return null;
+      }
+
+      @Override
+      public Collection<CompactableFile> getInputFiles() {
+        return files;
+      }
+
+      @Override
+      public PluginEnvironment getEnvironment() {
+        return null;
+      }
+    };
+
+    Overrides plan = ccs.override(inputParams);
+
+    assertTrue(plan.getOverrides().isEmpty());
 
     // test setting all output options
     ccs = new ConfigurableCompactionStrategy();
 
-    CompactionSettings.OUTPUT_BLOCK_SIZE_OPT.put(opts, "64K");
-    CompactionSettings.OUTPUT_COMPRESSION_OPT.put(opts, "snappy");
-    CompactionSettings.OUTPUT_HDFS_BLOCK_SIZE_OPT.put(opts, "256M");
-    CompactionSettings.OUTPUT_INDEX_BLOCK_SIZE_OPT.put(opts, "32K");
-    CompactionSettings.OUTPUT_REPLICATION_OPT.put(opts, "5");
+    CompactionSettings.OUTPUT_BLOCK_SIZE_OPT.put(null, opts, "64K");
+    CompactionSettings.OUTPUT_COMPRESSION_OPT.put(null, opts, "snappy");
+    CompactionSettings.OUTPUT_HDFS_BLOCK_SIZE_OPT.put(null, opts, "256M");
+    CompactionSettings.OUTPUT_INDEX_BLOCK_SIZE_OPT.put(null, opts, "32K");
+    CompactionSettings.OUTPUT_REPLICATION_OPT.put(null, opts, "5");
+
+    ccs.init(initParams);
 
-    ccs.init(opts);
+    plan = ccs.override(inputParams);
 
-    plan = ccs.getCompactionPlan(mcr);
+    Map<String,
+        String> expected = Map.of(Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "snappy",
+            Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), getFixedMemoryAsBytes("64K") + "",
+            Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey(),
+            getFixedMemoryAsBytes("32K") + "", Property.TABLE_FILE_BLOCK_SIZE.getKey(),
+            getFixedMemoryAsBytes("256M") + "", Property.TABLE_FILE_REPLICATION.getKey(), "5");
 
-    assertEquals(ConfigurationTypeHelper.getFixedMemoryAsBytes("64K"),
-        plan.writeParameters.getBlockSize());
-    assertEquals(ConfigurationTypeHelper.getFixedMemoryAsBytes("256M"),
-        plan.writeParameters.getHdfsBlockSize());
-    assertEquals(ConfigurationTypeHelper.getFixedMemoryAsBytes("32K"),
-        plan.writeParameters.getIndexBlockSize());
-    assertEquals(5, plan.writeParameters.getReplication());
-    assertEquals("snappy", plan.writeParameters.getCompressType());
+    assertEquals(expected, plan.getOverrides());
 
   }
 }
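The rewritten test above now drives ConfigurableCompactionStrategy through the new CompactionConfigurer SPI: init(...) receives the option map built from CompactionSettings, and override(...) returns table-property overrides for the output file instead of a CompactionPlan carrying WriteParameters. A sketch of a user-written configurer against that interface follows; the class name and the "file.threshold" option are hypothetical, and a Map-accepting Overrides constructor is assumed here.

import java.util.Map;

import org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer;
import org.apache.accumulo.core.conf.Property;

// Hypothetical configurer: force snappy output only when a compaction reads many files.
public class ManyFilesCompressionConfigurer implements CompactionConfigurer {

  private int fileThreshold;

  @Override
  public void init(InitParamaters iparams) { // nested type spelled as in this commit
    // "file.threshold" is a made-up option name for this sketch
    fileThreshold = Integer.parseInt(iparams.getOptions().getOrDefault("file.threshold", "10"));
  }

  @Override
  public Overrides override(InputParameters params) {
    if (params.getInputFiles().size() >= fileThreshold) {
      return new Overrides(Map.of(Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "snappy"));
    }
    return new Overrides(Map.of()); // no overrides: keep the table's configured output settings
  }
}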
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/DatafileManagerTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/DatafileManagerTest.java
deleted file mode 100644
index 4e7b0be..0000000
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/DatafileManagerTest.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.tserver.tablet;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.dataImpl.KeyExtent;
-import org.apache.accumulo.core.metadata.StoredTabletFile;
-import org.apache.accumulo.core.metadata.TabletFile;
-import org.apache.accumulo.core.metadata.schema.DataFileValue;
-import org.apache.accumulo.server.conf.TableConfiguration;
-import org.easymock.EasyMock;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Unit tests for org.apache.accumulo.tserver.tablet.DatafileManager
- */
-public class DatafileManagerTest {
-  private Tablet tablet;
-  private KeyExtent extent;
-  private TableConfiguration tableConf;
-
-  private SortedMap<StoredTabletFile,DataFileValue> createFileMap(String... sa) {
-    SortedMap<StoredTabletFile,DataFileValue> ret = new TreeMap<>();
-    for (int i = 0; i < sa.length; i += 2) {
-      ret.put(new StoredTabletFile("hdfs://nn1/accumulo/tables/5/t-0001/" + sa[i]),
-          new DataFileValue(ConfigurationTypeHelper.getFixedMemoryAsBytes(sa[i + 1]), 1));
-    }
-    return ret;
-  }
-
-  @Before
-  public void setupMockClasses() {
-    tablet = EasyMock.createMock(Tablet.class);
-    extent = EasyMock.createMock(KeyExtent.class);
-    tableConf = EasyMock.createMock(TableConfiguration.class);
-
-    EasyMock.expect(tablet.getExtent()).andReturn(extent);
-    EasyMock.expect(tablet.getTableConfiguration()).andReturn(tableConf);
-    EasyMock.expect(tableConf.getMaxFilesPerTablet()).andReturn(5);
-  }
-
-  /*
-   * Test max file size (table.compaction.minor.merge.file.size.max) exceeded when calling
-   * reserveMergingMinorCompactionFile
-   */
-  @Test
-  public void testReserveMergingMinorCompactionFile_MaxExceeded() {
-    String maxMergeFileSize = "1000B";
-    EasyMock.expect(tablet.getTableConfiguration()).andReturn(tableConf);
-    EasyMock.expect(tableConf.get(Property.TABLE_MINC_MAX_MERGE_FILE_SIZE))
-        .andReturn(maxMergeFileSize);
-    EasyMock.replay(tablet, tableConf);
-
-    SortedMap<StoredTabletFile,DataFileValue> testFiles = createFileMap("largefile", "10M", "file2",
-        "100M", "file3", "100M", "file4", "100M", "file5", "100M");
-
-    DatafileManager dfm = new DatafileManager(tablet, testFiles);
-    TabletFile mergeFile = dfm.reserveMergingMinorCompactionFile();
-
-    EasyMock.verify(tablet, tableConf);
-
-    assertNull(mergeFile);
-  }
-
-  /*
-   * Test max files not reached (table.file.max) when calling reserveMergingMinorCompactionFile
-   */
-  @Test
-  public void testReserveMergingMinorCompactionFile_MaxFilesNotReached() {
-    EasyMock.replay(tablet, tableConf);
-
-    SortedMap<StoredTabletFile,DataFileValue> testFiles =
-        createFileMap("smallfile", "100B", "file2", "100M", "file3", "100M", "file4", "100M");
-
-    DatafileManager dfm = new DatafileManager(tablet, testFiles);
-    TabletFile mergeFile = dfm.reserveMergingMinorCompactionFile();
-
-    EasyMock.verify(tablet, tableConf);
-
-    assertNull(mergeFile);
-  }
-
-  /*
-   * Test the smallest file is chosen for merging minor compaction
-   */
-  @Test
-  public void testReserveMergingMinorCompactionFile() {
-    String maxMergeFileSize = "1000B";
-    EasyMock.expect(tablet.getTableConfiguration()).andReturn(tableConf);
-    EasyMock.expect(tableConf.get(Property.TABLE_MINC_MAX_MERGE_FILE_SIZE))
-        .andReturn(maxMergeFileSize);
-    EasyMock.replay(tablet, tableConf);
-
-    SortedMap<StoredTabletFile,DataFileValue> testFiles = createFileMap("smallfile", "100B",
-        "file2", "100M", "file3", "100M", "file4", "100M", "file5", "100M");
-
-    DatafileManager dfm = new DatafileManager(tablet, testFiles);
-    TabletFile mergeFile = dfm.reserveMergingMinorCompactionFile();
-
-    EasyMock.verify(tablet, tableConf);
-
-    assertEquals("smallfile", mergeFile.getFileName());
-  }
-
-  /*
-   * Test disabled max file size for merging minor compaction
-   */
-  @Test
-  public void testReserveMergingMinorCompactionFileDisabled() {
-    String maxMergeFileSize = "0";
-    EasyMock.expect(tablet.getTableConfiguration()).andReturn(tableConf);
-    EasyMock.expect(tableConf.get(Property.TABLE_MINC_MAX_MERGE_FILE_SIZE))
-        .andReturn(maxMergeFileSize);
-    EasyMock.replay(tablet, tableConf);
-
-    SortedMap<StoredTabletFile,DataFileValue> testFiles = createFileMap("smallishfile", "10M",
-        "file2", "100M", "file3", "100M", "file4", "100M", "file5", "100M");
-
-    DatafileManager dfm = new DatafileManager(tablet, testFiles);
-    TabletFile mergeFile = dfm.reserveMergingMinorCompactionFile();
-
-    EasyMock.verify(tablet, tableConf);
-
-    assertEquals("smallishfile", mergeFile.getFileName());
-  }
-
-}
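The deleted DatafileManagerTest pinned down how reserveMergingMinorCompactionFile chose a file: nothing is merged while the tablet is under table.file.max, otherwise the smallest file is picked, and it is skipped when it exceeds table.compaction.minor.merge.file.size.max (with 0 meaning no size limit). A standalone reconstruction of that selection rule, written against plain types rather than DatafileManager's internals (class and method names here are illustrative):

import java.util.Map;
import java.util.Optional;

// Reconstruction of the behavior the deleted tests asserted, not the original implementation.
public class MergingMincFileChooser {

  /**
   * @param fileSizes tablet files mapped to their sizes in bytes
   * @param maxFiles value of table.file.max for the tablet
   * @param maxMergeBytes value of table.compaction.minor.merge.file.size.max; 0 disables the check
   * @return the smallest file, if a merging minor compaction should include one
   */
  static Optional<String> chooseMergeFile(Map<String,Long> fileSizes, int maxFiles,
      long maxMergeBytes) {
    if (fileSizes.size() < maxFiles) {
      return Optional.empty(); // tablet is not at its file limit, so no merge is needed
    }
    var smallest = fileSizes.entrySet().stream().min(Map.Entry.comparingByValue()).orElseThrow();
    if (maxMergeBytes > 0 && smallest.getValue() > maxMergeBytes) {
      return Optional.empty(); // even the smallest file is too large to merge
    }
    return Optional.of(smallest.getKey());
  }
}

For example, five files where the smallest is 100 bytes, maxFiles of 5, and a 1000-byte limit yields that smallest file, matching testReserveMergingMinorCompactionFile above.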
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/TabletTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/TabletTest.java
index 8c1cc2e..33fcefb 100644
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/TabletTest.java
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/TabletTest.java
@@ -30,6 +30,7 @@ import org.apache.accumulo.tserver.compaction.WriteParameters;
 import org.easymock.EasyMock;
 import org.junit.Test;
 
+@SuppressWarnings("removal")
 public class TabletTest {
 
   @Test
@@ -52,7 +53,8 @@ public class TabletTest {
 
     EasyMock.replay(tableConf, plan, writeParams);
 
-    AccumuloConfiguration aConf = Tablet.createCompactionConfiguration(tableConf, plan);
+    AccumuloConfiguration aConf =
+        CompactableUtils.createCompactionConfiguration(tableConf, writeParams);
 
     EasyMock.verify(tableConf, plan, writeParams);
 
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/CompactCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/CompactCommand.java
index e53b743..7133c63 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/CompactCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/CompactCommand.java
@@ -29,6 +29,7 @@ import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.admin.CompactionConfig;
 import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
+import org.apache.accumulo.core.client.admin.PluginConfig;
 import org.apache.accumulo.core.compaction.CompactionSettings;
 import org.apache.accumulo.shell.Shell;
 import org.apache.accumulo.shell.ShellUtil;
@@ -91,30 +92,27 @@ public class CompactCommand extends TableOperation {
     }
   }
 
-  private void put(CommandLine cl, Map<String,String> opts, Option opt,
+  private void put(CommandLine cl, Map<String,String> sopts, Map<String,String> copts, Option opt,
       CompactionSettings setting) {
     if (cl.hasOption(opt.getLongOpt()))
-      setting.put(opts, cl.getOptionValue(opt.getLongOpt()));
+      setting.put(sopts, copts, cl.getOptionValue(opt.getLongOpt()));
   }
... 923 lines suppressed ...

