drill-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jacq...@apache.org
Subject [1/2] drill git commit: DRILL-4465: Simplify Calcite parsing & planning integration
Date Fri, 04 Mar 2016 06:49:24 GMT
Repository: drill
Updated Branches:
  refs/heads/master c98edbafb -> 84b3a8a87


http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DefaultSqlHandler.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DefaultSqlHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DefaultSqlHandler.java
index d6bdc78..396a0ef 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DefaultSqlHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DefaultSqlHandler.java
@@ -21,31 +21,25 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
-import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
-import com.google.common.collect.ImmutableSet;
 import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.plan.hep.HepMatchOrder;
 import org.apache.calcite.plan.hep.HepPlanner;
-import org.apache.calcite.plan.hep.HepProgram;
 import org.apache.calcite.plan.hep.HepProgramBuilder;
+import org.apache.calcite.plan.volcano.VolcanoPlanner;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.RelShuttleImpl;
-import org.apache.calcite.rel.core.Join;
 import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.RelFactories;
 import org.apache.calcite.rel.core.TableFunctionScan;
 import org.apache.calcite.rel.core.TableScan;
 import org.apache.calcite.rel.logical.LogicalValues;
 import org.apache.calcite.rel.metadata.CachingRelMetadataProvider;
 import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelMetadataProvider;
-import org.apache.calcite.rel.rules.JoinToMultiJoinRule;
-import org.apache.calcite.rel.rules.LoptOptimizeJoinRule;
-import org.apache.calcite.rel.rules.ProjectRemoveRule;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexNode;
@@ -54,8 +48,10 @@ import org.apache.calcite.sql.SqlExplainLevel;
 import org.apache.calcite.sql.SqlNode;
 import org.apache.calcite.sql.TypedSqlNode;
 import org.apache.calcite.sql.validate.SqlValidatorUtil;
-import org.apache.calcite.tools.Planner;
+import org.apache.calcite.tools.Program;
+import org.apache.calcite.tools.Programs;
 import org.apache.calcite.tools.RelConversionException;
+import org.apache.calcite.tools.RuleSet;
 import org.apache.calcite.tools.ValidationException;
 import org.apache.drill.common.JSONOptions;
 import org.apache.drill.common.logical.PlanProperties;
@@ -68,20 +64,15 @@ import org.apache.drill.exec.physical.PhysicalPlan;
 import org.apache.drill.exec.physical.base.AbstractPhysicalVisitor;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.impl.join.JoinUtils;
+import org.apache.drill.exec.planner.PlannerPhase;
+import org.apache.drill.exec.planner.PlannerType;
 import org.apache.drill.exec.planner.common.DrillRelOptUtil;
 import org.apache.drill.exec.planner.cost.DrillDefaultRelMetadataProvider;
-import org.apache.drill.exec.planner.logical.DrillJoinRel;
-import org.apache.drill.exec.planner.logical.DrillMergeProjectRule;
 import org.apache.drill.exec.planner.logical.DrillProjectRel;
-import org.apache.drill.exec.planner.logical.DrillPushProjectPastFilterRule;
 import org.apache.drill.exec.planner.logical.DrillRel;
-import org.apache.drill.exec.planner.logical.DrillRelFactories;
-import org.apache.drill.exec.planner.logical.DrillRuleSets;
 import org.apache.drill.exec.planner.logical.DrillScreenRel;
 import org.apache.drill.exec.planner.logical.DrillStoreRel;
 import org.apache.drill.exec.planner.logical.PreProcessLogicalRel;
-import org.apache.drill.exec.planner.logical.partition.ParquetPruneScanRule;
-import org.apache.drill.exec.planner.logical.partition.PruneScanRule;
 import org.apache.drill.exec.planner.physical.DrillDistributionTrait;
 import org.apache.drill.exec.planner.physical.PhysicalPlanCreator;
 import org.apache.drill.exec.planner.physical.PlannerSettings;
@@ -99,7 +90,6 @@ import org.apache.drill.exec.planner.physical.visitor.SelectionVectorPrelVisitor
 import org.apache.drill.exec.planner.physical.visitor.SplitUpComplexExpressions;
 import org.apache.drill.exec.planner.physical.visitor.StarColumnConverter;
 import org.apache.drill.exec.planner.physical.visitor.SwapHashJoinVisitor;
-import org.apache.drill.exec.planner.sql.DrillSqlWorker;
 import org.apache.drill.exec.planner.sql.parser.UnsupportedOperatorsVisitor;
 import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.server.options.OptionValue;
@@ -111,17 +101,17 @@ import org.slf4j.Logger;
 
 import com.fasterxml.jackson.core.JsonProcessingException;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
 import com.google.common.collect.Lists;
 
 public class DefaultSqlHandler extends AbstractSqlHandler {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DefaultSqlHandler.class);
 
+  // protected final QueryContext context;
+  private final Pointer<String> textPlan;
+  private final long targetSliceSize;
   protected final SqlHandlerConfig config;
   protected final QueryContext context;
-  protected final HepPlanner hepPlanner;
-  protected final Planner planner;
-  private Pointer<String> textPlan;
-  private final long targetSliceSize;
 
   public DefaultSqlHandler(SqlHandlerConfig config) {
     this(config, null);
@@ -129,28 +119,44 @@ public class DefaultSqlHandler extends AbstractSqlHandler {
 
   public DefaultSqlHandler(SqlHandlerConfig config, Pointer<String> textPlan) {
     super();
-    this.planner = config.getPlanner();
-    this.context = config.getContext();
-    this.hepPlanner = config.getHepPlanner();
     this.config = config;
+    this.context = config.getContext();
     this.textPlan = textPlan;
-    targetSliceSize = context.getOptions().getOption(ExecConstants.SLICE_TARGET).num_val;
+    this.targetSliceSize = config.getContext().getOptions().getOption(ExecConstants.SLICE_TARGET_OPTION);
+
   }
 
-  protected static void log(final String name, final RelNode node, final Logger logger) {
+  protected void log(final PlannerType plannerType, final PlannerPhase phase, final RelNode node, final Logger logger,
+      Stopwatch watch) {
+    log(plannerType, phase, node, logger, watch, false);
+  }
+
+  protected void log(final PlannerType plannerType, final PlannerPhase phase, final RelNode node, final Logger logger,
+      Stopwatch watch, boolean number) {
     if (logger.isDebugEnabled()) {
-      logger.debug(name + " : \n" + RelOptUtil.toString(node, SqlExplainLevel.ALL_ATTRIBUTES));
+      log(plannerType.name() + ":" + phase.description, node, logger, watch, number);
     }
   }
 
-  protected void log(final String name, final Prel node, final Logger logger) {
-    String plan = PrelSequencer.printWithIds(node, SqlExplainLevel.ALL_ATTRIBUTES);
-    if(textPlan != null){
-      textPlan.value = plan;
-    }
+  protected void log(final String description, final RelNode node, final Logger logger, Stopwatch watch) {
+    log(description, node, logger, watch, false);
+  }
+
+  protected void log(final String description, final RelNode node, final Logger logger, Stopwatch watch, boolean number) {
 
     if (logger.isDebugEnabled()) {
-      logger.debug(name + " : \n" + plan);
+      final String plan;
+      if (number && node instanceof Prel) {
+        plan = PrelSequencer.printWithIds((Prel) node, SqlExplainLevel.ALL_ATTRIBUTES);
+        if (textPlan != null) {
+          textPlan.value = plan;
+        }
+      } else {
+        plan = RelOptUtil.toString(node, SqlExplainLevel.ALL_ATTRIBUTES);
+      }
+
+      final String time = watch == null ? "" : String.format(" (%dms)", watch.elapsed(TimeUnit.MILLISECONDS));
+      logger.debug(String.format("%s%s:\n%s", description, time, plan));
     }
   }
 
@@ -161,21 +167,16 @@ public class DefaultSqlHandler extends AbstractSqlHandler {
     }
   }
 
-
   @Override
   public PhysicalPlan getPlan(SqlNode sqlNode) throws ValidationException, RelConversionException, IOException, ForemanSetupException {
     final ConvertedRelNode convertedRelNode = validateAndConvert(sqlNode);
     final RelDataType validatedRowType = convertedRelNode.getValidatedRowType();
     final RelNode queryRelNode = convertedRelNode.getConvertedNode();
 
-    log("Optiq Logical", queryRelNode, logger);
-    DrillRel drel = convertToDrel(queryRelNode, validatedRowType);
-
-    log("Drill Logical", drel, logger);
-    Prel prel = convertToPrel(drel);
-    log("Drill Physical", prel, logger);
-    PhysicalOperator pop = convertToPop(prel);
-    PhysicalPlan plan = convertToPlan(pop);
+    final DrillRel drel = convertToDrel(queryRelNode, validatedRowType);
+    final Prel prel = convertToPrel(drel);
+    final PhysicalOperator pop = convertToPop(prel);
+    final PhysicalPlan plan = convertToPlan(pop);
     log("Drill Plan", plan, logger);
     return plan;
   }
@@ -211,13 +212,38 @@ public class DefaultSqlHandler extends AbstractSqlHandler {
    * @throws SqlUnsupportedException
    * @throws RelConversionException
    */
-  protected DrillRel convertToDrel(RelNode relNode) throws SqlUnsupportedException, RelConversionException {
+  protected DrillRel convertToDrel(final RelNode relNode) throws SqlUnsupportedException, RelConversionException {
     try {
-      final DrillRel convertedRelNode;
+      final RelNode convertedRelNode;
+
+      // HEP Directory pruning .
+      final RelNode pruned = transform(PlannerType.HEP_BOTTOM_UP, PlannerPhase.DIRECTORY_PRUNING, relNode);
+      final RelTraitSet logicalTraits = pruned.getTraitSet().plus(DrillRel.DRILL_LOGICAL);
 
-      convertedRelNode = (DrillRel) doLogicalPlanning(relNode);
+      if (!context.getPlannerSettings().isHepOptEnabled()) {
+        // hep is disabled, use volcano
+        convertedRelNode = transform(PlannerType.VOLCANO, PlannerPhase.LOGICAL_PRUNE_AND_JOIN, pruned, logicalTraits);
 
-      if (convertedRelNode instanceof DrillStoreRel) {
+      } else {
+        final RelNode intermediateNode2;
+        if (context.getPlannerSettings().isHepPartitionPruningEnabled()) {
+
+          // hep is enabled and hep pruning is enabled.
+          final RelNode intermediateNode = transform(PlannerType.VOLCANO, PlannerPhase.LOGICAL, pruned, logicalTraits);
+          intermediateNode2 = transform(PlannerType.HEP_BOTTOM_UP, PlannerPhase.PARTITION_PRUNING, intermediateNode);
+
+        } else {
+          // Only hep is enabled
+          intermediateNode2 = transform(PlannerType.VOLCANO, PlannerPhase.LOGICAL_PRUNE, pruned, logicalTraits);
+        }
+
+        // Do Join Planning.
+        convertedRelNode = transform(PlannerType.HEP_BOTTOM_UP, PlannerPhase.JOIN_PLANNING, intermediateNode2);
+      }
+
+      final DrillRel drillRel = (DrillRel) convertedRelNode;
+
+      if (drillRel instanceof DrillStoreRel) {
         throw new UnsupportedOperationException();
       } else {
 
@@ -226,7 +252,7 @@ public class DefaultSqlHandler extends AbstractSqlHandler {
           context.getPlannerSettings().forceSingleMode();
         }
 
-        return convertedRelNode;
+        return drillRel;
       }
     } catch (RelOptPlanner.CannotPlanException ex) {
       logger.error(ex.getMessage());
@@ -273,13 +299,120 @@ public class DefaultSqlHandler extends AbstractSqlHandler {
 
   }
 
+  /**
+   * Transform RelNode to a new RelNode without changing any traits. Also will log the outcome.
+   *
+   * @param plannerType
+   *          The type of Planner to use.
+   * @param phase
+   *          The transformation phase we're running.
+   * @param input
+   *          The original RelNode
+   * @return The transformed relnode.
+   */
+  private RelNode transform(PlannerType plannerType, PlannerPhase phase, RelNode input) {
+    return transform(plannerType, phase, input, input.getTraitSet());
+  }
+
+  /**
+   * Transform RelNode to a new RelNode, targeting the provided set of traits. Also will log the outcome.
+   *
+   * @param plannerType
+   *          The type of Planner to use.
+   * @param phase
+   *          The transformation phase we're running.
+   * @param input
+   *          The original RelNode
+   * @param targetTraits
+   *          The traits we are targeting for output.
+   * @return The transformed relnode.
+   */
+  protected RelNode transform(PlannerType plannerType, PlannerPhase phase, RelNode input, RelTraitSet targetTraits) {
+    return transform(plannerType, phase, input, targetTraits, true);
+  }
+
+  /**
+   * Transform RelNode to a new RelNode, targeting the provided set of traits. Also will log the outcome if asked.
+   *
+   * @param plannerType
+   *          The type of Planner to use.
+   * @param phase
+   *          The transformation phase we're running.
+   * @param input
+   *          The original RelNode
+   * @param targetTraits
+   *          The traits we are targeting for output.
+   * @param log
+   *          Whether to log the planning phase.
+   * @return The transformed relnode.
+   */
+  protected RelNode transform(PlannerType plannerType, PlannerPhase phase, RelNode input, RelTraitSet targetTraits,
+      boolean log) {
+    final Stopwatch watch = Stopwatch.createStarted();
+    final RuleSet rules = config.getRules(phase);
+    final RelTraitSet toTraits = targetTraits.simplify();
+
+    final RelNode output;
+    switch (plannerType) {
+    case HEP_BOTTOM_UP:
+    case HEP: {
+      final HepProgramBuilder hepPgmBldr = new HepProgramBuilder();
+      if (plannerType == PlannerType.HEP_BOTTOM_UP) {
+        hepPgmBldr.addMatchOrder(HepMatchOrder.BOTTOM_UP);
+      }
+      for (RelOptRule rule : rules) {
+        hepPgmBldr.addRuleInstance(rule);
+      }
+
+      final HepPlanner planner = new HepPlanner(hepPgmBldr.build(), context.getPlannerSettings());
+
+      final List<RelMetadataProvider> list = Lists.newArrayList();
+      list.add(DrillDefaultRelMetadataProvider.INSTANCE);
+      planner.registerMetadataProviders(list);
+      final RelMetadataProvider cachingMetaDataProvider = new CachingRelMetadataProvider(
+          ChainedRelMetadataProvider.of(list), planner);
+
+      // Modify RelMetaProvider for every RelNode in the SQL operator Rel tree.
+      input.accept(new MetaDataProviderModifier(cachingMetaDataProvider));
+      planner.setRoot(input);
+      if (!input.getTraitSet().equals(targetTraits)) {
+        planner.changeTraits(input, toTraits);
+      }
+      output = planner.findBestExp();
+      break;
+    }
+    case VOLCANO:
+    default: {
+      // as weird as it seems, the cluster's only planner is the volcano planner.
+      final RelOptPlanner planner = input.getCluster().getPlanner();
+      final Program program = Programs.of(rules);
+      Preconditions.checkArgument(planner instanceof VolcanoPlanner,
+          "Cluster is expected to be constructed using VolcanoPlanner. Was actually of type %s.", planner.getClass()
+              .getName());
+      output = program.run(planner, input, toTraits);
+
+      break;
+    }
+    }
+
+    if (log) {
+      log(plannerType, phase, output, logger, watch);
+    }
+
+    return output;
+  }
+
   protected Prel convertToPrel(RelNode drel) throws RelConversionException, SqlUnsupportedException {
     Preconditions.checkArgument(drel.getConvention() == DrillRel.DRILL_LOGICAL);
-    RelTraitSet traits = drel.getTraitSet().plus(Prel.DRILL_PHYSICAL).plus(DrillDistributionTrait.SINGLETON);
+
+    final RelTraitSet traits = drel.getTraitSet().plus(Prel.DRILL_PHYSICAL).plus(DrillDistributionTrait.SINGLETON);
     Prel phyRelNode;
     try {
-      final RelNode relNode = planner.transform(DrillSqlWorker.PHYSICAL_MEM_RULES, traits, drel);
+      final Stopwatch watch = Stopwatch.createStarted();
+      final RelNode relNode = transform(PlannerType.VOLCANO, PlannerPhase.PHYSICAL, drel, traits, false);
       phyRelNode = (Prel) relNode.accept(new PrelFinalizer());
+      // log externally as we need to finalize before traversing the tree.
+      log(PlannerType.VOLCANO, PlannerPhase.PHYSICAL, phyRelNode, logger, watch);
     } catch (RelOptPlanner.CannotPlanException ex) {
       logger.error(ex.getMessage());
 
@@ -293,15 +426,15 @@ public class DefaultSqlHandler extends AbstractSqlHandler {
     OptionManager queryOptions = context.getOptions();
 
     if (context.getPlannerSettings().isMemoryEstimationEnabled()
-      && !MemoryEstimationVisitor.enoughMemory(phyRelNode, queryOptions, context.getActiveEndpoints().size())) {
-      log("Not enough memory for this plan", phyRelNode, logger);
+        && !MemoryEstimationVisitor.enoughMemory(phyRelNode, queryOptions, context.getActiveEndpoints().size())) {
+      log("Not enough memory for this plan", phyRelNode, logger, null);
       logger.debug("Re-planning without hash operations.");
 
       queryOptions.setOption(OptionValue.createBoolean(OptionValue.OptionType.QUERY, PlannerSettings.HASHJOIN.getOptionName(), false));
       queryOptions.setOption(OptionValue.createBoolean(OptionValue.OptionType.QUERY, PlannerSettings.HASHAGG.getOptionName(), false));
 
       try {
-        final RelNode relNode = planner.transform(DrillSqlWorker.PHYSICAL_MEM_RULES, traits, drel);
+        final RelNode relNode = transform(PlannerType.VOLCANO, PlannerPhase.PHYSICAL, drel, traits);
         phyRelNode = (Prel) relNode.accept(new PrelFinalizer());
       } catch (RelOptPlanner.CannotPlanException ex) {
         logger.error(ex.getMessage());
@@ -314,7 +447,7 @@ public class DefaultSqlHandler extends AbstractSqlHandler {
       }
     }
 
-    /*  The order of the following transformation is important */
+    /* The order of the following transformations is important */
 
     /*
      * 0.) For select * from join query, we need insert project on top of scan and a top project just
@@ -337,18 +470,22 @@ public class DefaultSqlHandler extends AbstractSqlHandler {
      * We want to have smaller dataset on the right side, since hash table builds on right side.
      */
     if (context.getPlannerSettings().isHashJoinSwapEnabled()) {
-      phyRelNode = SwapHashJoinVisitor.swapHashJoin(phyRelNode, new Double(context.getPlannerSettings().getHashJoinSwapMarginFactor()));
+      phyRelNode = SwapHashJoinVisitor.swapHashJoin(phyRelNode, new Double(context.getPlannerSettings()
+          .getHashJoinSwapMarginFactor()));
     }
 
     /*
      * 1.2) Break up all expressions with complex outputs into their own project operations
      */
-    phyRelNode = phyRelNode.accept(new SplitUpComplexExpressions(planner.getTypeFactory(), context.getDrillOperatorTable(), context.getPlannerSettings().functionImplementationRegistry), null);
+    phyRelNode = phyRelNode.accept(
+        new SplitUpComplexExpressions(config.getConverter().getTypeFactory(), context.getDrillOperatorTable(), context
+            .getPlannerSettings().functionImplementationRegistry), null);
 
     /*
      * 1.3) Projections that contain reference to flatten are rewritten as Flatten operators followed by Project
      */
-    phyRelNode = phyRelNode.accept(new RewriteProjectToFlatten(planner.getTypeFactory(), context.getDrillOperatorTable()), null);
+    phyRelNode = phyRelNode.accept(
+        new RewriteProjectToFlatten(config.getConverter().getTypeFactory(), context.getDrillOperatorTable()), null);
 
     /*
      * 2.)
@@ -448,9 +585,9 @@ public class DefaultSqlHandler extends AbstractSqlHandler {
   }
 
   private TypedSqlNode validateNode(SqlNode sqlNode) throws ValidationException, RelConversionException, ForemanSetupException {
-    TypedSqlNode typedSqlNode = planner.validateAndGetType(sqlNode);
-
-    SqlNode sqlNodeValidated = typedSqlNode.getSqlNode();
+    final SqlNode sqlNodeValidated = config.getConverter().validate(sqlNode);
+    final TypedSqlNode typedSqlNode = new TypedSqlNode(sqlNodeValidated, config.getConverter().getOutputType(
+        sqlNodeValidated));
 
     // Check if the unsupported functionality is used
     UnsupportedOperatorsVisitor visitor = UnsupportedOperatorsVisitor.createVisitor(context);
@@ -468,22 +605,9 @@ public class DefaultSqlHandler extends AbstractSqlHandler {
   }
 
   private RelNode convertToRel(SqlNode node) throws RelConversionException {
-    final RelNode convertedNode = planner.convert(node);
-
-    final RelMetadataProvider provider = convertedNode.getCluster().getMetadataProvider();
-
-    // Register RelMetadataProvider with HepPlanner.
-    final List<RelMetadataProvider> list = Lists.newArrayList(provider);
-    hepPlanner.registerMetadataProviders(list);
-    final RelMetadataProvider cachingMetaDataProvider = new CachingRelMetadataProvider(ChainedRelMetadataProvider.of(list), hepPlanner);
-    convertedNode.accept(new MetaDataProviderModifier(cachingMetaDataProvider));
-
-    // HepPlanner is specifically used for Window Function planning only.
-    hepPlanner.setRoot(convertedNode);
-    RelNode rel = hepPlanner.findBestExp();
-
-    rel.accept(new MetaDataProviderModifier(provider));
-    return rel;
+    final RelNode convertedNode = config.getConverter().toRel(node);
+    log("INITIAL", convertedNode, logger, null);
+    return transform(PlannerType.HEP, PlannerPhase.WINDOW_REWRITE, convertedNode);
   }
 
   private RelNode preprocessNode(RelNode rel) throws SqlUnsupportedException {
@@ -495,7 +619,7 @@ public class DefaultSqlHandler extends AbstractSqlHandler {
      * 2. see where the tree contains unsupported functions; throw SqlUnsupportedException if there is any.
      */
 
-    PreProcessLogicalRel visitor = PreProcessLogicalRel.createVisitor(planner.getTypeFactory(),
+    PreProcessLogicalRel visitor = PreProcessLogicalRel.createVisitor(config.getConverter().getTypeFactory(),
         context.getDrillOperatorTable());
     try {
       rel = rel.accept(visitor);
@@ -532,82 +656,6 @@ public class DefaultSqlHandler extends AbstractSqlHandler {
     }
   }
 
-
-  private RelNode doLogicalPlanning(RelNode relNode) throws RelConversionException, SqlUnsupportedException {
-    // 1. Call HepPlanner with directory-based partition pruning, in Calcite logical rel
-    // Partition pruning .
-    ImmutableSet<RelOptRule> dirPruneScanRules = ImmutableSet.<RelOptRule>builder()
-        .addAll(DrillRuleSets.getDirPruneScanRules(context))
-        .build();
-
-    relNode = doHepPlan(relNode, dirPruneScanRules, HepMatchOrder.BOTTOM_UP);
-    log("Post-Dir-Pruning", relNode, logger);
-
-    if (! context.getPlannerSettings().isHepOptEnabled()) {
-      return planner.transform(DrillSqlWorker.LOGICAL_RULES, relNode.getTraitSet().plus(DrillRel.DRILL_LOGICAL), relNode);
-    } else {
-      RelNode convertedRelNode = null;
-      if (context.getPlannerSettings().isHepPartitionPruningEnabled()) {
-        convertedRelNode = planner.transform(DrillSqlWorker.LOGICAL_HEP_JOIN__PP_RULES, relNode.getTraitSet().plus(DrillRel.DRILL_LOGICAL), relNode);
-        log("VolcanoRel", convertedRelNode, logger);
-
-        // Partition pruning .
-        ImmutableSet<RelOptRule> pruneScanRules = ImmutableSet.<RelOptRule>builder()
-            .addAll(DrillRuleSets.getPruneScanRules(context))
-            .build();
-
-        convertedRelNode = doHepPlan(convertedRelNode, pruneScanRules, HepMatchOrder.BOTTOM_UP);
-      } else {
-        convertedRelNode = planner.transform(DrillSqlWorker.LOGICAL_HEP_JOIN_RULES, relNode.getTraitSet().plus(DrillRel.DRILL_LOGICAL), relNode);
-        log("VolcanoRel", convertedRelNode, logger);
-      }
-
-      // Join order planning with LOPT rule
-      ImmutableSet<RelOptRule> joinOrderRules = ImmutableSet.<RelOptRule>builder()
-          .add(
-              DrillRuleSets.DRILL_JOIN_TO_MULTIJOIN_RULE,
-              DrillRuleSets.DRILL_LOPT_OPTIMIZE_JOIN_RULE,
-              ProjectRemoveRule.INSTANCE)
-          .build();
-
-      final RelNode loptNode = doHepPlan(convertedRelNode, joinOrderRules, HepMatchOrder.BOTTOM_UP);
-
-      log("HepRel", loptNode, logger);
-
-      return loptNode;
-    }
-  }
-
-  /**
-   * Use HepPlanner to apply optimization rules to RelNode tree.
-   * @return : the root node for optimized plan.
-   */
-  private RelNode doHepPlan(RelNode root, Set<RelOptRule> rules, HepMatchOrder matchOrder) {
-    final HepProgramBuilder hepPgmBldr = new HepProgramBuilder()
-        .addMatchOrder(matchOrder);
-
-    for (final RelOptRule rule : rules) {
-      hepPgmBldr.addRuleInstance(rule);
-    }
-
-    final HepProgram hepPgm = hepPgmBldr.build();
-    final HepPlanner hepPlanner = new HepPlanner(hepPgm, context.getPlannerSettings());
-
-    final List<RelMetadataProvider> list = Lists.newArrayList();
-    list.add(DrillDefaultRelMetadataProvider.INSTANCE);
-    hepPlanner.registerMetadataProviders(list);
-    final RelMetadataProvider cachingMetaDataProvider = new CachingRelMetadataProvider(ChainedRelMetadataProvider.of(list), hepPlanner);
-
-    // Modify RelMetaProvider for every RelNode in the SQL operator Rel tree.
-    root.accept(new MetaDataProviderModifier(cachingMetaDataProvider));
-
-    hepPlanner.setRoot(root);
-
-    RelNode calciteOptimizedPlan = hepPlanner.findBestExp();
-
-    return calciteOptimizedPlan;
-  }
-
   public static class MetaDataProviderModifier extends RelShuttleImpl {
     private final RelMetadataProvider metadataProvider;
 

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DescribeTableHandler.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DescribeTableHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DescribeTableHandler.java
index ba67971..9c14c59 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DescribeTableHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DescribeTableHandler.java
@@ -18,18 +18,18 @@
 
 package org.apache.drill.exec.planner.sql.handlers;
 
+import static org.apache.drill.exec.planner.sql.parser.DrillParserUtil.CHARSET;
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.COLS_COL_COLUMN_NAME;
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.COLS_COL_DATA_TYPE;
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.COLS_COL_IS_NULLABLE;
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.IS_SCHEMA_NAME;
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.SHRD_COL_TABLE_NAME;
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.SHRD_COL_TABLE_SCHEMA;
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.TAB_COLUMNS;
+
 import java.util.List;
 
 import org.apache.calcite.schema.SchemaPlus;
-import org.apache.calcite.tools.RelConversionException;
-
-import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.*;
-
-import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.exec.planner.sql.SchemaUtilites;
-import org.apache.drill.exec.planner.sql.parser.DrillParserUtil;
-import org.apache.drill.exec.planner.sql.parser.SqlDescribeTable;
-import org.apache.drill.exec.work.foreman.ForemanSetupException;
 import org.apache.calcite.sql.SqlIdentifier;
 import org.apache.calcite.sql.SqlLiteral;
 import org.apache.calcite.sql.SqlNode;
@@ -37,12 +37,16 @@ import org.apache.calcite.sql.SqlNodeList;
 import org.apache.calcite.sql.SqlSelect;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.tools.RelConversionException;
 import org.apache.calcite.util.Util;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.exec.planner.sql.SchemaUtilites;
+import org.apache.drill.exec.planner.sql.parser.DrillParserUtil;
+import org.apache.drill.exec.planner.sql.parser.SqlDescribeTable;
+import org.apache.drill.exec.work.foreman.ForemanSetupException;
 
 import com.google.common.collect.ImmutableList;
 
-import static org.apache.drill.exec.planner.sql.parser.DrillParserUtil.CHARSET;
-
 public class DescribeTableHandler extends DefaultSqlHandler {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DescribeTableHandler.class);
 
@@ -63,7 +67,7 @@ public class DescribeTableHandler extends DefaultSqlHandler {
           ImmutableList.of(IS_SCHEMA_NAME, TAB_COLUMNS), null, SqlParserPos.ZERO, null);
 
       final SqlIdentifier table = node.getTable();
-      final SchemaPlus defaultSchema = context.getNewDefaultSchema();
+      final SchemaPlus defaultSchema = config.getConverter().getDefaultSchema();
       final List<String> schemaPathGivenInCmd = Util.skipLast(table.names);
       final SchemaPlus schema = SchemaUtilites.findSchema(defaultSchema, schemaPathGivenInCmd);
 

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DropTableHandler.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DropTableHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DropTableHandler.java
index 211d256..7684cb3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DropTableHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DropTableHandler.java
@@ -17,6 +17,8 @@
  */
 package org.apache.drill.exec.planner.sql.handlers;
 
+import java.io.IOException;
+
 import org.apache.calcite.schema.SchemaPlus;
 import org.apache.calcite.sql.SqlIdentifier;
 import org.apache.calcite.sql.SqlNode;
@@ -29,8 +31,6 @@ import org.apache.drill.exec.planner.sql.SchemaUtilites;
 import org.apache.drill.exec.planner.sql.parser.SqlDropTable;
 import org.apache.drill.exec.store.AbstractSchema;
 
-import java.io.IOException;
-
 // SqlHandler for dropping a table.
 public class DropTableHandler extends DefaultSqlHandler {
 
@@ -55,7 +55,7 @@ public class DropTableHandler extends DefaultSqlHandler {
     SqlDropTable dropTableNode = ((SqlDropTable) sqlNode);
     SqlIdentifier tableIdentifier = dropTableNode.getTableIdentifier();
 
-    SchemaPlus defaultSchema = context.getNewDefaultSchema();
+    SchemaPlus defaultSchema = config.getConverter().getDefaultSchema();
     AbstractSchema drillSchema = null;
 
     if (tableIdentifier != null) {

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ExplainHandler.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ExplainHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ExplainHandler.java
index 502987c..98a14e5 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ExplainHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ExplainHandler.java
@@ -19,10 +19,15 @@ package org.apache.drill.exec.planner.sql.handlers;
 
 import java.io.IOException;
 
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.sql.SqlExplain;
+import org.apache.calcite.sql.SqlExplainLevel;
+import org.apache.calcite.sql.SqlLiteral;
+import org.apache.calcite.sql.SqlNode;
 import org.apache.calcite.tools.RelConversionException;
 import org.apache.calcite.tools.ValidationException;
-
 import org.apache.drill.common.logical.LogicalPlan;
 import org.apache.drill.common.logical.PlanProperties.Generator.ResultMode;
 import org.apache.drill.exec.ops.QueryContext;
@@ -35,12 +40,6 @@ import org.apache.drill.exec.planner.physical.Prel;
 import org.apache.drill.exec.planner.physical.explain.PrelSequencer;
 import org.apache.drill.exec.planner.sql.DirectPlan;
 import org.apache.drill.exec.work.foreman.ForemanSetupException;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.sql.SqlExplain;
-import org.apache.calcite.sql.SqlExplainLevel;
-import org.apache.calcite.sql.SqlLiteral;
-import org.apache.calcite.sql.SqlNode;
 
 public class ExplainHandler extends DefaultSqlHandler {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ExplainHandler.class);
@@ -57,9 +56,8 @@ public class ExplainHandler extends DefaultSqlHandler {
     final RelDataType validatedRowType = convertedRelNode.getValidatedRowType();
     final RelNode queryRelNode = convertedRelNode.getConvertedNode();
 
-    log("Optiq Logical", queryRelNode, logger);
+    log("Calcite", queryRelNode, logger, null);
     DrillRel drel = convertToDrel(queryRelNode, validatedRowType);
-    log("Drill Logical", drel, logger);
 
     if (mode == ResultMode.LOGICAL) {
       LogicalExplain logicalResult = new LogicalExplain(drel, level, context);
@@ -67,7 +65,6 @@ public class ExplainHandler extends DefaultSqlHandler {
     }
 
     Prel prel = convertToPrel(drel);
-    log("Drill Physical", prel, logger);
     PhysicalOperator pop = convertToPop(prel);
     PhysicalPlan plan = convertToPlan(pop);
     log("Drill Plan", plan, logger);

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/RefreshMetadataHandler.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/RefreshMetadataHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/RefreshMetadataHandler.java
index ce4059b..059f7d0 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/RefreshMetadataHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/RefreshMetadataHandler.java
@@ -17,9 +17,9 @@
  */
 package org.apache.drill.exec.planner.sql.handlers;
 
-import java.io.IOException;
-import java.util.List;
+import static org.apache.drill.exec.planner.sql.SchemaUtilites.findSchema;
 
+import java.io.IOException;
 
 import org.apache.calcite.schema.SchemaPlus;
 import org.apache.calcite.schema.Table;
@@ -28,17 +28,9 @@ import org.apache.calcite.tools.RelConversionException;
 import org.apache.calcite.tools.ValidationException;
 import org.apache.drill.common.logical.FormatPluginConfig;
 import org.apache.drill.exec.physical.PhysicalPlan;
-import org.apache.drill.exec.physical.base.PhysicalOperator;
-import org.apache.drill.exec.planner.logical.DrillRel;
-import org.apache.drill.exec.planner.logical.DrillScreenRel;
-import org.apache.drill.exec.planner.logical.DrillStoreRel;
 import org.apache.drill.exec.planner.logical.DrillTable;
-import org.apache.drill.exec.planner.logical.DrillWriterRel;
-import org.apache.drill.exec.planner.physical.Prel;
 import org.apache.drill.exec.planner.sql.DirectPlan;
-import org.apache.drill.exec.planner.sql.DrillSqlWorker;
 import org.apache.drill.exec.planner.sql.parser.SqlRefreshMetadata;
-import org.apache.drill.exec.store.AbstractSchema;
 import org.apache.drill.exec.store.dfs.DrillFileSystem;
 import org.apache.drill.exec.store.dfs.FileSystemPlugin;
 import org.apache.drill.exec.store.dfs.FormatSelection;
@@ -46,11 +38,8 @@ import org.apache.drill.exec.store.dfs.NamedFormatPluginConfig;
 import org.apache.drill.exec.store.parquet.Metadata;
 import org.apache.drill.exec.store.parquet.ParquetFormatConfig;
 import org.apache.drill.exec.work.foreman.ForemanSetupException;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 
-import static org.apache.drill.exec.planner.sql.SchemaUtilites.findSchema;
-
 public class RefreshMetadataHandler extends DefaultSqlHandler {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(RefreshMetadataHandler.class);
 
@@ -72,7 +61,7 @@ public class RefreshMetadataHandler extends DefaultSqlHandler {
 
     try {
 
-      final SchemaPlus schema = findSchema(context.getNewDefaultSchema(),
+      final SchemaPlus schema = findSchema(config.getConverter().getDefaultSchema(),
           refreshTable.getSchemaPath());
 
       final String tableName = refreshTable.getName();

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowFileHandler.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowFileHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowFileHandler.java
index 3051279..fb564a2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowFileHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowFileHandler.java
@@ -22,21 +22,20 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.SqlNode;
 import org.apache.calcite.tools.RelConversionException;
 import org.apache.calcite.tools.ValidationException;
-
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.exec.physical.PhysicalPlan;
 import org.apache.drill.exec.planner.sql.DirectPlan;
 import org.apache.drill.exec.planner.sql.SchemaUtilites;
 import org.apache.drill.exec.planner.sql.parser.SqlShowFiles;
 import org.apache.drill.exec.store.AbstractSchema;
-import org.apache.drill.exec.store.dfs.WorkspaceSchemaFactory.WorkspaceSchema;
 import org.apache.drill.exec.store.dfs.DrillFileSystem;
+import org.apache.drill.exec.store.dfs.WorkspaceSchemaFactory.WorkspaceSchema;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
-import org.apache.calcite.sql.SqlIdentifier;
-import org.apache.calcite.sql.SqlNode;
 
 
 public class ShowFileHandler extends DefaultSqlHandler {
@@ -55,7 +54,7 @@ public class ShowFileHandler extends DefaultSqlHandler {
     String defaultLocation = null;
     String fromDir = "./";
 
-    SchemaPlus defaultSchema = context.getNewDefaultSchema();
+    SchemaPlus defaultSchema = config.getConverter().getDefaultSchema();
     SchemaPlus drillSchema = defaultSchema;
 
     // Show files can be used without from clause, in which case we display the files in the default schema

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowTablesHandler.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowTablesHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowTablesHandler.java
index a007e9f..206f966 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowTablesHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowTablesHandler.java
@@ -19,19 +19,14 @@
 package org.apache.drill.exec.planner.sql.handlers;
 
 import static org.apache.drill.exec.planner.sql.parser.DrillParserUtil.CHARSET;
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.IS_SCHEMA_NAME;
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.SHRD_COL_TABLE_NAME;
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.SHRD_COL_TABLE_SCHEMA;
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.TAB_TABLES;
 
 import java.util.List;
 
 import org.apache.calcite.schema.SchemaPlus;
-import org.apache.calcite.tools.RelConversionException;
-
-import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.exec.planner.sql.SchemaUtilites;
-import org.apache.drill.exec.planner.sql.parser.DrillParserUtil;
-import org.apache.drill.exec.planner.sql.parser.SqlShowTables;
-import org.apache.drill.exec.store.AbstractSchema;
-import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.*;
-import org.apache.drill.exec.work.foreman.ForemanSetupException;
 import org.apache.calcite.sql.SqlIdentifier;
 import org.apache.calcite.sql.SqlLiteral;
 import org.apache.calcite.sql.SqlNode;
@@ -39,6 +34,13 @@ import org.apache.calcite.sql.SqlNodeList;
 import org.apache.calcite.sql.SqlSelect;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.tools.RelConversionException;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.exec.planner.sql.SchemaUtilites;
+import org.apache.drill.exec.planner.sql.parser.DrillParserUtil;
+import org.apache.drill.exec.planner.sql.parser.SqlShowTables;
+import org.apache.drill.exec.store.AbstractSchema;
+import org.apache.drill.exec.work.foreman.ForemanSetupException;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
@@ -68,7 +70,7 @@ public class ShowTablesHandler extends DefaultSqlHandler {
       tableSchema = db.toString();
     } else {
       // If no schema is given in SHOW TABLES command, list tables from current schema
-      SchemaPlus schema = context.getNewDefaultSchema();
+      SchemaPlus schema = config.getConverter().getDefaultSchema();
 
       if (SchemaUtilites.isRootSchema(schema)) {
         // If the default schema is a root schema, throw an error to select a default schema

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/SqlHandlerConfig.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/SqlHandlerConfig.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/SqlHandlerConfig.java
index 4027fe6..493b097 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/SqlHandlerConfig.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/SqlHandlerConfig.java
@@ -18,30 +18,41 @@
 
 package org.apache.drill.exec.planner.sql.handlers;
 
-import org.apache.calcite.tools.Planner;
+import java.util.Collection;
+import java.util.Map.Entry;
+
+import org.apache.calcite.tools.RuleSet;
 import org.apache.drill.exec.ops.QueryContext;
-import org.apache.calcite.plan.hep.HepPlanner;
+import org.apache.drill.exec.planner.PlannerPhase;
+import org.apache.drill.exec.planner.sql.SqlConverter;
+import org.apache.drill.exec.store.StoragePlugin;
+
+import com.google.common.collect.Lists;
 
 public class SqlHandlerConfig {
+
   private final QueryContext context;
-  private final HepPlanner hepPlanner;
-  private final Planner planner;
+  private final SqlConverter converter;
 
-  public SqlHandlerConfig(HepPlanner hepPlanner, Planner planner, QueryContext context) {
-    this.hepPlanner = hepPlanner;
-    this.planner = planner;
+  public SqlHandlerConfig(QueryContext context, SqlConverter converter) {
+    super();
     this.context = context;
+    this.converter = converter;
   }
 
-  public Planner getPlanner() {
-    return planner;
+  public QueryContext getContext() {
+    return context;
   }
 
-  public HepPlanner getHepPlanner() {
-    return hepPlanner;
+  public RuleSet getRules(PlannerPhase phase) {
+    Collection<StoragePlugin> plugins = Lists.newArrayList();
+    for (Entry<String, StoragePlugin> k : context.getStorage()) {
+      plugins.add(k.getValue());
+    }
+    return phase.getRules(context, plugins);
   }
 
-  public QueryContext getContext() {
-    return context;
+  public SqlConverter getConverter() {
+    return converter;
   }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java
index d99c712..fa2c450 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java
@@ -26,6 +26,7 @@ import org.apache.drill.common.JSONOptions;
 import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.exec.ops.OptimizerRulesContext;
 import org.apache.drill.exec.physical.base.AbstractGroupScan;
+import org.apache.drill.exec.planner.PlannerPhase;
 
 import com.google.common.collect.ImmutableSet;
 
@@ -49,8 +50,8 @@ public abstract class AbstractStoragePlugin implements StoragePlugin{
   }
 
   /**
-   * @deprecated Marking for deprecation in next major version release.
-   * Use {@link #getPhysicalOptimizerRules(org.apache.drill.exec.ops.OptimizerRulesContext)}
+   * @deprecated Marking for deprecation in next major version release. Use
+   *             {@link #getPhysicalOptimizerRules(org.apache.drill.exec.ops.OptimizerRulesContext, org.apache.drill.exec.planner.PlannerPhase)}
    */
   @Override
   @Deprecated
@@ -58,27 +59,45 @@ public abstract class AbstractStoragePlugin implements StoragePlugin{
     return ImmutableSet.of();
   }
 
-  /** An implementation of this method will return one or more specialized rules that Drill query
-   *  optimizer can leverage in <i>logical</i> space. Otherwise, it should return an empty set.
-   * @return an empty set or a set of plugin specific logical optimizer rules.
-   *
-   * Note: Move this method to {@link StoragePlugin} interface in next major version release.
+  /**
+   * @deprecated Marking for deprecation in next major version release. Use
+   *             {@link #getPhysicalOptimizerRules(org.apache.drill.exec.ops.OptimizerRulesContext, org.apache.drill.exec.planner.PlannerPhase)}
    */
+  @Deprecated
   public Set<? extends RelOptRule> getLogicalOptimizerRules(OptimizerRulesContext optimizerContext) {
     return ImmutableSet.of();
   }
 
-  /** An implementation of this method will return one or more specialized rules that Drill query
-   *  optimizer can leverage in <i>physical</i> space. Otherwise, it should return an empty set.
-   * @return an empty set or a set of plugin specific physical optimizer rules.
-   *
-   * Note: Move this method to {@link StoragePlugin} interface in next major version release.
+  /**
+   * @deprecated Marking for deprecation in next major version release. Use
+   *             {@link #getPhysicalOptimizerRules(org.apache.drill.exec.ops.OptimizerRulesContext, org.apache.drill.exec.planner.PlannerPhase)}
    */
+  @Deprecated
   public Set<? extends RelOptRule> getPhysicalOptimizerRules(OptimizerRulesContext optimizerRulesContext) {
     // To be backward compatible, by default call the getOptimizerRules() method.
     return getOptimizerRules(optimizerRulesContext);
   }
 
+  /**
+   *
+   * Note: Move this method to {@link StoragePlugin} interface in next major version release.
+   */
+  public Set<? extends RelOptRule> getOptimizerRules(OptimizerRulesContext optimizerContext, PlannerPhase phase) {
+    switch (phase) {
+    case LOGICAL_PRUNE_AND_JOIN:
+    case LOGICAL_PRUNE:
+    case LOGICAL:
+      return getLogicalOptimizerRules(optimizerContext);
+    case PHYSICAL:
+      return getPhysicalOptimizerRules(optimizerContext);
+    case PARTITION_PRUNING:
+    case JOIN_PLANNING:
+    default:
+      return ImmutableSet.of();
+    }
+
+  }
+
   @Override
   public AbstractGroupScan getPhysicalScan(String userName, JSONOptions selection) throws IOException {
     return getPhysicalScan(userName, selection, AbstractGroupScan.ALL_COLUMNS);

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePlugin.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePlugin.java
index 574ae8c..112bc15 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePlugin.java
@@ -46,6 +46,7 @@ public interface StoragePlugin extends SchemaFactory, AutoCloseable {
    *  optimizer can leverage in <i>physical</i> space. Otherwise, it should return an empty set.
    * @return an empty set or a set of plugin specific physical optimizer rules.
    */
+  @Deprecated
   public Set<? extends RelOptRule> getOptimizerRules(OptimizerRulesContext optimizerContext);
 
   /**

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistry.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistry.java
index b6eed2d..7018ce8 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistry.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistry.java
@@ -19,12 +19,10 @@ package org.apache.drill.exec.store;
 
 import java.util.Map;
 
-import org.apache.calcite.tools.RuleSet;
 import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.common.logical.FormatPluginConfig;
 import org.apache.drill.common.logical.StoragePluginConfig;
 import org.apache.drill.exec.exception.DrillbitStartupException;
-import org.apache.drill.exec.ops.OptimizerRulesContext;
 import org.apache.drill.exec.store.dfs.FormatPlugin;
 import org.apache.drill.exec.store.sys.PersistentStore;
 
@@ -107,14 +105,6 @@ public interface StoragePluginRegistry extends Iterable<Map.Entry<String, Storag
   PersistentStore<StoragePluginConfig> getStore();
 
   /**
-   * Return StoragePlugin rule sets.
-   *
-   * @param optimizerRulesContext
-   * @return Array of logical and physical rule sets.
-   */
-  RuleSet[] getStoragePluginRuleSet(OptimizerRulesContext optimizerRulesContext);
-
-  /**
    * Get the Schema factory associated with this storage plugin registry.
    * @return A SchemaFactory that can register the schemas associated with this plugin registry.
    */

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistryImpl.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistryImpl.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistryImpl.java
index e680502..ad38586 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistryImpl.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistryImpl.java
@@ -34,9 +34,7 @@ import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.schema.SchemaPlus;
-import org.apache.calcite.tools.RuleSet;
 import org.apache.drill.common.config.LogicalPlanPersistence;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.common.exceptions.ExecutionSetupException;
@@ -47,8 +45,6 @@ import org.apache.drill.common.scanner.persistence.ScanResult;
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.exception.DrillbitStartupException;
 import org.apache.drill.exec.exception.StoreException;
-import org.apache.drill.exec.ops.OptimizerRulesContext;
-import org.apache.drill.exec.planner.logical.DrillRuleSets;
 import org.apache.drill.exec.planner.logical.StoragePlugins;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.store.dfs.FileSystemPlugin;
@@ -68,8 +64,6 @@ import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableSet.Builder;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
@@ -338,40 +332,6 @@ public class StoragePluginRegistryImpl implements StoragePluginRegistry {
     return plugins.iterator();
   }
 
-  /**
-   * Return StoragePlugin rule sets.
-   *
-   * @param optimizerRulesContext
-   * @return Array of logical and physical rule sets.
-   */
-  public RuleSet[] getStoragePluginRuleSet(OptimizerRulesContext optimizerRulesContext) {
-    // query registered engines for optimizer rules and build the storage plugin RuleSet
-    Builder<RelOptRule> logicalRulesBuilder = ImmutableSet.builder();
-    Builder<RelOptRule> physicalRulesBuilder = ImmutableSet.builder();
-    for (StoragePlugin plugin : this.plugins.plugins()) {
-      if (plugin instanceof AbstractStoragePlugin) {
-        final AbstractStoragePlugin abstractPlugin = (AbstractStoragePlugin) plugin;
-        Set<? extends RelOptRule> rules = abstractPlugin.getLogicalOptimizerRules(optimizerRulesContext);
-        if (rules != null && rules.size() > 0) {
-          logicalRulesBuilder.addAll(rules);
-        }
-        rules = abstractPlugin.getPhysicalOptimizerRules(optimizerRulesContext);
-        if (rules != null && rules.size() > 0) {
-          physicalRulesBuilder.addAll(rules);
-        }
-      } else {
-        final Set<? extends RelOptRule> rules = plugin.getOptimizerRules(optimizerRulesContext);
-        if (rules != null && rules.size() > 0) {
-          physicalRulesBuilder.addAll(rules);
-        }
-      }
-    }
-
-    return new RuleSet[] {
-        DrillRuleSets.create(logicalRulesBuilder.build()),
-        DrillRuleSets.create(physicalRulesBuilder.build()) };
-  }
-
   public SchemaFactory getSchemaFactory() {
     return schemaFactory;
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java
index 8527850..1464cad 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java
@@ -923,9 +923,8 @@ public class Foreman implements Runnable {
     // them together such that it is easy to search based on query id
     logger.info("Query text for query id {}: {}", this.queryIdString, sql);
 
-    final DrillSqlWorker sqlWorker = new DrillSqlWorker(queryContext);
     final Pointer<String> textPlan = new Pointer<>();
-    final PhysicalPlan plan = sqlWorker.getPlan(sql, textPlan);
+    final PhysicalPlan plan = DrillSqlWorker.getPlan(queryContext, sql, textPlan);
     queryManager.setPlanText(textPlan.value);
     runPhysicalPlan(plan);
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java b/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java
index 9f39d15..6a038f1 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java
@@ -150,8 +150,7 @@ public class PlanningBase extends ExecTest{
       if (sql.trim().isEmpty()) {
         continue;
       }
-      final DrillSqlWorker worker = new DrillSqlWorker(context);
-      final PhysicalPlan p = worker.getPlan(sql);
+      final PhysicalPlan p = DrillSqlWorker.getPlan(context, sql);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/test/java/org/apache/drill/exec/planner/sql/TestDrillSQLWorker.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/planner/sql/TestDrillSQLWorker.java b/exec/java-exec/src/test/java/org/apache/drill/exec/planner/sql/TestDrillSQLWorker.java
index 718d42c..3d85e2e 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/planner/sql/TestDrillSQLWorker.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/planner/sql/TestDrillSQLWorker.java
@@ -25,7 +25,7 @@ import org.junit.Test;
 public class TestDrillSQLWorker {
 
   private void validateFormattedIs(String sql, SqlParserPos pos, String expected) {
-    String formatted = DrillSqlWorker.formatSQLParsingError(sql, pos);
+    String formatted = SqlConverter.formatSQLParsingError(sql, pos);
     assertEquals(expected, formatted);
   }
 

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestSqlBracketlessSyntax.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestSqlBracketlessSyntax.java b/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestSqlBracketlessSyntax.java
index 4a3323d..0e9fdb7 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestSqlBracketlessSyntax.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/sql/TestSqlBracketlessSyntax.java
@@ -44,7 +44,7 @@ public class TestSqlBracketlessSyntax {
             .setParserFactory(DrillParserImpl.FACTORY)
             .build()) //
         .defaultSchema(SimpleCalciteSchema.createRootSchema(false)) //
-        .convertletTable(new DrillConvertletTable()) //
+        .convertletTable(DrillConvertletTable.INSTANCE) //
         .build();
     Planner planner = Frameworks.getPlanner(config);
 

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataGetColumnsTest.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataGetColumnsTest.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataGetColumnsTest.java
index d201140..bca6325 100644
--- a/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataGetColumnsTest.java
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataGetColumnsTest.java
@@ -17,32 +17,30 @@
  */
 package org.apache.drill.jdbc;
 
-import static org.junit.Assert.fail;
+import static java.sql.ResultSetMetaData.columnNoNulls;
+import static java.sql.ResultSetMetaData.columnNullable;
+import static java.sql.ResultSetMetaData.columnNullableUnknown;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.nullValue;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
-import static org.hamcrest.CoreMatchers.*;
-
-import org.apache.drill.jdbc.Driver;
-import org.apache.drill.jdbc.test.JdbcAssert;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
+import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
 import java.sql.ResultSet;
 import java.sql.ResultSetMetaData;
-import java.sql.Statement;
-
-import static java.sql.ResultSetMetaData.columnNoNulls;
-import static java.sql.ResultSetMetaData.columnNullable;
-import static java.sql.ResultSetMetaData.columnNullableUnknown;
-
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.sql.Types;
 
+import org.apache.drill.jdbc.test.JdbcAssert;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
 // NOTE: TestInformationSchemaColumns and DatabaseMetaDataGetColumnsTest
 // have identical sections.  (Cross-maintain them for now; factor out later.)
 
@@ -1022,7 +1020,7 @@ public class DatabaseMetaDataGetColumnsTest extends JdbcTestBase {
 
   @Test
   public void test_COLUMN_SIZE_hasRightValue_mdrOptVARCHAR() throws SQLException {
-    assertThat( getIntOrNull( mdrOptVARCHAR, "COLUMN_SIZE" ), equalTo( 1 ) );
+    assertThat(getIntOrNull(mdrOptVARCHAR, "COLUMN_SIZE"), equalTo(65536));
   }
 
   @Test
@@ -2166,7 +2164,7 @@ public class DatabaseMetaDataGetColumnsTest extends JdbcTestBase {
   @Test
   public void test_CHAR_OCTET_LENGTH_hasRightValue_mdrOptVARCHAR() throws SQLException {
     assertThat( getIntOrNull( mdrOptVARCHAR, "CHAR_OCTET_LENGTH" ),
-                equalTo( 1    /* chars. (default of 1) */
+        equalTo(65536 /* chars. (default of 65536) */
                          * 4  /* max. UTF-8 bytes per char. */ ) );
   }
 

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestInformationSchemaColumns.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestInformationSchemaColumns.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestInformationSchemaColumns.java
index 4e076b7..5faf4dc 100644
--- a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestInformationSchemaColumns.java
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestInformationSchemaColumns.java
@@ -17,10 +17,20 @@
  */
 package org.apache.drill.jdbc.test;
 
-import static org.junit.Assert.fail;
+import static java.sql.ResultSetMetaData.columnNoNulls;
+import static java.sql.ResultSetMetaData.columnNullable;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.nullValue;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
-import static org.hamcrest.CoreMatchers.*;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Types;
 
 import org.apache.drill.jdbc.Driver;
 import org.apache.drill.jdbc.JdbcTestBase;
@@ -29,17 +39,6 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.ResultSetMetaData;
-import java.sql.Statement;
-
-import static java.sql.ResultSetMetaData.columnNoNulls;
-import static java.sql.ResultSetMetaData.columnNullable;
-
-import java.sql.SQLException;
-import java.sql.Types;
-
 // NOTE: TestInformationSchemaColumns and DatabaseMetaDataGetColumnsTest
 // have identical sections.  (Cross-maintain them for now; factor out later.)
 
@@ -1155,7 +1154,7 @@ public class TestInformationSchemaColumns extends JdbcTestBase {
 
   @Test
   public void test_CHARACTER_MAXIMUM_LENGTH_hasRightValue_mdrOptVARCHAR() throws SQLException {
-    assertThat( getIntOrNull( mdrOptVARCHAR, "CHARACTER_MAXIMUM_LENGTH" ), equalTo( 1 ) );
+    assertThat(getIntOrNull(mdrOptVARCHAR, "CHARACTER_MAXIMUM_LENGTH"), equalTo(65536));
   }
 
   @Test
@@ -1319,7 +1318,7 @@ public class TestInformationSchemaColumns extends JdbcTestBase {
   @Test
   public void test_CHARACTER_OCTET_LENGTH_hasRightValue_mdrOptVARCHAR() throws SQLException {
     assertThat( getIntOrNull( mdrOptVARCHAR, "CHARACTER_OCTET_LENGTH" ),
-                equalTo( 1    /* chars. (default of 1) */
+        equalTo(65536 /* chars. (default of 65536) */
                          * 4  /* max. UTF-8 bytes per char. */ ) );
   }
 

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/jdbc/src/test/resources/null_ordering_and_grouping_data.json
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/resources/null_ordering_and_grouping_data.json b/exec/jdbc/src/test/resources/null_ordering_and_grouping_data.json
index aa701aa..4e68cf0 100644
--- a/exec/jdbc/src/test/resources/null_ordering_and_grouping_data.json
+++ b/exec/jdbc/src/test/resources/null_ordering_and_grouping_data.json
@@ -1,7 +1,7 @@
 {
     "id": "1"
     ,
-    "for_VarChar": "Bb"
+    "for_VarChar": "B"
     ,
     "for_Int": 180
     ,


Mime
View raw message