drill-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jacq...@apache.org
Subject [2/2] drill git commit: DRILL-4465: Simplify Calcite parsing & planning integration
Date Fri, 04 Mar 2016 06:49:25 GMT
DRILL-4465: Simplify Calcite parsing & planning integration

- Canonicalize Planning phases with PlannerPhase enumeration
- Canonicalize PlannerType transforms
- Remove dependency on Calcite's Frameworks.Planner since Drill needs to heavily customize interactions
- Update AbstractStoragePlugin to implement a phase-aware planning rule injection behavior.
- Avoid (or at least reduce) duplicated registerSchemas() invocations

This closes #401.


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/84b3a8a8
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/84b3a8a8
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/84b3a8a8

Branch: refs/heads/master
Commit: 84b3a8a87f7d3dec928a871fa5d5e6bfed7c340e
Parents: c98edba
Author: Jacques Nadeau <jacques@apache.org>
Authored: Thu Feb 25 10:45:40 2016 -0800
Committer: Jacques Nadeau <jacques@apache.org>
Committed: Thu Mar 3 21:38:05 2016 -0800

----------------------------------------------------------------------
 .../org/apache/drill/exec/ExecConstants.java    |   3 +-
 .../apache/drill/exec/planner/PlannerPhase.java | 433 +++++++++++++++++++
 .../apache/drill/exec/planner/PlannerType.java  |  22 +
 .../exec/planner/logical/DrillRuleSets.java     | 329 --------------
 .../exec/planner/sql/DrillConvertletTable.java  |   5 +
 .../drill/exec/planner/sql/DrillSqlWorker.java  | 161 +------
 .../drill/exec/planner/sql/SqlConverter.java    | 367 ++++++++++++++++
 .../sql/handlers/CreateTableHandler.java        |  17 +-
 .../planner/sql/handlers/DefaultSqlHandler.java | 352 ++++++++-------
 .../sql/handlers/DescribeTableHandler.java      |  28 +-
 .../planner/sql/handlers/DropTableHandler.java  |   6 +-
 .../planner/sql/handlers/ExplainHandler.java    |  17 +-
 .../sql/handlers/RefreshMetadataHandler.java    |  17 +-
 .../planner/sql/handlers/ShowFileHandler.java   |   9 +-
 .../planner/sql/handlers/ShowTablesHandler.java |  22 +-
 .../planner/sql/handlers/SqlHandlerConfig.java  |  37 +-
 .../drill/exec/store/AbstractStoragePlugin.java |  43 +-
 .../apache/drill/exec/store/StoragePlugin.java  |   1 +
 .../drill/exec/store/StoragePluginRegistry.java |  10 -
 .../exec/store/StoragePluginRegistryImpl.java   |  40 --
 .../apache/drill/exec/work/foreman/Foreman.java |   3 +-
 .../java/org/apache/drill/PlanningBase.java     |   3 +-
 .../exec/planner/sql/TestDrillSQLWorker.java    |   2 +-
 .../exec/sql/TestSqlBracketlessSyntax.java      |   2 +-
 .../jdbc/DatabaseMetaDataGetColumnsTest.java    |  32 +-
 .../jdbc/test/TestInformationSchemaColumns.java |  29 +-
 .../null_ordering_and_grouping_data.json        |   2 +-
 27 files changed, 1188 insertions(+), 804 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index c5a5063..179924e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -167,7 +167,8 @@ public interface ExecConstants {
 
   String SLICE_TARGET = "planner.slice_target";
   long SLICE_TARGET_DEFAULT = 100000l;
-  OptionValidator SLICE_TARGET_OPTION = new PositiveLongValidator(SLICE_TARGET, Long.MAX_VALUE, SLICE_TARGET_DEFAULT);
+  PositiveLongValidator SLICE_TARGET_OPTION = new PositiveLongValidator(SLICE_TARGET, Long.MAX_VALUE,
+      SLICE_TARGET_DEFAULT);
 
   String CAST_TO_NULLABLE_NUMERIC = "drill.exec.functions.cast_empty_string_to_null";
   OptionValidator CAST_TO_NULLABLE_NUMERIC_OPTION = new BooleanValidator(CAST_TO_NULLABLE_NUMERIC, false);

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PlannerPhase.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PlannerPhase.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PlannerPhase.java
new file mode 100644
index 0000000..7cbed77
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PlannerPhase.java
@@ -0,0 +1,433 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.volcano.AbstractConverter.ExpandConversionRule;
+import org.apache.calcite.rel.core.RelFactories;
+import org.apache.calcite.rel.rules.AggregateExpandDistinctAggregatesRule;
+import org.apache.calcite.rel.rules.AggregateRemoveRule;
+import org.apache.calcite.rel.rules.FilterAggregateTransposeRule;
+import org.apache.calcite.rel.rules.FilterMergeRule;
+import org.apache.calcite.rel.rules.JoinPushExpressionsRule;
+import org.apache.calcite.rel.rules.JoinPushThroughJoinRule;
+import org.apache.calcite.rel.rules.JoinToMultiJoinRule;
+import org.apache.calcite.rel.rules.LoptOptimizeJoinRule;
+import org.apache.calcite.rel.rules.ProjectRemoveRule;
+import org.apache.calcite.rel.rules.ProjectToWindowRule;
+import org.apache.calcite.rel.rules.ProjectWindowTransposeRule;
+import org.apache.calcite.rel.rules.ReduceExpressionsRule;
+import org.apache.calcite.rel.rules.SortRemoveRule;
+import org.apache.calcite.rel.rules.UnionToDistinctRule;
+import org.apache.calcite.tools.RuleSet;
+import org.apache.calcite.tools.RuleSets;
+import org.apache.drill.exec.ops.OptimizerRulesContext;
+import org.apache.drill.exec.planner.logical.DrillAggregateRule;
+import org.apache.drill.exec.planner.logical.DrillFilterJoinRules;
+import org.apache.drill.exec.planner.logical.DrillFilterRule;
+import org.apache.drill.exec.planner.logical.DrillJoinRel;
+import org.apache.drill.exec.planner.logical.DrillJoinRule;
+import org.apache.drill.exec.planner.logical.DrillLimitRule;
+import org.apache.drill.exec.planner.logical.DrillMergeProjectRule;
+import org.apache.drill.exec.planner.logical.DrillProjectRule;
+import org.apache.drill.exec.planner.logical.DrillPushFilterPastProjectRule;
+import org.apache.drill.exec.planner.logical.DrillPushLimitToScanRule;
+import org.apache.drill.exec.planner.logical.DrillPushProjIntoScan;
+import org.apache.drill.exec.planner.logical.DrillPushProjectPastFilterRule;
+import org.apache.drill.exec.planner.logical.DrillPushProjectPastJoinRule;
+import org.apache.drill.exec.planner.logical.DrillReduceAggregatesRule;
+import org.apache.drill.exec.planner.logical.DrillReduceExpressionsRule;
+import org.apache.drill.exec.planner.logical.DrillRelFactories;
+import org.apache.drill.exec.planner.logical.DrillScanRule;
+import org.apache.drill.exec.planner.logical.DrillSortRule;
+import org.apache.drill.exec.planner.logical.DrillUnionAllRule;
+import org.apache.drill.exec.planner.logical.DrillValuesRule;
+import org.apache.drill.exec.planner.logical.DrillWindowRule;
+import org.apache.drill.exec.planner.logical.partition.ParquetPruneScanRule;
+import org.apache.drill.exec.planner.logical.partition.PruneScanRule;
+import org.apache.drill.exec.planner.physical.ConvertCountToDirectScan;
+import org.apache.drill.exec.planner.physical.FilterPrule;
+import org.apache.drill.exec.planner.physical.HashAggPrule;
+import org.apache.drill.exec.planner.physical.HashJoinPrule;
+import org.apache.drill.exec.planner.physical.LimitPrule;
+import org.apache.drill.exec.planner.physical.LimitUnionExchangeTransposeRule;
+import org.apache.drill.exec.planner.physical.MergeJoinPrule;
+import org.apache.drill.exec.planner.physical.NestedLoopJoinPrule;
+import org.apache.drill.exec.planner.physical.PlannerSettings;
+import org.apache.drill.exec.planner.physical.ProjectPrule;
+import org.apache.drill.exec.planner.physical.PushLimitToTopN;
+import org.apache.drill.exec.planner.physical.ScanPrule;
+import org.apache.drill.exec.planner.physical.ScreenPrule;
+import org.apache.drill.exec.planner.physical.SortConvertPrule;
+import org.apache.drill.exec.planner.physical.SortPrule;
+import org.apache.drill.exec.planner.physical.StreamAggPrule;
+import org.apache.drill.exec.planner.physical.UnionAllPrule;
+import org.apache.drill.exec.planner.physical.ValuesPrule;
+import org.apache.drill.exec.planner.physical.WindowPrule;
+import org.apache.drill.exec.planner.physical.WriterPrule;
+import org.apache.drill.exec.store.AbstractStoragePlugin;
+import org.apache.drill.exec.store.StoragePlugin;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ImmutableSet.Builder;
+
+public enum PlannerPhase {
+  //private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillRuleSets.class);
+
+  LOGICAL_PRUNE_AND_JOIN("Loigcal Planning (with join and partition pruning)") {
+    public RuleSet getRules(OptimizerRulesContext context, Collection<StoragePlugin> plugins) {
+      return PlannerPhase.mergedRuleSets(
+          getDrillBasicRules(context),
+          getPruneScanRules(context),
+          getJoinPermRules(context),
+          getDrillUserConfigurableLogicalRules(context),
+          getStorageRules(context, plugins, this));
+    }
+  },
+
+  WINDOW_REWRITE("Window Function rewrites") {
+    public RuleSet getRules(OptimizerRulesContext context, Collection<StoragePlugin> plugins) {
+      return RuleSets.ofList(
+          ReduceExpressionsRule.CALC_INSTANCE,
+          ProjectToWindowRule.PROJECT
+          );
+    }
+  },
+
+  LOGICAL_PRUNE("Logical Planning (with partition pruning)") {
+    public RuleSet getRules(OptimizerRulesContext context, Collection<StoragePlugin> plugins) {
+      return PlannerPhase.mergedRuleSets(
+          getDrillBasicRules(context),
+          getPruneScanRules(context),
+          getDrillUserConfigurableLogicalRules(context),
+          getStorageRules(context, plugins, this));
+    }
+  },
+
+  JOIN_PLANNING("LOPT Join Planning") {
+    public RuleSet getRules(OptimizerRulesContext context, Collection<StoragePlugin> plugins) {
+      return PlannerPhase.mergedRuleSets(
+          RuleSets.ofList(
+              DRILL_JOIN_TO_MULTIJOIN_RULE,
+              DRILL_LOPT_OPTIMIZE_JOIN_RULE,
+              ProjectRemoveRule.INSTANCE),
+          getStorageRules(context, plugins, this)
+          );
+    }
+  },
+
+  PARTITION_PRUNING("Partition Prune Planning") {
+    public RuleSet getRules(OptimizerRulesContext context, Collection<StoragePlugin> plugins) {
+      return PlannerPhase.mergedRuleSets(getPruneScanRules(context), getStorageRules(context, plugins, this));
+    }
+  },
+
+  DIRECTORY_PRUNING("Directory Prune Planning") {
+    public RuleSet getRules(OptimizerRulesContext context, Collection<StoragePlugin> plugins) {
+      return PlannerPhase.mergedRuleSets(getDirPruneScanRules(context), getStorageRules(context, plugins, this));
+    }
+  },
+
+  LOGICAL("Logical Planning (no pruning or join).") {
+    public RuleSet getRules(OptimizerRulesContext context, Collection<StoragePlugin> plugins) {
+      return PlannerPhase.mergedRuleSets(
+          PlannerPhase.getDrillBasicRules(context),
+          PlannerPhase.getDrillUserConfigurableLogicalRules(context),
+          getStorageRules(context, plugins, this));
+    }
+  },
+
+  PHYSICAL("Physical Planning") {
+    public RuleSet getRules(OptimizerRulesContext context, Collection<StoragePlugin> plugins) {
+      return PlannerPhase.mergedRuleSets(
+          PlannerPhase.getPhysicalRules(context),
+          getStorageRules(context, plugins, this));
+    }
+  };
+
+  public final String description;
+
+  PlannerPhase(String description) {
+    this.description = description;
+  }
+
+  public abstract RuleSet getRules(OptimizerRulesContext context, Collection<StoragePlugin> plugins);
+
+  private static RuleSet getStorageRules(OptimizerRulesContext context, Collection<StoragePlugin> plugins,
+      PlannerPhase phase) {
+    final Builder<RelOptRule> rules = ImmutableSet.builder();
+    for(StoragePlugin plugin : plugins){
+      if(plugin instanceof AbstractStoragePlugin){
+        rules.addAll(((AbstractStoragePlugin) plugin).getOptimizerRules(context, phase));
+      }else{
+        rules.addAll(plugin.getOptimizerRules(context));
+      }
+    }
+    return RuleSets.ofList(rules.build());
+  }
+
+
+  static final RelOptRule DRILL_JOIN_TO_MULTIJOIN_RULE = new JoinToMultiJoinRule(DrillJoinRel.class);
+  static final RelOptRule DRILL_LOPT_OPTIMIZE_JOIN_RULE = new LoptOptimizeJoinRule(
+      DrillRelFactories.DRILL_LOGICAL_JOIN_FACTORY,
+      DrillRelFactories.DRILL_LOGICAL_PROJECT_FACTORY,
+      DrillRelFactories.DRILL_LOGICAL_FILTER_FACTORY);
+
+  /**
+   * Get a list of logical rules that can be turned on or off by session/system options.
+   *
+   * If a rule is intended to always be included with the logical set, it should be added
+   * to the immutable list created in the getDrillBasicRules() method below.
+   *
+   * @param optimizerRulesContext - used to get the list of planner settings, other rules may
+   *                                also in the future need to get other query state from this,
+   *                                such as the available list of UDFs (as is used by the
+   *                                DrillMergeProjectRule created in getDrillBasicRules())
+   * @return - a list of rules that have been filtered to leave out
+   *         rules that have been turned off by system or session settings
+   */
+  static RuleSet getDrillUserConfigurableLogicalRules(OptimizerRulesContext optimizerRulesContext) {
+    final PlannerSettings ps = optimizerRulesContext.getPlannerSettings();
+
+    // This list is used to store rules that can be turned on an off
+    // by user facing planning options
+    final Builder<RelOptRule> userConfigurableRules = ImmutableSet.<RelOptRule>builder();
+
+    if (ps.isConstantFoldingEnabled()) {
+      // TODO - DRILL-2218
+      userConfigurableRules.add(ReduceExpressionsRule.PROJECT_INSTANCE);
+      userConfigurableRules.add(DrillReduceExpressionsRule.FILTER_INSTANCE_DRILL);
+      userConfigurableRules.add(DrillReduceExpressionsRule.CALC_INSTANCE_DRILL);
+    }
+
+    return RuleSets.ofList(userConfigurableRules.build());
+  }
+
+  /*
+   * These basic rules don't require any context, so singleton instances can be used.
+   * These are merged with per-query rules in getDrillBasicRules() below.
+   */
+  final static ImmutableSet<RelOptRule> staticRuleSet = ImmutableSet.<RelOptRule> builder().add(
+      // Add support for Distinct Union (by using Union-All followed by Distinct)
+      UnionToDistinctRule.INSTANCE,
+
+      // Add support for WHERE style joins.
+      DrillFilterJoinRules.DRILL_FILTER_ON_JOIN,
+      DrillFilterJoinRules.DRILL_JOIN,
+      JoinPushExpressionsRule.INSTANCE,
+      // End support for WHERE style joins.
+
+      /*
+       Filter push-down related rules
+       */
+      DrillPushFilterPastProjectRule.INSTANCE,
+      // Due to infinite loop in planning (DRILL-3257), temporarily disable this rule
+      //FilterSetOpTransposeRule.INSTANCE,
+      FilterAggregateTransposeRule.INSTANCE,
+
+      FilterMergeRule.INSTANCE,
+      AggregateRemoveRule.INSTANCE,
+      ProjectRemoveRule.INSTANCE,
+      SortRemoveRule.INSTANCE,
+
+      AggregateExpandDistinctAggregatesRule.JOIN,
+      DrillReduceAggregatesRule.INSTANCE,
+
+      /*
+       Projection push-down related rules
+       */
+      DrillPushProjectPastFilterRule.INSTANCE,
+      DrillPushProjectPastJoinRule.INSTANCE,
+      // Due to infinite loop in planning (DRILL-3257), temporarily disable this rule
+      //DrillProjectSetOpTransposeRule.INSTANCE,
+      ProjectWindowTransposeRule.INSTANCE,
+      DrillPushProjIntoScan.INSTANCE,
+
+      /*
+       Convert from Calcite Logical to Drill Logical Rules.
+       */
+      ExpandConversionRule.INSTANCE,
+      DrillScanRule.INSTANCE,
+      DrillFilterRule.INSTANCE,
+      DrillProjectRule.INSTANCE,
+      DrillWindowRule.INSTANCE,
+      DrillAggregateRule.INSTANCE,
+
+      DrillLimitRule.INSTANCE,
+      DrillSortRule.INSTANCE,
+      DrillJoinRule.INSTANCE,
+      DrillUnionAllRule.INSTANCE,
+      DrillValuesRule.INSTANCE
+      ).build();
+
+  /**
+   * Get an immutable list of rules that will always be used when running
+   * logical planning.
+   *
+   * This cannot be a static singleton because some of the rules need to
+   * reference state owned by the current query (including its allocator).
+   *
+   * If a logical rule needs to be user configurable, such as turning
+   * it on and off with a system/session option, add it in the
+   * getDrillUserConfigurableLogicalRules() method instead of here.
+   *
+   * @param optimizerRulesContext - shared state used during planning, currently used here
+   *                                to gain access to the function registry described above.
+   * @return - a RuleSet containing the logical rules that will always
+   *           be used, either by VolcanoPlanner directly, or
+   *           used VolcanoPlanner as pre-processing for LOPTPlanner.
+   *
+   * Note : Join permutation rule is excluded here.
+   */
+  static RuleSet getDrillBasicRules(OptimizerRulesContext optimizerRulesContext) {
+    /*
+     * We have to create another copy of the ruleset with the context dependent elements;
+     * this cannot be reused across queries.
+     */
+    final ImmutableSet<RelOptRule> basicRules = ImmutableSet.<RelOptRule>builder()
+        .addAll(staticRuleSet)
+        .add(
+            DrillMergeProjectRule.getInstance(true, RelFactories.DEFAULT_PROJECT_FACTORY,
+                optimizerRulesContext.getFunctionRegistry())
+            )
+        .build();
+
+    return RuleSets.ofList(basicRules);
+  }
+
+  /**
+   *   Get an immutable list of partition pruning rules that will be used in logical planning.
+   */
+  static RuleSet getPruneScanRules(OptimizerRulesContext optimizerRulesContext) {
+    final ImmutableSet<RelOptRule> pruneRules = ImmutableSet.<RelOptRule>builder()
+        .add(
+            PruneScanRule.getDirFilterOnProject(optimizerRulesContext),
+            PruneScanRule.getDirFilterOnScan(optimizerRulesContext),
+            ParquetPruneScanRule.getFilterOnProjectParquet(optimizerRulesContext),
+            ParquetPruneScanRule.getFilterOnScanParquet(optimizerRulesContext),
+            DrillPushLimitToScanRule.LIMIT_ON_SCAN,
+            DrillPushLimitToScanRule.LIMIT_ON_PROJECT
+        )
+        .build();
+
+    return RuleSets.ofList(pruneRules);
+  }
+
+  /**
+   *  Get an immutable list of directory-based partition pruing rules that will be used in Calcite logical planning.
+   * @param optimizerRulesContext
+   * @return
+   */
+  static RuleSet getDirPruneScanRules(OptimizerRulesContext optimizerRulesContext) {
+    final ImmutableSet<RelOptRule> pruneRules = ImmutableSet.<RelOptRule>builder()
+        .add(
+            PruneScanRule.getDirFilterOnProject(optimizerRulesContext),
+            PruneScanRule.getDirFilterOnScan(optimizerRulesContext)
+        )
+        .build();
+
+    return RuleSets.ofList(pruneRules);
+
+  }
+
+  // Ruleset for join permutation, used only in VolcanoPlanner.
+  static RuleSet getJoinPermRules(OptimizerRulesContext optimizerRulesContext) {
+    return RuleSets.ofList(ImmutableSet.<RelOptRule> builder().add( //
+        JoinPushThroughJoinRule.RIGHT,
+        JoinPushThroughJoinRule.LEFT
+        ).build());
+  }
+
+  static final RuleSet DRILL_PHYSICAL_DISK = RuleSets.ofList(ImmutableSet.of(
+      ProjectPrule.INSTANCE
+    ));
+
+  static final RuleSet getPhysicalRules(OptimizerRulesContext optimizerRulesContext) {
+    final List<RelOptRule> ruleList = new ArrayList<RelOptRule>();
+    final PlannerSettings ps = optimizerRulesContext.getPlannerSettings();
+
+    ruleList.add(ConvertCountToDirectScan.AGG_ON_PROJ_ON_SCAN);
+    ruleList.add(ConvertCountToDirectScan.AGG_ON_SCAN);
+    ruleList.add(SortConvertPrule.INSTANCE);
+    ruleList.add(SortPrule.INSTANCE);
+    ruleList.add(ProjectPrule.INSTANCE);
+    ruleList.add(ScanPrule.INSTANCE);
+    ruleList.add(ScreenPrule.INSTANCE);
+    ruleList.add(ExpandConversionRule.INSTANCE);
+    ruleList.add(FilterPrule.INSTANCE);
+    ruleList.add(LimitPrule.INSTANCE);
+    ruleList.add(WriterPrule.INSTANCE);
+    ruleList.add(WindowPrule.INSTANCE);
+    ruleList.add(PushLimitToTopN.INSTANCE);
+    ruleList.add(LimitUnionExchangeTransposeRule.INSTANCE);
+    ruleList.add(UnionAllPrule.INSTANCE);
+    ruleList.add(ValuesPrule.INSTANCE);
+
+    if (ps.isHashAggEnabled()) {
+      ruleList.add(HashAggPrule.INSTANCE);
+    }
+
+    if (ps.isStreamAggEnabled()) {
+      ruleList.add(StreamAggPrule.INSTANCE);
+    }
+
+    if (ps.isHashJoinEnabled()) {
+      ruleList.add(HashJoinPrule.DIST_INSTANCE);
+
+      if(ps.isBroadcastJoinEnabled()){
+        ruleList.add(HashJoinPrule.BROADCAST_INSTANCE);
+      }
+    }
+
+    if (ps.isMergeJoinEnabled()) {
+      ruleList.add(MergeJoinPrule.DIST_INSTANCE);
+
+      if(ps.isBroadcastJoinEnabled()){
+        ruleList.add(MergeJoinPrule.BROADCAST_INSTANCE);
+      }
+
+    }
+
+    // NLJ plans consist of broadcasting the right child, hence we need
+    // broadcast join enabled.
+    if (ps.isNestedLoopJoinEnabled() && ps.isBroadcastJoinEnabled()) {
+      ruleList.add(NestedLoopJoinPrule.INSTANCE);
+    }
+
+    return RuleSets.ofList(ImmutableSet.copyOf(ruleList));
+  }
+
+  static RuleSet create(ImmutableSet<RelOptRule> rules) {
+    return RuleSets.ofList(rules);
+  }
+
+  static RuleSet mergedRuleSets(RuleSet... ruleSets) {
+    final Builder<RelOptRule> relOptRuleSetBuilder = ImmutableSet.builder();
+    for (final RuleSet ruleSet : ruleSets) {
+      for (final RelOptRule relOptRule : ruleSet) {
+        relOptRuleSetBuilder.add(relOptRule);
+      }
+    }
+    return RuleSets.ofList(relOptRuleSetBuilder.build());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PlannerType.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PlannerType.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PlannerType.java
new file mode 100644
index 0000000..d1abbd7
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PlannerType.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner;
+
+public enum PlannerType {
+  HEP, HEP_BOTTOM_UP, VOLCANO
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillRuleSets.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillRuleSets.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillRuleSets.java
deleted file mode 100644
index 230cee2..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillRuleSets.java
+++ /dev/null
@@ -1,329 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.planner.logical;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.calcite.plan.RelOptRule;
-import org.apache.calcite.plan.volcano.AbstractConverter.ExpandConversionRule;
-import org.apache.calcite.rel.core.RelFactories;
-import org.apache.calcite.rel.rules.AggregateExpandDistinctAggregatesRule;
-import org.apache.calcite.rel.rules.AggregateRemoveRule;
-import org.apache.calcite.rel.rules.FilterAggregateTransposeRule;
-import org.apache.calcite.rel.rules.FilterMergeRule;
-import org.apache.calcite.rel.rules.FilterSetOpTransposeRule;
-import org.apache.calcite.rel.rules.JoinPushExpressionsRule;
-import org.apache.calcite.rel.rules.JoinPushThroughJoinRule;
-import org.apache.calcite.rel.rules.JoinToMultiJoinRule;
-import org.apache.calcite.rel.rules.LoptOptimizeJoinRule;
-import org.apache.calcite.rel.rules.ProjectRemoveRule;
-import org.apache.calcite.rel.rules.ProjectWindowTransposeRule;
-import org.apache.calcite.rel.rules.ReduceExpressionsRule;
-import org.apache.calcite.rel.rules.SortRemoveRule;
-import org.apache.calcite.rel.rules.UnionToDistinctRule;
-import org.apache.calcite.tools.RuleSet;
-import org.apache.commons.digester.Rules;
-import org.apache.drill.exec.ops.OptimizerRulesContext;
-import org.apache.drill.exec.planner.logical.partition.ParquetPruneScanRule;
-import org.apache.drill.exec.planner.logical.partition.PruneScanRule;
-import org.apache.drill.exec.planner.physical.ConvertCountToDirectScan;
-import org.apache.drill.exec.planner.physical.FilterPrule;
-import org.apache.drill.exec.planner.physical.HashAggPrule;
-import org.apache.drill.exec.planner.physical.HashJoinPrule;
-import org.apache.drill.exec.planner.physical.LimitPrule;
-import org.apache.drill.exec.planner.physical.LimitUnionExchangeTransposeRule;
-import org.apache.drill.exec.planner.physical.MergeJoinPrule;
-import org.apache.drill.exec.planner.physical.NestedLoopJoinPrule;
-import org.apache.drill.exec.planner.physical.PlannerSettings;
-import org.apache.drill.exec.planner.physical.ProjectPrule;
-import org.apache.drill.exec.planner.physical.PushLimitToTopN;
-import org.apache.drill.exec.planner.physical.ScanPrule;
-import org.apache.drill.exec.planner.physical.ScreenPrule;
-import org.apache.drill.exec.planner.physical.SortConvertPrule;
-import org.apache.drill.exec.planner.physical.SortPrule;
-import org.apache.drill.exec.planner.physical.StreamAggPrule;
-import org.apache.drill.exec.planner.physical.UnionAllPrule;
-import org.apache.drill.exec.planner.physical.ValuesPrule;
-import org.apache.drill.exec.planner.physical.WindowPrule;
-import org.apache.drill.exec.planner.physical.WriterPrule;
-
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableSet.Builder;
-
-public class DrillRuleSets {
-  //private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillRuleSets.class);
-
-  public static final RelOptRule DRILL_JOIN_TO_MULTIJOIN_RULE = new JoinToMultiJoinRule(DrillJoinRel.class);
-  public static final RelOptRule DRILL_LOPT_OPTIMIZE_JOIN_RULE = new LoptOptimizeJoinRule(
-      DrillRelFactories.DRILL_LOGICAL_JOIN_FACTORY,
-      DrillRelFactories.DRILL_LOGICAL_PROJECT_FACTORY,
-      DrillRelFactories.DRILL_LOGICAL_FILTER_FACTORY);
-
-  /**
-   * Get a list of logical rules that can be turned on or off by session/system options.
-   *
-   * If a rule is intended to always be included with the logical set, it should be added
-   * to the immutable list created in the getDrillBasicRules() method below.
-   *
-   * @param optimizerRulesContext - used to get the list of planner settings, other rules may
-   *                                also in the future need to get other query state from this,
-   *                                such as the available list of UDFs (as is used by the
-   *                                DrillMergeProjectRule created in getDrillBasicRules())
-   * @return - a list of rules that have been filtered to leave out
-   *         rules that have been turned off by system or session settings
-   */
-  public static RuleSet getDrillUserConfigurableLogicalRules(OptimizerRulesContext optimizerRulesContext) {
-    final PlannerSettings ps = optimizerRulesContext.getPlannerSettings();
-
-    // This list is used to store rules that can be turned on an off
-    // by user facing planning options
-    final Builder<RelOptRule> userConfigurableRules = ImmutableSet.<RelOptRule>builder();
-
-    if (ps.isConstantFoldingEnabled()) {
-      // TODO - DRILL-2218
-      userConfigurableRules.add(ReduceExpressionsRule.PROJECT_INSTANCE);
-      userConfigurableRules.add(DrillReduceExpressionsRule.FILTER_INSTANCE_DRILL);
-      userConfigurableRules.add(DrillReduceExpressionsRule.CALC_INSTANCE_DRILL);
-    }
-
-    return new DrillRuleSet(userConfigurableRules.build());
-  }
-
-  /*
-   * These basic rules don't require any context, so singleton instances can be used.
-   * These are merged with per-query rules in getDrillBasicRules() below.
-   */
-  private final static ImmutableSet<RelOptRule> staticRuleSet = ImmutableSet.<RelOptRule>builder().add(
-      // Add support for Distinct Union (by using Union-All followed by Distinct)
-      UnionToDistinctRule.INSTANCE,
-
-      // Add support for WHERE style joins.
-      DrillFilterJoinRules.DRILL_FILTER_ON_JOIN,
-      DrillFilterJoinRules.DRILL_JOIN,
-      JoinPushExpressionsRule.INSTANCE,
-      // End support for WHERE style joins.
-
-      /*
-       Filter push-down related rules
-       */
-      DrillPushFilterPastProjectRule.INSTANCE,
-      // Due to infinite loop in planning (DRILL-3257), temporarily disable this rule
-      //FilterSetOpTransposeRule.INSTANCE,
-      FilterAggregateTransposeRule.INSTANCE,
-
-      FilterMergeRule.INSTANCE,
-      AggregateRemoveRule.INSTANCE,
-      ProjectRemoveRule.INSTANCE,
-      SortRemoveRule.INSTANCE,
-
-      AggregateExpandDistinctAggregatesRule.JOIN,
-      DrillReduceAggregatesRule.INSTANCE,
-
-      /*
-       Projection push-down related rules
-       */
-      DrillPushProjectPastFilterRule.INSTANCE,
-      DrillPushProjectPastJoinRule.INSTANCE,
-      // Due to infinite loop in planning (DRILL-3257), temporarily disable this rule
-      //DrillProjectSetOpTransposeRule.INSTANCE,
-      ProjectWindowTransposeRule.INSTANCE,
-      DrillPushProjIntoScan.INSTANCE,
-
-      /*
-       Convert from Calcite Logical to Drill Logical Rules.
-       */
-      ExpandConversionRule.INSTANCE,
-      DrillScanRule.INSTANCE,
-      DrillFilterRule.INSTANCE,
-      DrillProjectRule.INSTANCE,
-      DrillWindowRule.INSTANCE,
-      DrillAggregateRule.INSTANCE,
-
-      DrillLimitRule.INSTANCE,
-      DrillSortRule.INSTANCE,
-      DrillJoinRule.INSTANCE,
-      DrillUnionAllRule.INSTANCE,
-      DrillValuesRule.INSTANCE
-      ).build();
-
-  /**
-   * Get an immutable list of rules that will always be used when running
-   * logical planning.
-   *
-   * This cannot be a static singleton because some of the rules need to
-   * reference state owned by the current query (including its allocator).
-   *
-   * If a logical rule needs to be user configurable, such as turning
-   * it on and off with a system/session option, add it in the
-   * getDrillUserConfigurableLogicalRules() method instead of here.
-   *
-   * @param optimizerRulesContext - shared state used during planning, currently used here
-   *                                to gain access to the function registry described above.
-   * @return - a RuleSet containing the logical rules that will always
-   *           be used, either by VolcanoPlanner directly, or
-   *           used VolcanoPlanner as pre-processing for LOPTPlanner.
-   *
-   * Note : Join permutation rule is excluded here.
-   */
-  public static RuleSet getDrillBasicRules(OptimizerRulesContext optimizerRulesContext) {
-    /*
-     * We have to create another copy of the ruleset with the context dependent elements;
-     * this cannot be reused across queries.
-     */
-    final ImmutableSet<RelOptRule> basicRules = ImmutableSet.<RelOptRule>builder()
-        .addAll(staticRuleSet)
-        .add(
-            DrillMergeProjectRule.getInstance(true, RelFactories.DEFAULT_PROJECT_FACTORY,
-                optimizerRulesContext.getFunctionRegistry())
-            )
-        .build();
-
-    return new DrillRuleSet(basicRules);
-  }
-
-  /**
-   *   Get an immutable list of partition pruning rules that will be used in logical planning.
-   */
-  public static RuleSet getPruneScanRules(OptimizerRulesContext optimizerRulesContext) {
-    final ImmutableSet<RelOptRule> pruneRules = ImmutableSet.<RelOptRule>builder()
-        .add(
-            PruneScanRule.getDirFilterOnProject(optimizerRulesContext),
-            PruneScanRule.getDirFilterOnScan(optimizerRulesContext),
-            ParquetPruneScanRule.getFilterOnProjectParquet(optimizerRulesContext),
-            ParquetPruneScanRule.getFilterOnScanParquet(optimizerRulesContext),
-            DrillPushLimitToScanRule.LIMIT_ON_SCAN,
-            DrillPushLimitToScanRule.LIMIT_ON_PROJECT
-        )
-        .build();
-
-    return new DrillRuleSet(pruneRules);
-  }
-
-  /**
-   *  Get an immutable list of directory-based partition pruing rules that will be used in Calcite logical planning.
-   * @param optimizerRulesContext
-   * @return
-   */
-  public static RuleSet getDirPruneScanRules(OptimizerRulesContext optimizerRulesContext) {
-    final ImmutableSet<RelOptRule> pruneRules = ImmutableSet.<RelOptRule>builder()
-        .add(
-            PruneScanRule.getDirFilterOnProject(optimizerRulesContext),
-            PruneScanRule.getDirFilterOnScan(optimizerRulesContext)
-        )
-        .build();
-
-    return new DrillRuleSet(pruneRules);
-
-  }
-
-  // Ruleset for join permutation, used only in VolcanoPlanner.
-  public static RuleSet getJoinPermRules(OptimizerRulesContext optimizerRulesContext) {
-    return new DrillRuleSet(ImmutableSet.<RelOptRule> builder().add( //
-        JoinPushThroughJoinRule.RIGHT,
-        JoinPushThroughJoinRule.LEFT
-        ).build());
-  }
-
-  public static final RuleSet DRILL_PHYSICAL_DISK = new DrillRuleSet(ImmutableSet.of(
-      ProjectPrule.INSTANCE
-    ));
-
-  public static final RuleSet getPhysicalRules(OptimizerRulesContext optimizerRulesContext) {
-    final List<RelOptRule> ruleList = new ArrayList<RelOptRule>();
-    final PlannerSettings ps = optimizerRulesContext.getPlannerSettings();
-
-    ruleList.add(ConvertCountToDirectScan.AGG_ON_PROJ_ON_SCAN);
-    ruleList.add(ConvertCountToDirectScan.AGG_ON_SCAN);
-    ruleList.add(SortConvertPrule.INSTANCE);
-    ruleList.add(SortPrule.INSTANCE);
-    ruleList.add(ProjectPrule.INSTANCE);
-    ruleList.add(ScanPrule.INSTANCE);
-    ruleList.add(ScreenPrule.INSTANCE);
-    ruleList.add(ExpandConversionRule.INSTANCE);
-    ruleList.add(FilterPrule.INSTANCE);
-    ruleList.add(LimitPrule.INSTANCE);
-    ruleList.add(WriterPrule.INSTANCE);
-    ruleList.add(WindowPrule.INSTANCE);
-    ruleList.add(PushLimitToTopN.INSTANCE);
-    ruleList.add(LimitUnionExchangeTransposeRule.INSTANCE);
-    ruleList.add(UnionAllPrule.INSTANCE);
-    ruleList.add(ValuesPrule.INSTANCE);
-
-    if (ps.isHashAggEnabled()) {
-      ruleList.add(HashAggPrule.INSTANCE);
-    }
-
-    if (ps.isStreamAggEnabled()) {
-      ruleList.add(StreamAggPrule.INSTANCE);
-    }
-
-    if (ps.isHashJoinEnabled()) {
-      ruleList.add(HashJoinPrule.DIST_INSTANCE);
-
-      if(ps.isBroadcastJoinEnabled()){
-        ruleList.add(HashJoinPrule.BROADCAST_INSTANCE);
-      }
-    }
-
-    if (ps.isMergeJoinEnabled()) {
-      ruleList.add(MergeJoinPrule.DIST_INSTANCE);
-
-      if(ps.isBroadcastJoinEnabled()){
-        ruleList.add(MergeJoinPrule.BROADCAST_INSTANCE);
-      }
-
-    }
-
-    // NLJ plans consist of broadcasting the right child, hence we need
-    // broadcast join enabled.
-    if (ps.isNestedLoopJoinEnabled() && ps.isBroadcastJoinEnabled()) {
-      ruleList.add(NestedLoopJoinPrule.INSTANCE);
-    }
-
-    return new DrillRuleSet(ImmutableSet.copyOf(ruleList));
-  }
-
-  public static RuleSet create(ImmutableSet<RelOptRule> rules) {
-    return new DrillRuleSet(rules);
-  }
-
-  public static RuleSet mergedRuleSets(RuleSet...ruleSets) {
-    final Builder<RelOptRule> relOptRuleSetBuilder = ImmutableSet.builder();
-    for (final RuleSet ruleSet : ruleSets) {
-      for (final RelOptRule relOptRule : ruleSet) {
-        relOptRuleSetBuilder.add(relOptRule);
-      }
-    }
-    return new DrillRuleSet(relOptRuleSetBuilder.build());
-  }
-
-  private static class DrillRuleSet implements RuleSet{
-    final ImmutableSet<RelOptRule> rules;
-
-    public DrillRuleSet(ImmutableSet<RelOptRule> rules) {
-      this.rules = rules;
-    }
-
-    @Override
-    public Iterator<RelOptRule> iterator() {
-      return rules.iterator();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillConvertletTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillConvertletTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillConvertletTable.java
index 8fddd14..4ade513 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillConvertletTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillConvertletTable.java
@@ -31,6 +31,8 @@ public class DrillConvertletTable implements SqlRexConvertletTable{
 
   public static HashMap<SqlOperator, SqlRexConvertlet> map = new HashMap<>();
 
+  public static SqlRexConvertletTable INSTANCE = new DrillConvertletTable();
+
   static {
     // Use custom convertlet for extract function
     map.put(SqlStdOperatorTable.EXTRACT, DrillExtractConvertlet.INSTANCE);
@@ -56,4 +58,7 @@ public class DrillConvertletTable implements SqlRexConvertletTable{
 
     return StandardConvertletTable.INSTANCE.get(call);
   }
+
+  private DrillConvertletTable() {
+  }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
index 1dfc04d..60daac1 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
@@ -18,36 +18,15 @@
 package org.apache.drill.exec.planner.sql;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 
-import org.apache.calcite.config.Lex;
-import org.apache.calcite.plan.ConventionTraitDef;
-import org.apache.calcite.plan.RelOptCostFactory;
-import org.apache.calcite.plan.RelTraitDef;
-import org.apache.calcite.plan.hep.HepPlanner;
-import org.apache.calcite.plan.hep.HepProgramBuilder;
-import org.apache.calcite.rel.RelCollationTraitDef;
-import org.apache.calcite.rel.rules.ProjectToWindowRule;
-import org.apache.calcite.rel.rules.ReduceExpressionsRule;
 import org.apache.calcite.sql.SqlNode;
 import org.apache.calcite.sql.parser.SqlParseException;
-import org.apache.calcite.sql.parser.SqlParser;
-import org.apache.calcite.sql.parser.SqlParserPos;
-import org.apache.calcite.tools.FrameworkConfig;
-import org.apache.calcite.tools.Frameworks;
-import org.apache.calcite.tools.Planner;
 import org.apache.calcite.tools.RelConversionException;
-import org.apache.calcite.tools.RuleSet;
 import org.apache.calcite.tools.ValidationException;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.exec.ops.QueryContext;
+import org.apache.drill.exec.ops.UdfUtilities;
 import org.apache.drill.exec.physical.PhysicalPlan;
-import org.apache.drill.exec.planner.cost.DrillCostBase;
-import org.apache.drill.exec.planner.logical.DrillConstExecutor;
-import org.apache.drill.exec.planner.logical.DrillRuleSets;
-import org.apache.drill.exec.planner.physical.DrillDistributionTraitDef;
-import org.apache.drill.exec.planner.physical.PlannerSettings;
 import org.apache.drill.exec.planner.sql.handlers.AbstractSqlHandler;
 import org.apache.drill.exec.planner.sql.handlers.DefaultSqlHandler;
 import org.apache.drill.exec.planner.sql.handlers.ExplainHandler;
@@ -55,8 +34,6 @@ import org.apache.drill.exec.planner.sql.handlers.SetOptionHandler;
 import org.apache.drill.exec.planner.sql.handlers.SqlHandlerConfig;
 import org.apache.drill.exec.planner.sql.parser.DrillSqlCall;
 import org.apache.drill.exec.planner.sql.parser.SqlCreateTable;
-import org.apache.drill.exec.planner.sql.parser.impl.DrillParserWithCompoundIdConverter;
-import org.apache.drill.exec.planner.types.DrillRelDataTypeSystem;
 import org.apache.drill.exec.testing.ControlsInjector;
 import org.apache.drill.exec.testing.ControlsInjectorFactory;
 import org.apache.drill.exec.util.Pointer;
@@ -68,111 +45,29 @@ public class DrillSqlWorker {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillSqlWorker.class);
   private static final ControlsInjector injector = ControlsInjectorFactory.getInjector(DrillSqlWorker.class);
 
-  private final Planner planner;
-  private final HepPlanner hepPlanner;
-  public final static int LOGICAL_RULES = 0;
-  public final static int PHYSICAL_MEM_RULES = 1;
-  public final static int LOGICAL_HEP_JOIN_RULES = 2;
-  public final static int LOGICAL_HEP_JOIN__PP_RULES = 3;
-
-  private final QueryContext context;
-
-  public DrillSqlWorker(QueryContext context) {
-    // Calcite is not fully generified
-    @SuppressWarnings("rawtypes")
-    final List<RelTraitDef> traitDefs = new ArrayList<>();
-
-    traitDefs.add(ConventionTraitDef.INSTANCE);
-    traitDefs.add(DrillDistributionTraitDef.INSTANCE);
-    traitDefs.add(RelCollationTraitDef.INSTANCE);
-    this.context = context;
-    RelOptCostFactory costFactory = (context.getPlannerSettings().useDefaultCosting()) ?
-        null : new DrillCostBase.DrillCostFactory() ;
-    int idMaxLength = (int)context.getPlannerSettings().getIdentifierMaxLength();
-
-    FrameworkConfig config = Frameworks.newConfigBuilder() //
-        .parserConfig(SqlParser.configBuilder()
-            .setLex(Lex.MYSQL)
-            .setIdentifierMaxLength(idMaxLength)
-            .setParserFactory(DrillParserWithCompoundIdConverter.FACTORY)
-            .build()) //
-        .defaultSchema(context.getNewDefaultSchema()) //
-        .operatorTable(context.getDrillOperatorTable()) //
-        .traitDefs(traitDefs) //
-        .convertletTable(new DrillConvertletTable()) //
-        .context(context.getPlannerSettings()) //
-        .ruleSets(getRules(context)) //
-        .costFactory(costFactory) //
-        .executor(new DrillConstExecutor(context.getFunctionRegistry(), context, context.getPlannerSettings()))
-        .typeSystem(DrillRelDataTypeSystem.DRILL_REL_DATATYPE_SYSTEM) //
-        .build();
-    this.planner = Frameworks.getPlanner(config);
-    HepProgramBuilder builder = new HepProgramBuilder();
-    builder.addRuleClass(ReduceExpressionsRule.class);
-    builder.addRuleClass(ProjectToWindowRule.class);
-    this.hepPlanner = new HepPlanner(builder.build());
-    hepPlanner.addRule(ReduceExpressionsRule.CALC_INSTANCE);
-    hepPlanner.addRule(ProjectToWindowRule.PROJECT);
-  }
-
-  private RuleSet[] getRules(QueryContext context) {
-    final RuleSet[] storagePluginRules = context.getStorage().getStoragePluginRuleSet(context);
-
-    // Ruleset for the case where VolcanoPlanner is used for everything : join, filter/project pushdown, partition pruning.
-    RuleSet drillLogicalVolOnlyRules = DrillRuleSets.mergedRuleSets(
-        DrillRuleSets.getDrillBasicRules(context),
-        DrillRuleSets.getPruneScanRules(context),
-        DrillRuleSets.getJoinPermRules(context),
-        DrillRuleSets.getDrillUserConfigurableLogicalRules(context),
-        storagePluginRules[0]);
-
-    // Ruleset for the case where join planning is done in Hep-LOPT, filter/project pushdown and parttion pruning are done in VolcanoPlanner
-    RuleSet drillLogicalHepJoinRules = DrillRuleSets.mergedRuleSets(
-        DrillRuleSets.getDrillBasicRules(context),
-        DrillRuleSets.getPruneScanRules(context),
-        DrillRuleSets.getDrillUserConfigurableLogicalRules(context),
-        storagePluginRules[0]);
-
-    // Ruleset for the case where join planning and partition pruning is done in Hep, filter/project pushdown are done in VolcanoPlanner
-    RuleSet drillLogicalHepJoinPPRules = DrillRuleSets.mergedRuleSets(
-        DrillRuleSets.getDrillBasicRules(context),
-        DrillRuleSets.getDrillUserConfigurableLogicalRules(context),
-        storagePluginRules[0]);
-
-    // Ruleset for physical planning rules
-    RuleSet drillPhysicalMem = DrillRuleSets.mergedRuleSets(
-        DrillRuleSets.getPhysicalRules(context),
-        storagePluginRules[1]);
-
-    RuleSet[] allRules = new RuleSet[] {drillLogicalVolOnlyRules, drillPhysicalMem, drillLogicalHepJoinRules, drillLogicalHepJoinPPRules};
-
-    return allRules;
+  private DrillSqlWorker() {
   }
 
-  public PhysicalPlan getPlan(String sql) throws SqlParseException, ValidationException, ForemanSetupException{
-    return getPlan(sql, null);
+  public static PhysicalPlan getPlan(QueryContext context, String sql) throws SqlParseException, ValidationException,
+      ForemanSetupException {
+    return getPlan(context, sql, null);
   }
 
-  public PhysicalPlan getPlan(String sql, Pointer<String> textPlan) throws ForemanSetupException {
-    final PlannerSettings ps = this.context.getPlannerSettings();
+  public static PhysicalPlan getPlan(QueryContext context, String sql, Pointer<String> textPlan)
+      throws ForemanSetupException {
 
-    SqlNode sqlNode;
-    try {
-      injector.injectChecked(context.getExecutionControls(), "sql-parsing", ForemanSetupException.class);
-      sqlNode = planner.parse(sql);
-    } catch (SqlParseException e) {
-      throw UserException
-        .parseError(e)
-        .addContext(
-            "while parsing SQL query:\n" +
-            formatSQLParsingError(sql, e.getPos()))
-        .build(logger);
-    }
+    final SqlConverter parser = new SqlConverter(
+        context.getPlannerSettings(),
+        context.getNewDefaultSchema(),
+        context.getDrillOperatorTable(),
+        (UdfUtilities) context,
+        context.getFunctionRegistry());
 
-    AbstractSqlHandler handler;
-    SqlHandlerConfig config = new SqlHandlerConfig(hepPlanner, planner, context);
+    injector.injectChecked(context.getExecutionControls(), "sql-parsing", ForemanSetupException.class);
+    final SqlNode sqlNode = parser.parse(sql);
+    final AbstractSqlHandler handler;
+    final SqlHandlerConfig config = new SqlHandlerConfig(context, parser);
 
-    // TODO: make this use path scanning or something similar.
     switch(sqlNode.getKind()){
     case EXPLAIN:
       handler = new ExplainHandler(config);
@@ -213,25 +108,5 @@ public class DrillSqlWorker {
     }
   }
 
-  /**
-   *
-   * @param sql the SQL sent to the server
-   * @param pos the position of the error
-   * @return The sql with a ^ character under the error
-   */
-  static String formatSQLParsingError(String sql, SqlParserPos pos) {
-    StringBuilder sb = new StringBuilder();
-    String[] lines = sql.split("\n");
-    for (int i = 0; i < lines.length; i++) {
-      String line = lines[i];
-      sb.append(line).append("\n");
-      if (i == (pos.getLineNum() - 1)) {
-        for (int j = 0; j < pos.getColumnNum() - 1; j++) {
-          sb.append(" ");
-        }
-        sb.append("^\n");
-      }
-    }
-    return sb.toString();
-  }
+
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
new file mode 100644
index 0000000..2e0afea
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
@@ -0,0 +1,367 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.sql;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.calcite.adapter.java.JavaTypeFactory;
+import org.apache.calcite.avatica.util.Casing;
+import org.apache.calcite.avatica.util.Quoting;
+import org.apache.calcite.jdbc.CalciteSchemaImpl;
+import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
+import org.apache.calcite.plan.ConventionTraitDef;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptCostFactory;
+import org.apache.calcite.plan.RelOptTable;
+import org.apache.calcite.plan.volcano.VolcanoPlanner;
+import org.apache.calcite.prepare.CalciteCatalogReader;
+import org.apache.calcite.rel.RelCollationTraitDef;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeSystemImpl;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.sql.SqlNode;
+import org.apache.calcite.sql.SqlOperatorTable;
+import org.apache.calcite.sql.parser.SqlParseException;
+import org.apache.calcite.sql.parser.SqlParser;
+import org.apache.calcite.sql.parser.SqlParserImplFactory;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.util.ChainedSqlOperatorTable;
+import org.apache.calcite.sql.validate.SqlConformance;
+import org.apache.calcite.sql.validate.SqlValidatorCatalogReader;
+import org.apache.calcite.sql.validate.SqlValidatorImpl;
+import org.apache.calcite.sql2rel.RelDecorrelator;
+import org.apache.calcite.sql2rel.SqlToRelConverter;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
+import org.apache.drill.exec.ops.UdfUtilities;
+import org.apache.drill.exec.planner.cost.DrillCostBase;
+import org.apache.drill.exec.planner.logical.DrillConstExecutor;
+import org.apache.drill.exec.planner.physical.DrillDistributionTraitDef;
+import org.apache.drill.exec.planner.physical.PlannerSettings;
+import org.apache.drill.exec.planner.sql.parser.impl.DrillParserWithCompoundIdConverter;
+
+import com.google.common.base.Joiner;
+
+/**
+ * Class responsible for managing parsing, validation and toRel conversion for sql statements.
+ */
+public class SqlConverter {
+  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(SqlConverter.class);
+
+  private static DrillTypeSystem DRILL_TYPE_SYSTEM = new DrillTypeSystem();
+
+  private final JavaTypeFactory typeFactory;
+  private final SqlParser.Config parserConfig;
+  private final CalciteCatalogReader catalog;
+  private final PlannerSettings settings;
+  private final SchemaPlus rootSchema;
+  private final SchemaPlus defaultSchema;
+  private final SqlOperatorTable opTab;
+  private final RelOptCostFactory costFactory;
+  private final DrillValidator validator;
+  private final boolean isInnerQuery;
+  private final UdfUtilities util;
+  private final FunctionImplementationRegistry functions;
+
+  private String sql;
+  private VolcanoPlanner planner;
+
+
+  public SqlConverter(PlannerSettings settings, SchemaPlus defaultSchema,
+      final SqlOperatorTable operatorTable, UdfUtilities util, FunctionImplementationRegistry functions) {
+    this.settings = settings;
+    this.util = util;
+    this.functions = functions;
+    this.parserConfig = new ParserConfig();
+    this.isInnerQuery = false;
+    this.typeFactory = new JavaTypeFactoryImpl(DRILL_TYPE_SYSTEM);
+    this.defaultSchema = defaultSchema;
+    this.rootSchema = rootSchema(defaultSchema);
+    this.catalog = new CalciteCatalogReader(
+        CalciteSchemaImpl.from(rootSchema),
+        parserConfig.caseSensitive(),
+        CalciteSchemaImpl.from(defaultSchema).path(null),
+        typeFactory);
+    this.opTab = new ChainedSqlOperatorTable(Arrays.asList(operatorTable, catalog));
+    this.costFactory = (settings.useDefaultCosting()) ? null : new DrillCostBase.DrillCostFactory();
+    this.validator = new DrillValidator(opTab, catalog, typeFactory, SqlConformance.DEFAULT);
+    validator.setIdentifierExpansion(true);
+  }
+
+  private SqlConverter(SqlConverter parent, SchemaPlus defaultSchema, SchemaPlus rootSchema,
+      CalciteCatalogReader catalog) {
+    this.parserConfig = parent.parserConfig;
+    this.defaultSchema = defaultSchema;
+    this.functions = parent.functions;
+    this.util = parent.util;
+    this.isInnerQuery = true;
+    this.typeFactory = parent.typeFactory;
+    this.costFactory = parent.costFactory;
+    this.settings = parent.settings;
+    this.rootSchema = rootSchema;
+    this.catalog = catalog;
+    this.opTab = parent.opTab;
+    this.planner = parent.planner;
+    this.validator = new DrillValidator(opTab, catalog, typeFactory, SqlConformance.DEFAULT);
+    validator.setIdentifierExpansion(true);
+  }
+
+
+  public SqlNode parse(String sql) {
+    try {
+      SqlParser parser = SqlParser.create(sql, parserConfig);
+      return parser.parseStmt();
+    } catch (SqlParseException e) {
+      UserException.Builder builder = UserException
+          .parseError(e)
+          .addContext("SQL Query", formatSQLParsingError(sql, e.getPos()));
+      if (isInnerQuery) {
+        builder.message("Failure parsing a view your query is dependent upon.");
+      }
+      throw builder.build(logger);
+    }
+
+  }
+
+  /**
+   * Runs Calcite validation over a parsed statement.
+   *
+   * @param parsedNode the tree returned by {@link #parse(String)}
+   * @return the validated (and identifier-expanded) tree
+   * @throws UserException (validation error) with the original SQL attached;
+   *         when expanding a view, the message notes the dependent view
+   */
+  public SqlNode validate(final SqlNode parsedNode) {
+    try {
+      SqlNode validatedNode = validator.validate(parsedNode);
+      return validatedNode;
+    } catch (RuntimeException e) {
+      UserException.Builder builder = UserException
+          .validationError(e)
+          // NOTE(review): 'sql' is a field declared above this excerpt;
+          // presumably it holds the original query text — confirm it is
+          // kept in sync when this converter is reused for another query.
+          .addContext("SQL Query", sql);
+      if (isInnerQuery) {
+        builder.message("Failure validating a view your query is dependent upon.");
+      }
+      throw builder.build(logger);
+    }
+  }
+
+  /** Returns the row type the validator derived for the given validated node. */
+  public RelDataType getOutputType(SqlNode validatedNode) {
+    return validator.getValidatedNodeType(validatedNode);
+  }
+
+  /** Type factory shared across parsing, validation and planning. */
+  public JavaTypeFactory getTypeFactory() {
+    return typeFactory;
+  }
+
+  /** Operator table chaining Drill's operators with the catalog reader. */
+  public SqlOperatorTable getOpTab() {
+    return opTab;
+  }
+
+  /** Drill cost factory, or null when default Calcite costing is in use. */
+  public RelOptCostFactory getCostFactory() {
+    return costFactory;
+  }
+
+  /** Root of the schema tree this converter resolves against. */
+  public SchemaPlus getRootSchema() {
+    return rootSchema;
+  }
+
+  /** Default (session) schema unqualified names resolve against. */
+  public SchemaPlus getDefaultSchema() {
+    return defaultSchema;
+  }
+
+  /**
+   * Thin subclass of Calcite's {@link SqlValidatorImpl}; exists only because
+   * the superclass constructor is protected. Adds no behavior of its own.
+   */
+  private class DrillValidator extends SqlValidatorImpl {
+    protected DrillValidator(SqlOperatorTable opTab, SqlValidatorCatalogReader catalogReader,
+        RelDataTypeFactory typeFactory, SqlConformance conformance) {
+      super(opTab, catalogReader, typeFactory, conformance);
+    }
+  }
+
+  /**
+   * Drill's overrides of Calcite's default type-system limits: wide default
+   * precision (65536) for character/binary types and 38 for numeric
+   * precision/scale.
+   */
+  private static class DrillTypeSystem extends RelDataTypeSystemImpl {
+
+    @Override
+    public int getDefaultPrecision(SqlTypeName typeName) {
+      switch (typeName) {
+      case CHAR:
+      case BINARY:
+      case VARCHAR:
+      case VARBINARY:
+        // 64K default width for string/binary types; everything else keeps
+        // Calcite's defaults.
+        return 65536;
+      default:
+        return super.getDefaultPrecision(typeName);
+      }
+    }
+
+    @Override
+    public int getMaxNumericScale() {
+      return 38;
+    }
+
+    @Override
+    public int getMaxNumericPrecision() {
+      return 38;
+    }
+
+  }
+
+  /**
+   * Converts a validated statement into a relational expression tree.
+   * Lazily creates the shared {@link VolcanoPlanner} on first use (registering
+   * Drill's constant executor and the convention, distribution and collation
+   * trait definitions), then runs Calcite's SqlToRelConverter followed by
+   * type flattening and decorrelation.
+   *
+   * @param validatedNode tree returned by {@link #validate(SqlNode)}
+   * @return the decorrelated relational tree
+   */
+  public RelNode toRel(
+      final SqlNode validatedNode) {
+    final RexBuilder rexBuilder = new RexBuilder(typeFactory);
+    if (planner == null) {
+      planner = new VolcanoPlanner(costFactory, settings);
+      planner.setExecutor(new DrillConstExecutor(functions, util, settings));
+      // Replace Calcite's default trait defs with exactly the three Drill uses.
+      planner.clearRelTraitDefs();
+      planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
+      planner.addRelTraitDef(DrillDistributionTraitDef.INSTANCE);
+      planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+    }
+
+    final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
+    final SqlToRelConverter sqlToRelConverter =
+        new SqlToRelConverter(new Expander(), validator, catalog, cluster, DrillConvertletTable.INSTANCE);
+
+    sqlToRelConverter.setTrimUnusedFields(false);
+    // Views are expanded via Expander, so table-access conversion is disabled.
+    sqlToRelConverter.enableTableAccessConversion(false);
+    // needsValidation=false (already validated); the query counts as top-level
+    // only when this converter is not itself expanding a view.
+    final RelNode rel = sqlToRelConverter.convertQuery(validatedNode, false, !isInnerQuery);
+    final RelNode rel2 = sqlToRelConverter.flattenTypes(rel, true);
+    final RelNode rel3 = RelDecorrelator.decorrelateQuery(rel2);
+    return rel3;
+
+  }
+
+  /**
+   * View expander handed to Calcite's SqlToRelConverter. Each view body is
+   * parsed/validated/converted by a child {@link SqlConverter} (created via
+   * the copy constructor) so that failures are reported as view failures and
+   * the same planner instance is reused.
+   */
+  private class Expander implements RelOptTable.ViewExpander {
+
+    public Expander() {
+    }
+
+    /**
+     * Expands a view under the current root schema, re-scoping only the
+     * catalog reader to the view's schema path.
+     */
+    public RelNode expandView(
+        RelDataType rowType,
+        String queryString,
+        List<String> schemaPath) {
+      SqlConverter parser = new SqlConverter(SqlConverter.this, defaultSchema, rootSchema,
+          catalog.withSchemaPath(schemaPath));
+      return expandView(queryString, parser);
+    }
+
+    /**
+     * Expands a view under a caller-supplied root schema: builds a catalog
+     * reader for that root, then walks schemaPath down from the root to find
+     * the view's default schema, failing with a validation error if any path
+     * element is missing.
+     */
+    @Override
+    public RelNode expandView(
+        RelDataType rowType,
+        String queryString,
+        SchemaPlus rootSchema, // new root schema
+        List<String> schemaPath) {
+      final CalciteCatalogReader catalogReader = new CalciteCatalogReader(
+          CalciteSchemaImpl.from(rootSchema),
+          parserConfig.caseSensitive(),
+          schemaPath,
+          typeFactory);
+      SchemaPlus schema = rootSchema;
+      for (String s : schemaPath) {
+        SchemaPlus newSchema = schema.getSubSchema(s);
+
+        if (newSchema == null) {
+          throw UserException
+              .validationError()
+              .message(
+              "Failure while attempting to expand view. Requested schema %s not available in schema %s.", s,
+                  schema.getName())
+              .addContext("View Context", Joiner.on(", ").join(schemaPath))
+              .addContext("View SQL", queryString)
+              .build(logger);
+        }
+
+        schema = newSchema;
+      }
+      SqlConverter parser = new SqlConverter(SqlConverter.this, schema, rootSchema, catalogReader);
+      return expandView(queryString, parser);
+    }
+
+    /** Runs the full parse -> validate -> toRel pipeline on the view body. */
+    private RelNode expandView(String queryString, SqlConverter converter) {
+      final SqlNode parsedNode = converter.parse(queryString);
+      final SqlNode validatedNode = converter.validate(parsedNode);
+      return converter.toRel(validatedNode);
+    }
+
+  }
+
+  /**
+   * Drill's SQL parser configuration: case-insensitive, back-tick quoting,
+   * identifiers kept in their original casing, identifier length taken from
+   * planner settings, and Drill's compound-identifier-converting parser.
+   */
+  private class ParserConfig implements SqlParser.Config {
+
+    // Snapshot of the session/system setting at construction time.
+    final long identifierMaxLength = settings.getIdentifierMaxLength();
+
+    @Override
+    public int identifierMaxLength() {
+      return (int) identifierMaxLength;
+    }
+
+    @Override
+    public Casing quotedCasing() {
+      // Preserve the user's casing for quoted identifiers.
+      return Casing.UNCHANGED;
+    }
+
+    @Override
+    public Casing unquotedCasing() {
+      // Unquoted identifiers are also left as typed (no upper/lower folding).
+      return Casing.UNCHANGED;
+    }
+
+    @Override
+    public Quoting quoting() {
+      return Quoting.BACK_TICK;
+    }
+
+    @Override
+    public boolean caseSensitive() {
+      return false;
+    }
+
+    @Override
+    public SqlParserImplFactory parserFactory() {
+      return DrillParserWithCompoundIdConverter.FACTORY;
+    }
+
+  }
+
+  /**
+   * Reproduces the SQL text with a caret line inserted directly under the
+   * line containing the parse error, pointing at the failing column.
+   *
+   * @param sql
+   *          the SQL sent to the server
+   * @param pos
+   *          the position of the error (1-based line/column)
+   * @return the SQL with a ^ character under the error
+   */
+  static String formatSQLParsingError(String sql, SqlParserPos pos) {
+    StringBuilder sb = new StringBuilder();
+    String[] lines = sql.split("\n");
+    for (int i = 0; i < lines.length; i++) {
+      String line = lines[i];
+      sb.append(line).append("\n");
+      // After echoing the offending line, emit a caret at the error column.
+      if (i == (pos.getLineNum() - 1)) {
+        for (int j = 0; j < pos.getColumnNum() - 1; j++) {
+          sb.append(" ");
+        }
+        sb.append("^\n");
+      }
+    }
+    return sb.toString();
+  }
+
+  /** Walks up the schema tree from the given schema to its root (the schema with no parent). */
+  private static SchemaPlus rootSchema(SchemaPlus schema) {
+    while (true) {
+      if (schema.getParentSchema() == null) {
+        return schema;
+      }
+      schema = schema.getParentSchema();
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/84b3a8a8/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/CreateTableHandler.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/CreateTableHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/CreateTableHandler.java
index e39074e..9f5d7e4 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/CreateTableHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/CreateTableHandler.java
@@ -22,10 +22,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
 import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.type.RelDataType;
@@ -37,16 +34,13 @@ import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.sql.SqlNode;
 import org.apache.calcite.tools.RelConversionException;
 import org.apache.calcite.tools.ValidationException;
-
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.common.types.TypeProtos.MajorType;
 import org.apache.drill.exec.physical.PhysicalPlan;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
-import org.apache.drill.exec.planner.logical.CreateTableEntry;
 import org.apache.drill.exec.planner.logical.DrillRel;
 import org.apache.drill.exec.planner.logical.DrillScreenRel;
 import org.apache.drill.exec.planner.logical.DrillWriterRel;
-import org.apache.drill.exec.planner.physical.DrillDistributionTrait;
 import org.apache.drill.exec.planner.physical.Prel;
 import org.apache.drill.exec.planner.physical.ProjectAllowDupPrel;
 import org.apache.drill.exec.planner.physical.ProjectPrel;
@@ -60,6 +54,9 @@ import org.apache.drill.exec.util.Pointer;
 import org.apache.drill.exec.work.foreman.ForemanSetupException;
 import org.apache.drill.exec.work.foreman.SqlUnsupportedException;
 
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
 public class CreateTableHandler extends DefaultSqlHandler {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(CreateTableHandler.class);
 
@@ -80,9 +77,9 @@ public class CreateTableHandler extends DefaultSqlHandler {
     final RelNode newTblRelNode =
         SqlHandlerUtil.resolveNewTableRel(false, sqlCreateTable.getFieldNames(), validatedRowType, queryRelNode);
 
-
     final AbstractSchema drillSchema =
-        SchemaUtilites.resolveToMutableDrillSchema(context.getNewDefaultSchema(), sqlCreateTable.getSchemaPath());
+        SchemaUtilites.resolveToMutableDrillSchema(config.getConverter().getDefaultSchema(),
+            sqlCreateTable.getSchemaPath());
     final String schemaPath = drillSchema.getFullSchemaName();
 
     if (SqlHandlerUtil.getTableFromSchema(drillSchema, newTblName) != null) {
@@ -93,13 +90,11 @@ public class CreateTableHandler extends DefaultSqlHandler {
 
     final RelNode newTblRelNodeWithPCol = SqlHandlerUtil.qualifyPartitionCol(newTblRelNode, sqlCreateTable.getPartitionColumns());
 
-    log("Optiq Logical", newTblRelNodeWithPCol, logger);
+    log("Calcite", newTblRelNodeWithPCol, logger, null);
 
     // Convert the query to Drill Logical plan and insert a writer operator on top.
     DrillRel drel = convertToDrel(newTblRelNodeWithPCol, drillSchema, newTblName, sqlCreateTable.getPartitionColumns(), newTblRelNode.getRowType());
-    log("Drill Logical", drel, logger);
     Prel prel = convertToPrel(drel, newTblRelNode.getRowType(), sqlCreateTable.getPartitionColumns());
-    log("Drill Physical", prel, logger);
     PhysicalOperator pop = convertToPop(prel);
     PhysicalPlan plan = convertToPlan(pop);
     log("Drill Plan", plan, logger);


Mime
View raw message