hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jcama...@apache.org
Subject [17/17] hive git commit: HIVE-12478: Improve Hive/Calcite Transitive Predicate inference (Jesus Camacho Rodriguez, reviewed by Laljo John Pullokkaran)
Date Wed, 27 Jan 2016 08:31:01 GMT
HIVE-12478: Improve Hive/Calcite Transitive Predicate inference (Jesus Camacho Rodriguez, reviewed by Laljo John Pullokkaran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/48b201ee
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/48b201ee
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/48b201ee

Branch: refs/heads/master
Commit: 48b201ee163252b2127ce04fbf660df70312888a
Parents: bf8becd
Author: Jesus Camacho Rodriguez <jcamacho@apache.org>
Authored: Wed Jan 27 09:10:01 2016 +0100
Committer: Jesus Camacho Rodriguez <jcamacho@apache.org>
Committed: Wed Jan 27 09:10:22 2016 +0100

----------------------------------------------------------------------
 .../ql/optimizer/calcite/HiveCalciteUtil.java   |   55 +-
 .../calcite/HiveHepPlannerContext.java          |   37 -
 .../optimizer/calcite/HivePlannerContext.java   |   43 +
 .../calcite/HiveVolcanoPlannerContext.java      |   37 -
 .../optimizer/calcite/cost/HiveCostModel.java   |   12 +-
 .../calcite/cost/HiveVolcanoPlanner.java        |    6 +-
 .../calcite/reloperators/HiveJoin.java          |    9 +-
 .../calcite/reloperators/HiveSemiJoin.java      |    9 +-
 .../calcite/reloperators/HiveUnion.java         |    4 +-
 .../calcite/rules/HiveJoinAddNotNullRule.java   |  119 +-
 .../HiveJoinPushTransitivePredicatesRule.java   |   99 +-
 .../calcite/rules/HivePreFilteringRule.java     |    8 +-
 .../calcite/rules/HiveRulesRegistry.java        |   29 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   93 +-
 .../calcite/TestCBORuleFiredOnlyOnce.java       |    2 +-
 .../clientpositive/annotate_stats_join.q.out    |   18 +-
 .../annotate_stats_join_pkfk.q.out              |    6 +-
 .../results/clientpositive/auto_join12.q.out    |   10 +-
 .../results/clientpositive/auto_join16.q.out    |    2 +-
 .../results/clientpositive/auto_join32.q.out    |    2 +-
 .../auto_join_without_localtask.q.out           |    6 +-
 .../bucketsortoptimize_insert_6.q.out           |   38 +-
 .../bucketsortoptimize_insert_7.q.out           |    2 +-
 .../test/results/clientpositive/cbo_const.q.out |    2 +-
 .../clientpositive/cbo_rp_lineage2.q.out        |    8 +-
 .../clientpositive/constprog_partitioner.q.out  |    4 +-
 .../clientpositive/correlationoptimizer1.q.out  |    8 +-
 .../clientpositive/correlationoptimizer10.q.out |   12 +-
 .../clientpositive/correlationoptimizer13.q.out |    2 +-
 .../clientpositive/correlationoptimizer8.q.out  |    6 +-
 .../clientpositive/correlationoptimizer9.q.out  |    4 +-
 .../clientpositive/dynamic_rdd_cache.q.out      |   54 +-
 .../clientpositive/filter_cond_pushdown.q.out   |    8 +-
 .../clientpositive/filter_join_breaktask.q.out  |    2 +-
 .../clientpositive/groupby_position.q.out       |    2 +-
 .../results/clientpositive/groupby_ppd.q.out    |   28 +-
 .../clientpositive/index_auto_mult_tables.q.out |   10 +-
 .../index_auto_mult_tables_compact.q.out        |   10 +-
 .../clientpositive/index_auto_self_join.q.out   |   16 +-
 .../results/clientpositive/index_bitmap3.q.out  |    4 +-
 .../clientpositive/index_bitmap_auto.q.out      |    4 +-
 ql/src/test/results/clientpositive/join12.q.out |   14 +-
 ql/src/test/results/clientpositive/join16.q.out |    2 +-
 ql/src/test/results/clientpositive/join34.q.out |    2 +-
 ql/src/test/results/clientpositive/join35.q.out |    2 +-
 ql/src/test/results/clientpositive/join42.q.out |    4 +-
 ql/src/test/results/clientpositive/join43.q.out |    4 +-
 .../clientpositive/join_alt_syntax.q.out        |   12 +-
 .../clientpositive/join_cond_pushdown_2.q.out   |   12 +-
 .../clientpositive/join_cond_pushdown_4.q.out   |   12 +-
 .../join_cond_pushdown_unqual2.q.out            |   12 +-
 .../join_cond_pushdown_unqual4.q.out            |   12 +-
 .../clientpositive/join_grp_diff_keys.q.out     |    8 +-
 .../test/results/clientpositive/lineage2.q.out  |   10 +-
 .../test/results/clientpositive/lineage3.q.out  |    8 +-
 .../llap/dynamic_partition_pruning.q.out        |  134 +-
 .../llap/tez_dynpart_hashjoin_1.q.out           |   24 +-
 .../clientpositive/llap/tez_self_join.q.out     |    2 +-
 .../llap/tez_vector_dynpart_hashjoin_1.q.out    |   24 +-
 .../vectorized_dynamic_partition_pruning.q.out  |  122 +-
 .../clientpositive/louter_join_ppr.q.out        |    2 +-
 .../clientpositive/mapjoin_mapjoin.q.out        |    2 +-
 .../test/results/clientpositive/mergejoin.q.out |    8 -
 .../results/clientpositive/mergejoins.q.out     |   19 +-
 .../clientpositive/mergejoins_mixed.q.out       |   76 +-
 .../test/results/clientpositive/orc_llap.q.out  |   16 +-
 .../results/clientpositive/perf/query13.q.out   |    4 +-
 .../results/clientpositive/perf/query17.q.out   |    6 +-
 .../results/clientpositive/perf/query18.q.out   |    4 +-
 .../results/clientpositive/perf/query19.q.out   |    2 +-
 .../results/clientpositive/perf/query25.q.out   |   12 +-
 .../results/clientpositive/perf/query29.q.out   |    8 +-
 .../results/clientpositive/perf/query31.q.out   |   12 +-
 .../results/clientpositive/perf/query32.q.out   |    6 +-
 .../results/clientpositive/perf/query34.q.out   |    4 +-
 .../results/clientpositive/perf/query39.q.out   |   68 +-
 .../results/clientpositive/perf/query40.q.out   |   69 +-
 .../results/clientpositive/perf/query42.q.out   |    2 +-
 .../results/clientpositive/perf/query45.q.out   |    2 +-
 .../results/clientpositive/perf/query46.q.out   |    2 +-
 .../results/clientpositive/perf/query48.q.out   |    4 +-
 .../results/clientpositive/perf/query50.q.out   |    6 +-
 .../results/clientpositive/perf/query52.q.out   |    2 +-
 .../results/clientpositive/perf/query54.q.out   |    4 +-
 .../results/clientpositive/perf/query55.q.out   |    2 +-
 .../results/clientpositive/perf/query58.q.out   |    8 +-
 .../results/clientpositive/perf/query64.q.out   |  218 +--
 .../results/clientpositive/perf/query65.q.out   |    2 +-
 .../results/clientpositive/perf/query66.q.out   |  629 +++++----
 .../results/clientpositive/perf/query68.q.out   |    2 +-
 .../results/clientpositive/perf/query70.q.out   |    2 +-
 .../results/clientpositive/perf/query71.q.out   |   10 +-
 .../results/clientpositive/perf/query72.q.out   |   77 +-
 .../results/clientpositive/perf/query73.q.out   |    2 +-
 .../results/clientpositive/perf/query75.q.out   | 1251 +++++++++---------
 .../results/clientpositive/perf/query76.q.out   |    4 +-
 .../results/clientpositive/perf/query80.q.out   |  257 ++--
 .../results/clientpositive/perf/query82.q.out   |    2 +-
 .../results/clientpositive/perf/query84.q.out   |    2 +-
 .../results/clientpositive/perf/query85.q.out   |    8 +-
 .../results/clientpositive/perf/query87.q.out   |    6 +-
 .../results/clientpositive/perf/query88.q.out   |    6 +-
 .../results/clientpositive/perf/query89.q.out   |    2 +-
 .../results/clientpositive/perf/query90.q.out   |    2 +-
 .../results/clientpositive/perf/query91.q.out   |    6 +-
 .../results/clientpositive/perf/query92.q.out   |    4 +-
 .../results/clientpositive/perf/query93.q.out   |   51 +-
 .../results/clientpositive/perf/query94.q.out   |   75 +-
 .../results/clientpositive/perf/query95.q.out   |    6 +-
 .../results/clientpositive/perf/query96.q.out   |    2 +-
 .../results/clientpositive/perf/query97.q.out   |  239 ++--
 .../results/clientpositive/ppd_gby_join.q.out   |    8 +-
 .../test/results/clientpositive/ppd_join.q.out  |    8 +-
 .../test/results/clientpositive/ppd_join2.q.out |   12 +-
 .../test/results/clientpositive/ppd_join3.q.out |   12 +-
 .../test/results/clientpositive/ppd_join5.q.out |    8 +-
 .../clientpositive/ppd_outer_join4.q.out        |    8 +-
 .../clientpositive/ppd_outer_join5.q.out        |   19 +-
 .../clientpositive/ppd_repeated_alias.q.out     |    2 +-
 .../results/clientpositive/ppd_udf_case.q.out   |    8 +-
 .../results/clientpositive/ppd_union_view.q.out |    4 +-
 .../results/clientpositive/quotedid_basic.q.out |   16 +-
 .../test/results/clientpositive/regex_col.q.out |    4 +-
 .../clientpositive/router_join_ppr.q.out        |    2 +-
 .../test/results/clientpositive/semijoin.q.out  |    2 +-
 .../test/results/clientpositive/semijoin4.q.out |   16 +-
 .../test/results/clientpositive/skewjoin.q.out  |   20 +-
 .../results/clientpositive/skewjoinopt12.q.out  |    8 +-
 .../results/clientpositive/skewjoinopt16.q.out  |    8 +-
 .../results/clientpositive/skewjoinopt17.q.out  |    8 +-
 .../results/clientpositive/skewjoinopt2.q.out   |   16 +-
 .../spark/annotate_stats_join.q.out             |   18 +-
 .../clientpositive/spark/auto_join12.q.out      |   36 +-
 .../clientpositive/spark/auto_join16.q.out      |    2 +-
 .../clientpositive/spark/auto_join32.q.out      |    2 +-
 .../spark/auto_join_without_localtask.q.out     |    2 +-
 .../spark/bucketsortoptimize_insert_6.q.out     |   28 +-
 .../spark/bucketsortoptimize_insert_7.q.out     |    4 +-
 .../spark/constprog_partitioner.q.out           |    4 +-
 .../spark/dynamic_rdd_cache.q.out               |   54 +-
 .../spark/filter_join_breaktask.q.out           |    2 +-
 .../clientpositive/spark/groupby_position.q.out |    2 +-
 .../spark/index_auto_self_join.q.out            |   12 +-
 .../clientpositive/spark/index_bitmap3.q.out    |    4 +-
 .../spark/index_bitmap_auto.q.out               |    4 +-
 .../results/clientpositive/spark/join12.q.out   |   14 +-
 .../results/clientpositive/spark/join16.q.out   |    2 +-
 .../results/clientpositive/spark/join34.q.out   |    2 +-
 .../results/clientpositive/spark/join35.q.out   |    2 +-
 .../clientpositive/spark/join_alt_syntax.q.out  |   12 +-
 .../spark/join_cond_pushdown_2.q.out            |   12 +-
 .../spark/join_cond_pushdown_4.q.out            |   12 +-
 .../spark/join_cond_pushdown_unqual2.q.out      |   12 +-
 .../spark/join_cond_pushdown_unqual4.q.out      |   12 +-
 .../clientpositive/spark/louter_join_ppr.q.out  |    2 +-
 .../clientpositive/spark/mapjoin_mapjoin.q.out  |    2 +-
 .../clientpositive/spark/mergejoins.q.out       |   19 +-
 .../clientpositive/spark/mergejoins_mixed.q.out |   76 +-
 .../clientpositive/spark/ppd_gby_join.q.out     |    8 +-
 .../results/clientpositive/spark/ppd_join.q.out |    8 +-
 .../clientpositive/spark/ppd_join2.q.out        |   12 +-
 .../clientpositive/spark/ppd_join3.q.out        |   12 +-
 .../clientpositive/spark/ppd_join5.q.out        |    8 +-
 .../clientpositive/spark/ppd_outer_join4.q.out  |    8 +-
 .../clientpositive/spark/ppd_outer_join5.q.out  |   19 +-
 .../clientpositive/spark/router_join_ppr.q.out  |    2 +-
 .../results/clientpositive/spark/semijoin.q.out |    2 +-
 .../results/clientpositive/spark/skewjoin.q.out |   20 +-
 .../clientpositive/spark/skewjoinopt12.q.out    |    8 +-
 .../clientpositive/spark/skewjoinopt16.q.out    |    8 +-
 .../clientpositive/spark/skewjoinopt17.q.out    |    8 +-
 .../clientpositive/spark/skewjoinopt2.q.out     |   16 +-
 .../clientpositive/spark/subquery_exists.q.out  |    2 +-
 .../clientpositive/spark/subquery_in.q.out      |    8 +-
 .../clientpositive/spark/union_remove_25.q.out  |   16 +-
 .../clientpositive/spark/union_view.q.out       |   60 +-
 .../spark/vector_mapjoin_reduce.q.out           |    6 +-
 .../clientpositive/subquery_exists.q.out        |    2 +-
 .../results/clientpositive/subquery_in.q.out    |    8 +-
 .../subquery_unqualcolumnrefs.q.out             |   10 +-
 .../results/clientpositive/subquery_views.q.out |    8 +-
 .../tez/correlationoptimizer1.q.out             |    8 +-
 .../tez/dynamic_partition_pruning.q.out         |  134 +-
 .../clientpositive/tez/explainuser_1.q.out      |   40 +-
 .../clientpositive/tez/explainuser_2.q.out      |   12 +-
 .../tez/filter_join_breaktask.q.out             |    2 +-
 .../clientpositive/tez/mapjoin_mapjoin.q.out    |    2 +-
 .../results/clientpositive/tez/mergejoin.q.out  |    8 -
 .../results/clientpositive/tez/skewjoin.q.out   |   16 +-
 .../clientpositive/tez/subquery_exists.q.out    |    2 +-
 .../clientpositive/tez/subquery_in.q.out        |    8 +-
 .../tez/tez_dynpart_hashjoin_1.q.out            |   24 +-
 .../clientpositive/tez/tez_self_join.q.out      |    2 +-
 .../clientpositive/tez/tez_smb_empty.q.out      |   18 +-
 .../tez/tez_vector_dynpart_hashjoin_1.q.out     |   24 +-
 .../tez/vector_interval_mapjoin.q.out           |    4 +-
 .../tez/vector_leftsemi_mapjoin.q.out           |   12 +-
 .../tez/vector_mapjoin_reduce.q.out             |    6 +-
 .../vectorized_dynamic_partition_pruning.q.out  |  122 +-
 .../clientpositive/udf_folder_constants.q.out   |    8 +-
 .../clientpositive/union_remove_25.q.out        |   20 +-
 .../results/clientpositive/union_view.q.out     |   60 +-
 .../vector_interval_mapjoin.q.out               |    4 +-
 .../vector_leftsemi_mapjoin.q.out               |   12 +-
 .../clientpositive/vector_mapjoin_reduce.q.out  |    6 +-
 205 files changed, 2934 insertions(+), 2886 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
index 4825a61..1c15012 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
@@ -26,7 +26,6 @@ import java.util.Map.Entry;
 import java.util.Set;
 
 import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.plan.RelOptPredicateList;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.plan.RelOptUtil.InputFinder;
 import org.apache.calcite.plan.RelOptUtil.InputReferencedVisitor;
@@ -75,6 +74,7 @@ import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableMap.Builder;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
 /**
@@ -628,16 +628,55 @@ public class HiveCalciteUtil {
                                                               }
                                                             };
 
-  public static ImmutableList<RexNode> getPredsNotPushedAlready(RelNode inp, List<RexNode> predsToPushDown) {
-    final RelOptPredicateList predicates = RelMetadataQuery.getPulledUpPredicates(inp);
-    final ImmutableSet<String> alreadyPushedPreds = ImmutableSet.copyOf(Lists.transform(
-        predicates.pulledUpPredicates, REX_STR_FN));
-    final ImmutableList.Builder<RexNode> newConjuncts = ImmutableList.builder();
+  public static ImmutableList<RexNode> getPredsNotPushedAlready(RelNode inp, List<RexNode> predsToPushDown) {   
+    return getPredsNotPushedAlready(Sets.<String>newHashSet(), inp, predsToPushDown);
+  }
+
+  /**
+   * Given a list of predicates to push down, this method returns the set of predicates
+   * that still need to be pushed. Predicates need to be pushed because 1) their String
+   * representation is not included in the input set of predicates to exclude, and 2) they
+   * are not already in the subtree rooted at the input node.
+   * This method updates the set of predicates to exclude with the String representation
+   * of the predicates in the output and in the subtree.
+   *
+   * @param predicatesToExclude String representation of predicates that should be excluded
+   * @param inp root of the subtree
+   * @param predsToPushDown candidate predicates to push down through the subtree
+   * @return list of predicates to push down
+   */
+  public static ImmutableList<RexNode> getPredsNotPushedAlready(Set<String> predicatesToExclude,
+          RelNode inp, List<RexNode> predsToPushDown) {
+    // Bail out if there is nothing to push
+    if (predsToPushDown.isEmpty()) {
+      return ImmutableList.of();
+    }
+    // Build map to not convert multiple times, further remove already included predicates
+    Map<String,RexNode> stringToRexNode = Maps.newLinkedHashMap();
     for (RexNode r : predsToPushDown) {
-      if (!alreadyPushedPreds.contains(r.toString())) {
-        newConjuncts.add(r);
+      String rexNodeString = r.toString();
+      if (predicatesToExclude.add(rexNodeString)) {
+        stringToRexNode.put(rexNodeString, r);
+      }
+    }
+    if (stringToRexNode.isEmpty()) {
+      return ImmutableList.of();
+    }
+    // Finally exclude preds that are already in the subtree as given by the metadata provider
+    // Note: this is the last step, trying to avoid the expensive call to the metadata provider
+    //       if possible
+    Set<String> predicatesInSubtree = Sets.newHashSet();
+    for (RexNode pred : RelMetadataQuery.getPulledUpPredicates(inp).pulledUpPredicates) {
+      predicatesInSubtree.add(pred.toString());
+      predicatesInSubtree.addAll(Lists.transform(RelOptUtil.conjunctions(pred), REX_STR_FN));
+    }
+    final ImmutableList.Builder<RexNode> newConjuncts = ImmutableList.builder();
+    for (Entry<String,RexNode> e : stringToRexNode.entrySet()) {
+      if (predicatesInSubtree.add(e.getKey())) {
+        newConjuncts.add(e.getValue());
       }
     }
+    predicatesToExclude.addAll(predicatesInSubtree);
     return newConjuncts.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveHepPlannerContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveHepPlannerContext.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveHepPlannerContext.java
deleted file mode 100644
index ad79aee..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveHepPlannerContext.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite;
-
-import org.apache.calcite.plan.Context;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry;
-
-
-public class HiveHepPlannerContext implements Context {
-  private HiveRulesRegistry registry;
-
-  public HiveHepPlannerContext(HiveRulesRegistry registry) {
-    this.registry = registry;
-  }
-
-  public <T> T unwrap(Class<T> clazz) {
-    if (clazz.isInstance(registry)) {
-      return clazz.cast(registry);
-    }
-    return null;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
new file mode 100644
index 0000000..aeb4e7d
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite;
+
+import org.apache.calcite.plan.Context;
+import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveAlgorithmsConf;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry;
+
+
+public class HivePlannerContext implements Context {
+  private HiveAlgorithmsConf config;
+  private HiveRulesRegistry registry;
+
+  public HivePlannerContext(HiveAlgorithmsConf config, HiveRulesRegistry registry) {
+    this.config = config;
+    this.registry = registry;
+  }
+
+  public <T> T unwrap(Class<T> clazz) {
+    if (clazz.isInstance(config)) {
+      return clazz.cast(config);
+    }
+    if (clazz.isInstance(registry)) {
+      return clazz.cast(registry);
+    }
+    return null;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveVolcanoPlannerContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveVolcanoPlannerContext.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveVolcanoPlannerContext.java
deleted file mode 100644
index 8859fc2..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveVolcanoPlannerContext.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite;
-
-import org.apache.calcite.plan.Context;
-import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveAlgorithmsConf;
-
-
-public class HiveVolcanoPlannerContext implements Context {
-  private HiveAlgorithmsConf config;
-
-  public HiveVolcanoPlannerContext(HiveAlgorithmsConf config) {
-    this.config = config;
-  }
-
-  public <T> T unwrap(Class<T> clazz) {
-    if (clazz.isInstance(config)) {
-      return clazz.cast(config);
-    }
-    return null;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostModel.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostModel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostModel.java
index d15d885..4af1f8d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostModel.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostModel.java
@@ -56,8 +56,8 @@ public abstract class HiveCostModel {
     JoinAlgorithm joinAlgorithm = null;
     RelOptCost minJoinCost = null;
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Join algorithm selection for:\n" + RelOptUtil.toString(join));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Join algorithm selection for:\n" + RelOptUtil.toString(join));
     }
 
     for (JoinAlgorithm possibleAlgorithm : this.joinAlgorithms) {
@@ -65,8 +65,8 @@ public abstract class HiveCostModel {
         continue;
       }
       RelOptCost joinCost = possibleAlgorithm.getCost(join);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(possibleAlgorithm + " cost: " + joinCost);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(possibleAlgorithm + " cost: " + joinCost);
       }
       if (minJoinCost == null || joinCost.isLt(minJoinCost) ) {
         joinAlgorithm = possibleAlgorithm;
@@ -74,8 +74,8 @@ public abstract class HiveCostModel {
       }
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(joinAlgorithm + " selected");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(joinAlgorithm + " selected");
     }
 
     join.setJoinAlgorithm(joinAlgorithm);

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java
index 8610edc..1bd12b7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java
@@ -22,7 +22,7 @@ import org.apache.calcite.plan.ConventionTraitDef;
 import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.volcano.VolcanoPlanner;
 import org.apache.calcite.rel.RelCollationTraitDef;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveVolcanoPlannerContext;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HivePlannerContext;
 
 /**
  * Refinement of {@link org.apache.calcite.plan.volcano.VolcanoPlanner} for Hive.
@@ -35,11 +35,11 @@ public class HiveVolcanoPlanner extends VolcanoPlanner {
   private static final boolean ENABLE_COLLATION_TRAIT = true;
 
   /** Creates a HiveVolcanoPlanner. */
-  public HiveVolcanoPlanner(HiveVolcanoPlannerContext conf) {
+  public HiveVolcanoPlanner(HivePlannerContext conf) {
     super(HiveCost.FACTORY, conf);
   }
 
-  public static RelOptPlanner createPlanner(HiveVolcanoPlannerContext conf) {
+  public static RelOptPlanner createPlanner(HivePlannerContext conf) {
     final VolcanoPlanner planner = new HiveVolcanoPlanner(conf);
     planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
     if (ENABLE_COLLATION_TRAIT) {

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
index 27b1e76..c323564 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelOptUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCostModel.JoinAlgorithm;
 import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveDefaultCostModel.DefaultJoinAlgorithm;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry;
 
 import com.google.common.collect.ImmutableList;
 
@@ -103,8 +104,14 @@ public class HiveJoin extends Join implements HiveRelNode {
       RelNode right, JoinRelType joinType, boolean semiJoinDone) {
     try {
       Set<String> variablesStopped = Collections.emptySet();
-      return new HiveJoin(getCluster(), traitSet, left, right, conditionExpr, joinType,
+      HiveJoin join = new HiveJoin(getCluster(), traitSet, left, right, conditionExpr, joinType,
           variablesStopped, joinAlgorithm, leftSemiJoin);
+      // If available, copy state to registry for optimization rules
+      HiveRulesRegistry registry = join.getCluster().getPlanner().getContext().unwrap(HiveRulesRegistry.class);
+      if (registry != null) {
+        registry.copyPushedPredicates(this, join);
+      }
+      return join;
     } catch (InvalidRelException | CalciteSemanticException e) {
       // Semantic error not possible. Must be a bug. Convert to
       // internal error.

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java
index 3558676..4fac13e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java
@@ -35,6 +35,7 @@ import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.util.ImmutableIntList;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelOptUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry;
 
 import com.google.common.collect.ImmutableList;
 
@@ -87,8 +88,14 @@ public class HiveSemiJoin extends SemiJoin implements HiveRelNode {
           RelNode left, RelNode right, JoinRelType joinType, boolean semiJoinDone) {
     try {
       final JoinInfo joinInfo = JoinInfo.of(left, right, condition);
-      return new HiveSemiJoin(getCluster(), traitSet, left, right, condition,
+      HiveSemiJoin semijoin = new HiveSemiJoin(getCluster(), traitSet, left, right, condition,
               joinInfo.leftKeys, joinInfo.rightKeys);
+      // If available, copy state to registry for optimization rules
+      HiveRulesRegistry registry = semijoin.getCluster().getPlanner().getContext().unwrap(HiveRulesRegistry.class);
+      if (registry != null) {
+        registry.copyPushedPredicates(this, semijoin);
+      }
+      return semijoin;
     } catch (InvalidRelException | CalciteSemanticException e) {
       // Semantic error not possible. Must be a bug. Convert to
       // internal error.

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnion.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnion.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnion.java
index 8b57b35..7cfb007 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnion.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnion.java
@@ -24,9 +24,8 @@ import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.SetOp;
 import org.apache.calcite.rel.core.Union;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode.Implementor;
 
-public class HiveUnion extends Union {
+public class HiveUnion extends Union implements HiveRelNode {
 
   public HiveUnion(RelOptCluster cluster, RelTraitSet traits, List<RelNode> inputs) {
     super(cluster, traits, inputs, true);
@@ -37,6 +36,7 @@ public class HiveUnion extends Union {
     return new HiveUnion(this.getCluster(), traitSet, inputs);
   }
 
+  @Override
   public void implement(Implementor implementor) {
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java
index de880ce..1cb6a08 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java
@@ -17,10 +17,8 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
 
-import java.util.Collection;
-import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Map;
+import java.util.List;
 import java.util.Set;
 
 import org.apache.calcite.plan.RelOptCluster;
@@ -29,28 +27,31 @@ import org.apache.calcite.plan.RelOptRuleCall;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Join;
 import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.calcite.rel.core.RelFactories;
 import org.apache.calcite.rel.core.RelFactories.FilterFactory;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinLeafPredicateInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSemiJoin;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 
 public final class HiveJoinAddNotNullRule extends RelOptRule {
 
-  private static final String NOT_NULL_FUNC_NAME = "isnotnull";
+  public static final HiveJoinAddNotNullRule INSTANCE_JOIN =
+          new HiveJoinAddNotNullRule(HiveJoin.class, HiveRelFactories.HIVE_FILTER_FACTORY);
 
-  /** The singleton. */
-  public static final HiveJoinAddNotNullRule INSTANCE =
-      new HiveJoinAddNotNullRule(HiveRelFactories.HIVE_FILTER_FACTORY);
+  public static final HiveJoinAddNotNullRule INSTANCE_SEMIJOIN =
+          new HiveJoinAddNotNullRule(HiveSemiJoin.class, HiveRelFactories.HIVE_FILTER_FACTORY);
 
   private final FilterFactory filterFactory;
 
@@ -59,10 +60,9 @@ public final class HiveJoinAddNotNullRule extends RelOptRule {
   /**
    * Creates an HiveJoinAddNotNullRule.
    */
-  public HiveJoinAddNotNullRule(FilterFactory filterFactory) {
-    super(operand(Join.class,
-              operand(RelNode.class, any()),
-              operand(RelNode.class, any())));
+  public HiveJoinAddNotNullRule(Class<? extends Join> clazz,
+          RelFactories.FilterFactory filterFactory) {
+    super(operand(clazz, any()));
     this.filterFactory = filterFactory;
   }
 
@@ -71,8 +71,11 @@ public final class HiveJoinAddNotNullRule extends RelOptRule {
   @Override
   public void onMatch(RelOptRuleCall call) {
     final Join join = call.rel(0);
-    RelNode leftInput = call.rel(1);
-    RelNode rightInput = call.rel(2);
+    RelNode lChild = join.getLeft();
+    RelNode rChild = join.getRight();
+
+    HiveRulesRegistry registry = call.getPlanner().getContext().unwrap(HiveRulesRegistry.class);
+    assert registry != null;
 
     if (join.getJoinType() != JoinRelType.INNER) {
       return;
@@ -102,51 +105,46 @@ public final class HiveJoinAddNotNullRule extends RelOptRule {
     final RelOptCluster cluster = join.getCluster();
     final RexBuilder rexBuilder = join.getCluster().getRexBuilder();
 
-    final Map<String,RexNode> newLeftConditions = getNotNullConditions(cluster,
-            rexBuilder, leftInput, joinLeftKeyPositions);
-    final Map<String,RexNode> newRightConditions = getNotNullConditions(cluster,
-            rexBuilder, rightInput, joinRightKeyPositions);
+    Set<String> leftPushedPredicates = Sets.newHashSet(registry.getPushedPredicates(join, 0));
+    final List<RexNode> newLeftConditions = getNotNullConditions(cluster,
+            rexBuilder, join.getLeft(), joinLeftKeyPositions, leftPushedPredicates);
+    Set<String> rightPushedPredicates = Sets.newHashSet(registry.getPushedPredicates(join, 1));
+    final List<RexNode> newRightConditions = getNotNullConditions(cluster,
+            rexBuilder, join.getRight(), joinRightKeyPositions, rightPushedPredicates);
 
     // Nothing will be added to the expression
-    if (newLeftConditions == null && newRightConditions == null) {
+    RexNode newLeftPredicate = RexUtil.composeConjunction(rexBuilder, newLeftConditions, false);
+    RexNode newRightPredicate = RexUtil.composeConjunction(rexBuilder, newRightConditions, false);
+    if (newLeftPredicate.isAlwaysTrue() && newRightPredicate.isAlwaysTrue()) {
       return;
     }
 
-    if (newLeftConditions != null) {
-      if (leftInput instanceof HiveFilter) {
-        leftInput = leftInput.getInput(0);
-      }
-      leftInput = createHiveFilterConjunctiveCondition(filterFactory, rexBuilder,
-              leftInput, newLeftConditions.values());
+    if (!newLeftPredicate.isAlwaysTrue()) {
+      RelNode curr = lChild;
+      lChild = filterFactory.createFilter(lChild, newLeftPredicate);
+      call.getPlanner().onCopy(curr, lChild);
     }
-    if (newRightConditions != null) {
-      if (rightInput instanceof HiveFilter) {
-        rightInput = rightInput.getInput(0);
-      }
-      rightInput = createHiveFilterConjunctiveCondition(filterFactory, rexBuilder,
-              rightInput, newRightConditions.values());
+    if (!newRightPredicate.isAlwaysTrue()) {
+      RelNode curr = rChild;
+      rChild = filterFactory.createFilter(rChild, newRightPredicate);
+      call.getPlanner().onCopy(curr, rChild);
     }
 
     Join newJoin = join.copy(join.getTraitSet(), join.getCondition(),
-            leftInput, rightInput, join.getJoinType(), join.isSemiJoinDone());
-
+            lChild, rChild, join.getJoinType(), join.isSemiJoinDone());
     call.getPlanner().onCopy(join, newJoin);
 
+    // Register information about created predicates
+    registry.getPushedPredicates(newJoin, 0).addAll(leftPushedPredicates);
+    registry.getPushedPredicates(newJoin, 1).addAll(rightPushedPredicates);
+
     call.transformTo(newJoin);
   }
 
-  private static Map<String,RexNode> getNotNullConditions(RelOptCluster cluster,
-          RexBuilder rexBuilder, RelNode input, Set<Integer> inputKeyPositions) {
-
-    boolean added = false;
-
-    final Map<String,RexNode> newConditions;
-    if (input instanceof HiveFilter) {
-      newConditions = splitCondition(((HiveFilter) input).getCondition());
-    }
-    else {
-      newConditions = new HashMap<String,RexNode>();
-    }
+  private static List<RexNode> getNotNullConditions(RelOptCluster cluster,
+          RexBuilder rexBuilder, RelNode input, Set<Integer> inputKeyPositions,
+          Set<String> pushedPredicates) {
+    final List<RexNode> newConditions = Lists.newArrayList();
     for (int pos : inputKeyPositions) {
       RelDataType keyType = input.getRowType().getFieldList().get(pos).getType();
       // Nothing to do if key cannot be null
@@ -156,34 +154,11 @@ public final class HiveJoinAddNotNullRule extends RelOptRule {
       RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_NULL,
               rexBuilder.makeInputRef(input, pos));
       String digest = cond.toString();
-      if (!newConditions.containsKey(digest)) {
-        newConditions.put(digest,cond);
-        added = true;
+      if (pushedPredicates.add(digest)) {
+        newConditions.add(cond);
       }
     }
-    // Nothing will be added to the expression
-    if (!added) {
-      return null;
-    }
     return newConditions;
   }
 
-  private static Map<String,RexNode> splitCondition(RexNode condition) {
-    Map<String,RexNode> newConditions = new HashMap<String,RexNode>();
-    if (condition.getKind() == SqlKind.AND) {
-      for (RexNode node : ((RexCall) condition).getOperands()) {
-        newConditions.put(node.toString(), node);
-      }
-    }
-    else {
-      newConditions.put(condition.toString(), condition);
-    }
-    return newConditions;
-  }
-
-  private static RelNode createHiveFilterConjunctiveCondition(FilterFactory filterFactory,
-          RexBuilder rexBuilder, RelNode input, Collection<RexNode> conditions) {
-    final RexNode newCondition = RexUtil.composeConjunction(rexBuilder, conditions, false);
-    return filterFactory.createFilter(input, newCondition);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinPushTransitivePredicatesRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinPushTransitivePredicatesRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinPushTransitivePredicatesRule.java
index 703c8c6..07928d8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinPushTransitivePredicatesRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinPushTransitivePredicatesRule.java
@@ -1,12 +1,13 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,6 +19,7 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptPredicateList;
@@ -25,7 +27,7 @@ import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptRuleCall;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Join;
-import org.apache.calcite.rel.core.RelFactories;
+import org.apache.calcite.rel.core.RelFactories.FilterFactory;
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
@@ -38,10 +40,14 @@ import org.apache.calcite.rex.RexVisitorImpl;
 import org.apache.calcite.util.Util;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSemiJoin;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotNull;
 import org.apache.hive.common.util.AnnotationUtils;
 
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Sets;
 
 /**
  * Planner rule that infers predicates from on a
@@ -55,51 +61,55 @@ import com.google.common.collect.ImmutableList;
  * and applies them appropriately.
  */
 public class HiveJoinPushTransitivePredicatesRule extends RelOptRule {
-  private final RelFactories.FilterFactory filterFactory;
 
-  /** The singleton. */
-  public static final HiveJoinPushTransitivePredicatesRule INSTANCE =
-      new HiveJoinPushTransitivePredicatesRule(Join.class,
-          RelFactories.DEFAULT_FILTER_FACTORY);
+  public static final HiveJoinPushTransitivePredicatesRule INSTANCE_JOIN =
+          new HiveJoinPushTransitivePredicatesRule(HiveJoin.class, HiveRelFactories.HIVE_FILTER_FACTORY);
+
+  public static final HiveJoinPushTransitivePredicatesRule INSTANCE_SEMIJOIN =
+          new HiveJoinPushTransitivePredicatesRule(HiveSemiJoin.class, HiveRelFactories.HIVE_FILTER_FACTORY);
+
+  private final FilterFactory filterFactory;
 
   public HiveJoinPushTransitivePredicatesRule(Class<? extends Join> clazz,
-      RelFactories.FilterFactory filterFactory) {
-    super(operand(clazz, operand(RelNode.class, any()),
-        operand(RelNode.class, any())));
+      FilterFactory filterFactory) {
+    super(operand(clazz, any()));
     this.filterFactory = filterFactory;
   }
 
-  @Override public void onMatch(RelOptRuleCall call) {
+  @Override
+  public void onMatch(RelOptRuleCall call) {
     Join join = call.rel(0);
-
-    // Register that we have visited this operator in this rule
-    HiveRulesRegistry registry = call.getPlanner().getContext().unwrap(HiveRulesRegistry.class);
-    if (registry != null) {
-      registry.registerVisited(this, join);
-    }
-
+    
     RelOptPredicateList preds = RelMetadataQuery.getPulledUpPredicates(join);
 
+    HiveRulesRegistry registry = call.getPlanner().getContext().unwrap(HiveRulesRegistry.class);
+    assert registry != null;
     RexBuilder rB = join.getCluster().getRexBuilder();
-    RelNode lChild = call.rel(1);
-    RelNode rChild = call.rel(2);
-
-    List<RexNode> leftPreds = getValidPreds(join.getCluster(), lChild, preds.leftInferredPredicates, lChild.getRowType());
-    List<RexNode> rightPreds = getValidPreds(join.getCluster(), rChild, preds.rightInferredPredicates, rChild.getRowType());
-
-    if (leftPreds.isEmpty() && rightPreds.isEmpty()) {
+    RelNode lChild = join.getLeft();
+    RelNode rChild = join.getRight();
+
+    Set<String> leftPushedPredicates = Sets.newHashSet(registry.getPushedPredicates(join, 0));
+    List<RexNode> leftPreds = getValidPreds(join.getCluster(), lChild,
+            leftPushedPredicates, preds.leftInferredPredicates, lChild.getRowType());
+    Set<String> rightPushedPredicates = Sets.newHashSet(registry.getPushedPredicates(join, 1));
+    List<RexNode> rightPreds = getValidPreds(join.getCluster(), rChild,
+            rightPushedPredicates, preds.rightInferredPredicates, rChild.getRowType());
+
+    RexNode newLeftPredicate = RexUtil.composeConjunction(rB, leftPreds, false);
+    RexNode newRightPredicate = RexUtil.composeConjunction(rB, rightPreds, false);
+    if (newLeftPredicate.isAlwaysTrue() && newRightPredicate.isAlwaysTrue()) {
       return;
     }
 
-    if (leftPreds.size() > 0) {
+    if (!newLeftPredicate.isAlwaysTrue()) {
       RelNode curr = lChild;
-      lChild = filterFactory.createFilter(lChild, RexUtil.composeConjunction(rB, leftPreds, false));
+      lChild = filterFactory.createFilter(lChild, newLeftPredicate);
       call.getPlanner().onCopy(curr, lChild);
     }
 
-    if (rightPreds.size() > 0) {
+    if (!newRightPredicate.isAlwaysTrue()) {
       RelNode curr = rChild;
-      rChild = filterFactory.createFilter(rChild, RexUtil.composeConjunction(rB, rightPreds, false));
+      rChild = filterFactory.createFilter(rChild, newRightPredicate);
       call.getPlanner().onCopy(curr, rChild);
     }
 
@@ -107,16 +117,15 @@ public class HiveJoinPushTransitivePredicatesRule extends RelOptRule {
         lChild, rChild, join.getJoinType(), join.isSemiJoinDone());
     call.getPlanner().onCopy(join, newRel);
 
-    // We register new Join rel so we do not fire the rule on them again
-    if (registry != null) {
-      registry.registerVisited(this, newRel);
-    }
+    // Register information about pushed predicates
+    registry.getPushedPredicates(newRel, 0).addAll(leftPushedPredicates);
+    registry.getPushedPredicates(newRel, 1).addAll(rightPushedPredicates);
 
     call.transformTo(newRel);
   }
 
-  private ImmutableList<RexNode> getValidPreds(RelOptCluster cluster, RelNode rn,
-      List<RexNode> rexs, RelDataType rType) {
+  private ImmutableList<RexNode> getValidPreds(RelOptCluster cluster, RelNode child,
+      Set<String> predicatesToExclude, List<RexNode> rexs, RelDataType rType) {
     InputRefValidator validator = new InputRefValidator(rType.getFieldList());
     List<RexNode> valids = new ArrayList<RexNode>(rexs.size());
     for (RexNode rex : rexs) {
@@ -128,7 +137,11 @@ public class HiveJoinPushTransitivePredicatesRule extends RelOptRule {
       }
     }
 
-    return HiveCalciteUtil.getPredsNotPushedAlready(rn, valids);
+    // We need to filter i) those that have been pushed already as stored in the join,
+    // and ii) those that were already in the subtree rooted at child
+    ImmutableList<RexNode> toPush = HiveCalciteUtil.getPredsNotPushedAlready(predicatesToExclude,
+            child, valids);
+    return toPush;
   }
 
   private RexNode getTypeSafePred(RelOptCluster cluster, RexNode rex, RelDataType rType) {

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java
index d37fc0e..17fcc82 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java
@@ -37,10 +37,10 @@ import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.sql.SqlKind;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.LinkedHashMultimap;
@@ -169,13 +169,13 @@ public class HivePreFilteringRule extends RelOptRule {
     // 3. If the new conjuncts are already present in the plan, we bail out
     final List<RexNode> newConjuncts = HiveCalciteUtil.getPredsNotPushedAlready(filter.getInput(),
         operandsToPushDown);
-    if (newConjuncts.isEmpty()) {
+    RexNode newPredicate = RexUtil.composeConjunction(rexBuilder, newConjuncts, false);
+    if (newPredicate.isAlwaysTrue()) {
       return;
     }
 
     // 4. Otherwise, we create a new condition
-    final RexNode newChildFilterCondition = RexUtil.pullFactors(rexBuilder,
-        RexUtil.composeConjunction(rexBuilder, newConjuncts, false));
+    final RexNode newChildFilterCondition = RexUtil.pullFactors(rexBuilder, newPredicate);
 
     // 5. We create the new filter that might be pushed down
     RelNode newChildFilter = filterFactory.createFilter(filter.getInput(), newChildFilterCondition);

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRulesRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRulesRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRulesRegistry.java
index 18a065e..ff6cb75 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRulesRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRulesRegistry.java
@@ -22,23 +22,44 @@ import java.util.Set;
 import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.rel.RelNode;
 
+import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ListMultimap;
 import com.google.common.collect.SetMultimap;
+import com.google.common.collect.Sets;
 
 public class HiveRulesRegistry {
 
-  private SetMultimap<RelOptRule, RelNode> registry;
+  private SetMultimap<RelOptRule, RelNode> registryVisited;
+  private ListMultimap<RelNode,Set<String>> registryPushedPredicates;
 
   public HiveRulesRegistry() {
-    this.registry = HashMultimap.create();
+    this.registryVisited = HashMultimap.create();
+    this.registryPushedPredicates = ArrayListMultimap.create();
   }
 
   public void registerVisited(RelOptRule rule, RelNode operator) {
-    this.registry.put(rule, operator);
+    this.registryVisited.put(rule, operator);
   }
 
   public Set<RelNode> getVisited(RelOptRule rule) {
-    return this.registry.get(rule);
+    return this.registryVisited.get(rule);
   }
 
+  public Set<String> getPushedPredicates(RelNode operator, int pos) {
+    if (!this.registryPushedPredicates.containsKey(operator)) {
+      for (int i = 0; i < operator.getInputs().size(); i++) {
+        this.registryPushedPredicates.get(operator).add(Sets.<String>newHashSet());
+      }
+    }
+    return this.registryPushedPredicates.get(operator).get(pos);
+  }
+
+  public void copyPushedPredicates(RelNode operator, RelNode otherOperator) {
+    if (this.registryPushedPredicates.containsKey(operator)) {
+      for (Set<String> s : this.registryPushedPredicates.get(operator)) {
+        this.registryPushedPredicates.put(otherOperator, Sets.newHashSet(s));
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 8cc3747..cc9dc23 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -59,7 +59,6 @@ import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Aggregate;
 import org.apache.calcite.rel.core.AggregateCall;
 import org.apache.calcite.rel.core.Filter;
-import org.apache.calcite.rel.core.Join;
 import org.apache.calcite.rel.core.JoinRelType;
 import org.apache.calcite.rel.metadata.CachingRelMetadataProvider;
 import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
@@ -116,11 +115,10 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException.UnsupportedFeature;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveDefaultRelMetadataProvider;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveHepPlannerContext;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HivePlannerContext;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRexExecutorImpl;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveTypeSystemImpl;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveVolcanoPlannerContext;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveAlgorithmsConf;
@@ -857,7 +855,8 @@ public class CalcitePlanner extends SemanticAnalyzer {
       final Double maxMemory = (double) HiveConf.getLongVar(
               conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
       HiveAlgorithmsConf algorithmsConf = new HiveAlgorithmsConf(maxSplitSize, maxMemory);
-      HiveVolcanoPlannerContext confContext = new HiveVolcanoPlannerContext(algorithmsConf);
+      HiveRulesRegistry registry = new HiveRulesRegistry();
+      HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry);
       RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(confContext);
       final RelOptQuery query = new RelOptQuery(planner);
       final RexBuilder rexBuilder = cluster.getRexBuilder();
@@ -1072,34 +1071,28 @@ public class CalcitePlanner extends SemanticAnalyzer {
       perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
         "Calcite: Prejoin ordering transformation, factor out common filter elements and separating deterministic vs non-deterministic UDF");
 
-      // 3. PPD for old Join Syntax
-      // NOTE: PPD needs to run before adding not null filters in order to
-      // support old style join syntax (so that on-clauses will get filled up).
-      // TODO: Add in ReduceExpressionrules (Constant folding) to below once
-      // HIVE-11927 is fixed.
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
-      basePlan = hepPlan(basePlan, true, mdProvider, null, HiveFilterProjectTransposeRule.INSTANCE_DETERMINISTIC,
-          HiveFilterSetOpTransposeRule.INSTANCE, HiveFilterSortTransposeRule.INSTANCE, HiveFilterJoinRule.JOIN,
-          HiveFilterJoinRule.FILTER_ON_JOIN, new HiveFilterAggregateTransposeRule(Filter.class,
-              HiveRelFactories.HIVE_FILTER_FACTORY, Aggregate.class), new FilterMergeRule(
-              HiveRelFactories.HIVE_FILTER_FACTORY));
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
-        "Calcite: Prejoin ordering transformation, PPD for old join syntax");
-
-
-      // TODO: Transitive inference, constant prop & Predicate push down has to
-      // do multiple passes till no more inference is left
-      // Currently doing so would result in a spin. Just checking for if inferred
-      // pred is present below may not be sufficient as inferred & pushed pred
-      // could have been mutated by constant folding/prop
-      // 4. Transitive inference for join on clauses
+      // 3. Run exhaustive PPD, add not null filters, transitive inference, 
+      // constant propagation, constant folding
       perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
-      basePlan = hepPlan(basePlan, true, mdProvider, null, new HiveJoinPushTransitivePredicatesRule(
-          Join.class, HiveRelFactories.HIVE_FILTER_FACTORY));
+      basePlan = hepPlan(basePlan, true, mdProvider, executorProvider, HepMatchOrder.BOTTOM_UP,
+          HiveFilterProjectTransposeRule.INSTANCE_DETERMINISTIC,
+          HiveFilterSetOpTransposeRule.INSTANCE,
+          HiveFilterSortTransposeRule.INSTANCE,
+          HiveFilterJoinRule.JOIN,
+          HiveFilterJoinRule.FILTER_ON_JOIN,
+          new HiveFilterAggregateTransposeRule(Filter.class, HiveRelFactories.HIVE_FILTER_FACTORY, Aggregate.class),
+          new FilterMergeRule(HiveRelFactories.HIVE_FILTER_FACTORY),
+          HiveJoinAddNotNullRule.INSTANCE_JOIN,
+          HiveJoinAddNotNullRule.INSTANCE_SEMIJOIN,
+          HiveJoinPushTransitivePredicatesRule.INSTANCE_JOIN,
+          HiveJoinPushTransitivePredicatesRule.INSTANCE_SEMIJOIN,
+          HiveReduceExpressionsRule.PROJECT_INSTANCE,
+          HiveReduceExpressionsRule.FILTER_INSTANCE,
+          HiveReduceExpressionsRule.JOIN_INSTANCE);
       perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
-        "Calcite: Prejoin ordering transformation, Transitive inference for join on clauses");
+        "Calcite: Prejoin ordering transformation, PPD, not null predicates, transitive inference, constant folding");
 
-      // 5. Push down limit through outer join
+      // 4. Push down limit through outer join
       // NOTE: We run this after PPD to support old style join syntax.
       // Ex: select * from R1 left outer join R2 where ((R1.x=R2.x) and R1.y<10) or
       // ((R1.x=R2.x) and R1.z=10)) and rand(1) < 0.1 order by R1.x limit 10
@@ -1121,46 +1114,20 @@ public class CalcitePlanner extends SemanticAnalyzer {
           "Calcite: Prejoin ordering transformation, Push down limit through outer join");
       }
 
-      // 6. Add not null filters
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
-      basePlan = hepPlan(basePlan, true, mdProvider, null, HiveJoinAddNotNullRule.INSTANCE);
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
-        "Calcite: Prejoin ordering transformation, Add not null filters");
-
-      // 7. Rerun Constant propagation and PPD now that we have added Not NULL filters & did transitive inference
-      // TODO: Add in ReduceExpressionrules (Constant folding) to below once
-      // HIVE-11927 is fixed.
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
-      basePlan = hepPlan(basePlan, true, mdProvider, null, HiveFilterProjectTransposeRule.INSTANCE_DETERMINISTIC,
-          HiveFilterSetOpTransposeRule.INSTANCE, HiveFilterSortTransposeRule.INSTANCE, HiveFilterJoinRule.JOIN,
-          HiveFilterJoinRule.FILTER_ON_JOIN, new HiveFilterAggregateTransposeRule(Filter.class,
-              HiveRelFactories.HIVE_FILTER_FACTORY, Aggregate.class), new FilterMergeRule(
-              HiveRelFactories.HIVE_FILTER_FACTORY));
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
-        "Calcite: Prejoin ordering transformation, Constant propagation and PPD");
-
-      // 8. Push Down Semi Joins
+      // 5. Push Down Semi Joins
       perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
       basePlan = hepPlan(basePlan, true, mdProvider, null, SemiJoinJoinTransposeRule.INSTANCE,
           SemiJoinFilterTransposeRule.INSTANCE, SemiJoinProjectTransposeRule.INSTANCE);
       perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
         "Calcite: Prejoin ordering transformation, Push Down Semi Joins");
 
-      // 9. Constant folding
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
-      basePlan = hepPlan(basePlan, true, mdProvider, executorProvider,
-          HiveReduceExpressionsRule.PROJECT_INSTANCE, HiveReduceExpressionsRule.FILTER_INSTANCE,
-          HiveReduceExpressionsRule.JOIN_INSTANCE);
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
-          "Calcite: Prejoin ordering transformation, Constant folding");
-
-      // 10. Apply Partition Pruning
+      // 6. Apply Partition Pruning
       perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
       basePlan = hepPlan(basePlan, false, mdProvider, null, new HivePartitionPruneRule(conf));
       perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
         "Calcite: Prejoin ordering transformation, Partition Pruning");
 
-      // 11. Projection Pruning (this introduces select above TS & hence needs to be run last due to PP)
+      // 7. Projection Pruning (this introduces select above TS & hence needs to be run last due to PP)
       perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
       HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null,
           HiveRelFactories.HIVE_BUILDER.create(cluster, null));
@@ -1168,14 +1135,14 @@ public class CalcitePlanner extends SemanticAnalyzer {
       perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
         "Calcite: Prejoin ordering transformation, Projection Pruning");
 
-      // 12. Merge Project-Project if possible
+      // 8. Merge Project-Project if possible
       perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
       basePlan = hepPlan(basePlan, false, mdProvider, null, new ProjectMergeRule(true,
           HiveRelFactories.HIVE_PROJECT_FACTORY));
       perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
         "Calcite: Prejoin ordering transformation, Merge Project-Project");
 
-      // 13. Rerun PPD through Project as column pruning would have introduced
+      // 9. Rerun PPD through Project as column pruning would have introduced
       // DT above scans; By pushing filter just above TS, Hive can push it into
       // storage (incase there are filters on non partition cols). This only
       // matches FIL-PROJ-TS
@@ -1231,9 +1198,9 @@ public class CalcitePlanner extends SemanticAnalyzer {
           programBuilder.addRuleInstance(r);
       }
 
-      HiveRulesRegistry registry = new HiveRulesRegistry();
-      HiveHepPlannerContext context = new HiveHepPlannerContext(registry);
-      HepPlanner planner = new HepPlanner(programBuilder.build(), context);
+      // Create planner and copy context
+      HepPlanner planner = new HepPlanner(programBuilder.build(),
+              basePlan.getCluster().getPlanner().getContext());
 
       List<RelMetadataProvider> list = Lists.newArrayList();
       list.add(mdProvider);

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
index f1d8d1d..44e157b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
@@ -61,7 +61,7 @@ public class TestCBORuleFiredOnlyOnce {
 
     // Create rules registry to not trigger a rule more than once
     HiveRulesRegistry registry = new HiveRulesRegistry();
-    HiveHepPlannerContext context = new HiveHepPlannerContext(registry);
+    HivePlannerContext context = new HivePlannerContext(null, registry);
     HepPlanner planner = new HepPlanner(programBuilder.build(), context);
 
     // Cluster

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/test/results/clientpositive/annotate_stats_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_join.q.out b/ql/src/test/results/clientpositive/annotate_stats_join.q.out
index 7fc754d..9073dc2 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_join.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_join.q.out
@@ -237,7 +237,7 @@ STAGE PLANS:
             alias: emp
             Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: (deptid is not null and lastname is not null) (type: boolean)
+              predicate: (lastname is not null and deptid is not null) (type: boolean)
               Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: lastname (type: string), deptid (type: int), locid (type: int)
@@ -253,7 +253,7 @@ STAGE PLANS:
             alias: dept
             Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: (deptname is not null and deptid is not null) (type: boolean)
+              predicate: (deptid is not null and deptname is not null) (type: boolean)
               Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: deptid (type: int), deptname (type: string)
@@ -303,7 +303,7 @@ STAGE PLANS:
             alias: e
             Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: (deptid is not null and lastname is not null) (type: boolean)
+              predicate: (lastname is not null and deptid is not null) (type: boolean)
               Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: lastname (type: string), deptid (type: int), locid (type: int)
@@ -319,7 +319,7 @@ STAGE PLANS:
             alias: d
             Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: (deptname is not null and deptid is not null) (type: boolean)
+              predicate: (deptid is not null and deptname is not null) (type: boolean)
               Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: deptid (type: int), deptname (type: string)
@@ -373,7 +373,7 @@ STAGE PLANS:
             alias: emp
             Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: (deptid is not null and lastname is not null) (type: boolean)
+              predicate: (lastname is not null and deptid is not null) (type: boolean)
               Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: lastname (type: string), deptid (type: int), locid (type: int)
@@ -389,7 +389,7 @@ STAGE PLANS:
             alias: dept
             Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: (deptname is not null and deptid is not null) (type: boolean)
+              predicate: (deptid is not null and deptname is not null) (type: boolean)
               Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: deptid (type: int), deptname (type: string)
@@ -619,7 +619,7 @@ STAGE PLANS:
             alias: e
             Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: (deptid is not null and lastname is not null) (type: boolean)
+              predicate: (lastname is not null and deptid is not null) (type: boolean)
               Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: lastname (type: string), deptid (type: int), locid (type: int)
@@ -635,7 +635,7 @@ STAGE PLANS:
             alias: d
             Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: (deptname is not null and deptid is not null) (type: boolean)
+              predicate: (deptid is not null and deptname is not null) (type: boolean)
               Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: deptid (type: int), deptname (type: string)
@@ -650,7 +650,7 @@ STAGE PLANS:
             alias: l
             Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: (locid is not null and state is not null) (type: boolean)
+              predicate: (state is not null and locid is not null) (type: boolean)
               Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
index f13643e..26656e2 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
@@ -406,7 +406,7 @@ STAGE PLANS:
             alias: s
             Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: PARTIAL
             Filter Operator
-              predicate: ((s_company_id > 0) and s_store_sk is not null) (type: boolean)
+              predicate: (s_store_sk is not null and (s_company_id > 0)) (type: boolean)
               Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
               Select Operator
                 expressions: s_store_sk (type: int)
@@ -471,7 +471,7 @@ STAGE PLANS:
             alias: s
             Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: ((s_floor_space > 0) and s_store_sk is not null) (type: boolean)
+              predicate: (s_store_sk is not null and (s_floor_space > 0)) (type: boolean)
               Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: s_store_sk (type: int)
@@ -788,7 +788,7 @@ STAGE PLANS:
             alias: s
             Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: ((s_floor_space > 1000) and s_store_sk is not null) (type: boolean)
+              predicate: (s_store_sk is not null and (s_floor_space > 1000)) (type: boolean)
               Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: s_store_sk (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/test/results/clientpositive/auto_join12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join12.q.out b/ql/src/test/results/clientpositive/auto_join12.q.out
index 8ef3664..27858e7 100644
--- a/ql/src/test/results/clientpositive/auto_join12.q.out
+++ b/ql/src/test/results/clientpositive/auto_join12.q.out
@@ -76,12 +76,12 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (UDFToDouble(key) < 100.0) (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) < 80.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
@@ -91,11 +91,11 @@ STAGE PLANS:
                     1 _col0 (type: string)
                     2 _col0 (type: string)
                   outputColumnNames: _col0, _col3
-                  Statistics: Num rows: 365 Data size: 3878 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: hash(_col0,_col3) (type: int)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 365 Data size: 3878 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(_col0)
                       mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/test/results/clientpositive/auto_join16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join16.q.out b/ql/src/test/results/clientpositive/auto_join16.q.out
index c1da6d2..9a36f96 100644
--- a/ql/src/test/results/clientpositive/auto_join16.q.out
+++ b/ql/src/test/results/clientpositive/auto_join16.q.out
@@ -50,7 +50,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(value) < 200.0) and (UDFToDouble(key) > 20.0)) and (UDFToDouble(key) > 10.0)) (type: boolean)
+              predicate: (((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0)) and (UDFToDouble(value) < 200.0)) (type: boolean)
               Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/test/results/clientpositive/auto_join32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join32.q.out b/ql/src/test/results/clientpositive/auto_join32.q.out
index 9b32047..17192cb 100644
--- a/ql/src/test/results/clientpositive/auto_join32.q.out
+++ b/ql/src/test/results/clientpositive/auto_join32.q.out
@@ -412,7 +412,7 @@ STAGE PLANS:
             alias: s
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: ((p = 'bar') and name is not null) (type: boolean)
+              predicate: (name is not null and (p = 'bar')) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: name (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/48b201ee/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out b/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out
index d40b165..1521a71 100644
--- a/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out
+++ b/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out
@@ -704,7 +704,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 100.0) and value is not null) (type: boolean)
+              predicate: (value is not null and (UDFToDouble(key) > 100.0)) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -889,7 +889,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 100.0) and value is not null) (type: boolean)
+              predicate: (value is not null and (UDFToDouble(key) > 100.0)) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -937,7 +937,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 100.0) and value is not null) (type: boolean)
+              predicate: (value is not null and (UDFToDouble(key) > 100.0)) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)


Mime
View raw message