hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From hashut...@apache.org
Subject svn commit: r1674738 [1/11] - in /hive/trunk: ./ common/src/java/org/apache/hadoop/hive/conf/ hbase-handler/ metastore/bin/ ql/ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ ql/src/java/org/apache/hadoop/h...
Date Mon, 20 Apr 2015 06:14:39 GMT
Author: hashutosh
Date: Mon Apr 20 06:14:38 2015
New Revision: 1674738

URL: http://svn.apache.org/r1674738
Log:
HIVE-10268 : Merge cbo branch into trunk (Ashutosh Chauhan)

Added:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveConfigContext.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveConfigContext.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelCollation.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelCollation.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelDistribution.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelDistribution.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveAlgorithmsConf.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveAlgorithmsConf.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveAlgorithmsUtil.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveAlgorithmsUtil.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostModel.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostModel.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveDefaultCostModel.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveDefaultCostModel.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveRelMdCost.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveRelMdCost.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSortExchange.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSortExchange.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdCollation.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdCollation.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdDistribution.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdDistribution.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdMemory.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdMemory.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdParallelism.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdParallelism.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSize.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSize.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
      - copied unchanged from r1674737, hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
Removed:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostUtil.java
Modified:
    hive/trunk/   (props changed)
    hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/trunk/hbase-handler/pom.xml   (props changed)
    hive/trunk/metastore/bin/.gitignore
    hive/trunk/ql/.gitignore
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RowSchema.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAggregate.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveLimit.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdRowCount.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdUniqueKeys.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java
    hive/trunk/ql/src/test/queries/clientpositive/cbo_join.q
    hive/trunk/ql/src/test/queries/clientpositive/cbo_simple_select.q
    hive/trunk/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
    hive/trunk/ql/src/test/results/clientpositive/cbo_join.q.out
    hive/trunk/ql/src/test/results/clientpositive/cbo_simple_select.q.out
    hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer12.q.out
    hive/trunk/ql/src/test/results/clientpositive/ctas_colname.q.out
    hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_window.q.out
    hive/trunk/ql/src/test/results/clientpositive/groupby_resolution.q.out
    hive/trunk/ql/src/test/results/clientpositive/join32.q.out
    hive/trunk/ql/src/test/results/clientpositive/join32_lessSize.q.out
    hive/trunk/ql/src/test/results/clientpositive/join33.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_alt_syntax.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out
    hive/trunk/ql/src/test/results/clientpositive/ptf.q.out
    hive/trunk/ql/src/test/results/clientpositive/ptf_streaming.q.out
    hive/trunk/ql/src/test/results/clientpositive/quotedid_basic.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join32.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join33.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_cond_pushdown_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_cond_pushdown_4.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ptf.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ptf_streaming.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/subquery_in.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
    hive/trunk/ql/src/test/results/clientpositive/subquery_in.q.out
    hive/trunk/ql/src/test/results/clientpositive/subquery_in_explain_rewrite.q.out
    hive/trunk/ql/src/test/results/clientpositive/subquery_in_having.q.out
    hive/trunk/ql/src/test/results/clientpositive/subquery_notin.q.out
    hive/trunk/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/cbo_join.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/cbo_simple_select.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/ptf.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/ptf_streaming.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/subquery_in.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out
    hive/trunk/ql/src/test/results/clientpositive/union_remove_6_subq.q.out
    hive/trunk/ql/src/test/results/clientpositive/vectorized_ptf.q.out
    hive/trunk/ql/src/test/results/clientpositive/windowing_streaming.q.out

Propchange: hive/trunk/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Apr 20 06:14:38 2015
@@ -1,6 +1,6 @@
 /hive/branches/branch-0.11:1480385,1480458,1481120,1481344,1481346,1481348,1481352,1483872,1505184
 /hive/branches/branch-1.1:1658284,1659437,1659724
-/hive/branches/cbo:1605012-1627125
+/hive/branches/cbo:1605012-1674737
 /hive/branches/spark:1608589-1660298
 /hive/branches/tez:1494760-1622766
 /hive/branches/vectorization:1466908-1527856

Modified: hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Mon Apr 20 06:14:38 2015
@@ -703,6 +703,21 @@ public class HiveConf extends Configurat
 
     // CBO related
     HIVE_CBO_ENABLED("hive.cbo.enable", true, "Flag to control enabling Cost Based Optimizations using Calcite framework."),
+    HIVE_CBO_RETPATH_HIVEOP("hive.cbo.returnpath.hiveop", false, "Flag to control calcite plan to hive operator conversion"),
+    HIVE_CBO_EXTENDED_COST_MODEL("hive.cbo.costmodel.extended", false, "Flag to control enabling the extended cost model based on"
+                                 + "CPU, IO and cardinality. Otherwise, the cost model is based on cardinality."),
+    HIVE_CBO_COST_MODEL_CPU("hive.cbo.costmodel.cpu", "0.000001", "Default cost of a comparison"),
+    HIVE_CBO_COST_MODEL_NET("hive.cbo.costmodel.network", "150.0", "Default cost of a transfering a byte over network;"
+                                                                  + " expressed as multiple of CPU cost"),
+    HIVE_CBO_COST_MODEL_LFS_WRITE("hive.cbo.costmodel.local.fs.write", "4.0", "Default cost of writing a byte to local FS;"
+                                                                             + " expressed as multiple of NETWORK cost"),
+    HIVE_CBO_COST_MODEL_LFS_READ("hive.cbo.costmodel.local.fs.read", "4.0", "Default cost of reading a byte from local FS;"
+                                                                           + " expressed as multiple of NETWORK cost"),
+    HIVE_CBO_COST_MODEL_HDFS_WRITE("hive.cbo.costmodel.hdfs.write", "10.0", "Default cost of writing a byte to HDFS;"
+                                                                 + " expressed as multiple of Local FS write cost"),
+    HIVE_CBO_COST_MODEL_HDFS_READ("hive.cbo.costmodel.hdfs.read", "1.5", "Default cost of reading a byte from HDFS;"
+                                                                 + " expressed as multiple of Local FS read cost"),
+
 
     // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row,
     // need to remove by hive .13. Also, do not change default (see SMB operator)

Propchange: hive/trunk/hbase-handler/pom.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Apr 20 06:14:38 2015
@@ -1,5 +1,5 @@
 /hive/branches/branch-0.11/hbase-handler/pom.xml:1480385,1480458,1481120,1481344,1481346,1481348,1481352,1483872,1505184
-/hive/branches/cbo/hbase-handler/pom.xml:1605012-1627125
+/hive/branches/cbo/hbase-handler/pom.xml:1605012-1674737
 /hive/branches/spark/hbase-handler/pom.xml:1608589-1660298
 /hive/branches/tez/hbase-handler/pom.xml:1494760-1622766
 /hive/branches/vectorization/hbase-handler/pom.xml:1466908-1527856

Modified: hive/trunk/metastore/bin/.gitignore
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/bin/.gitignore?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/metastore/bin/.gitignore (original)
+++ hive/trunk/metastore/bin/.gitignore Mon Apr 20 06:14:38 2015
@@ -1 +1,2 @@
-# Dummy file to make Git recognize this empty directory
+/scripts/
+/src/

Modified: hive/trunk/ql/.gitignore
URL: http://svn.apache.org/viewvc/hive/trunk/ql/.gitignore?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/.gitignore (original)
+++ hive/trunk/ql/.gitignore Mon Apr 20 06:14:38 2015
@@ -1 +1,3 @@
 dependency-reduced-pom.xml
+/bin/
+/target/

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java Mon Apr 20 06:14:38 2015
@@ -1536,6 +1536,14 @@ public final class FunctionRegistry {
     return false;
   }
 
+  public static boolean pivotResult(String functionName) throws SemanticException {
+    WindowFunctionInfo windowInfo = getWindowFunctionInfo(functionName);
+    if (windowInfo != null) {
+      return windowInfo.isPivotResult();
+    }
+    return false;
+  }
+
   public static boolean isTableFunction(String functionName)
       throws SemanticException {
     FunctionInfo tFInfo = getFunctionInfo(functionName);

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RowSchema.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RowSchema.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RowSchema.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RowSchema.java Mon Apr 20 06:14:38 2015
@@ -22,6 +22,7 @@ import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Set;
 
 /**
@@ -102,6 +103,14 @@ public class RowSchema implements Serial
     return tableNames;
   }
 
+  public List<String> getColumnNames() {
+    List<String> columnNames = new ArrayList<String>();
+    for (ColumnInfo var : this.signature) {
+      columnNames.add(var.getInternalName());
+    }
+    return columnNames;
+  }
+
   @Override
   public boolean equals(Object obj) {
     if (!(obj instanceof RowSchema) || (obj == null)) {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java Mon Apr 20 06:14:38 2015
@@ -531,14 +531,15 @@ public final class ColumnPrunerProcFacto
 
       Operator<? extends OperatorDesc> child = op.getChildOperators().get(0);
 
-      List<String> childCols;
+      List<String> childCols = null;
       if (child instanceof CommonJoinOperator) {
-        childCols = cppCtx.getJoinPrunedColLists().get(child)
-            .get((byte) conf.getTag());
+        childCols = cppCtx.getJoinPrunedColLists().get(child) == null
+                ? null : cppCtx.getJoinPrunedColLists().get(child)
+                        .get((byte) conf.getTag());
       } else {
         childCols = cppCtx.getPrunedColList(child);
-
       }
+
       List<ExprNodeDesc> valCols = conf.getValueCols();
       List<String> valColNames = conf.getOutputValueColumnNames();
 
@@ -749,6 +750,7 @@ public final class ColumnPrunerProcFacto
         conf.setOutputColumnNames(newOutputColumnNames);
         handleChildren(op, cols, cppCtx);
       }
+
       return null;
     }
 
@@ -971,16 +973,16 @@ public final class ColumnPrunerProcFacto
         .getChildOperators();
 
     LOG.info("JOIN " + op.getIdentifier() + " oldExprs: " + conf.getExprs());
+
     List<String> childColLists = cppCtx.genColLists(op);
     if (childColLists == null) {
-        return;
-      }
-
+      return;
+    }
 
-  Map<Byte, List<String>> prunedColLists = new HashMap<Byte, List<String>>();
-  for (byte tag : conf.getTagOrder()) {
-    prunedColLists.put(tag, new ArrayList<String>());
-  }
+    Map<Byte, List<String>> prunedColLists = new HashMap<Byte, List<String>>();
+    for (byte tag : conf.getTagOrder()) {
+      prunedColLists.put(tag, new ArrayList<String>());
+    }
 
     //add the columns in join filters
     Set<Map.Entry<Byte, List<ExprNodeDesc>>> filters =
@@ -1076,6 +1078,7 @@ public final class ColumnPrunerProcFacto
     }
 
     LOG.info("JOIN " + op.getIdentifier() + " newExprs: " + conf.getExprs());
+
     op.setColumnExprMap(newColExprMap);
     conf.setOutputColumnNames(outputCols);
     op.getSchema().setSignature(rs);

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java Mon Apr 20 06:14:38 2015
@@ -242,4 +242,4 @@ public class NonBlockingOpDeDupProc impl
       return null;
     }
   }
-}
+}
\ No newline at end of file

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java Mon Apr 20 06:14:38 2015
@@ -64,10 +64,10 @@ public class Optimizer {
     // Add the transformation that computes the lineage information.
     transformations.add(new Generator());
     if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD)) {
-    transformations.add(new PredicateTransitivePropagate());
-    if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION)) {
-      transformations.add(new ConstantPropagate());
-    }
+      transformations.add(new PredicateTransitivePropagate());
+      if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION)) {
+        transformations.add(new ConstantPropagate());
+      }
       transformations.add(new SyntheticJoinPredicate());
       transformations.add(new PredicatePushDown());
     }
@@ -142,8 +142,11 @@ public class Optimizer {
     if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATION)) {
       transformations.add(new ReduceSinkDeDuplication());
     }
-    transformations.add(new NonBlockingOpDeDupProc());
-    if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEIDENTITYPROJECTREMOVER)) {
+    if(!HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP)) {
+      transformations.add(new NonBlockingOpDeDupProc());
+    }
+    if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEIDENTITYPROJECTREMOVER)
+        && !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP)) {
       transformations.add(new IdentityProjectRemover());
     }
     if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVELIMITOPTENABLE)) {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java Mon Apr 20 06:14:38 2015
@@ -28,8 +28,10 @@ import java.util.Set;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.plan.RelOptUtil.InputReferencedVisitor;
 import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Join;
 import org.apache.calcite.rel.core.RelFactories.ProjectFactory;
 import org.apache.calcite.rel.core.Sort;
+import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexCall;
@@ -50,13 +52,17 @@ import org.apache.calcite.sql.validate.S
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.Pair;
 import org.apache.calcite.util.Util;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ExprNodeConverter;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 
 import com.google.common.base.Function;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableMap.Builder;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 
@@ -319,11 +325,11 @@ public class HiveCalciteUtil {
       return this.mapOfProjIndxInJoinSchemaToLeafPInfo;
     }
 
-    public static JoinPredicateInfo constructJoinPredicateInfo(HiveJoin j) {
+    public static JoinPredicateInfo constructJoinPredicateInfo(Join j) {
       return constructJoinPredicateInfo(j, j.getCondition());
     }
 
-    public static JoinPredicateInfo constructJoinPredicateInfo(HiveJoin j, RexNode predicate) {
+    public static JoinPredicateInfo constructJoinPredicateInfo(Join j, RexNode predicate) {
       JoinPredicateInfo jpi = null;
       JoinLeafPredicateInfo jlpi = null;
       List<JoinLeafPredicateInfo> equiLPIList = new ArrayList<JoinLeafPredicateInfo>();
@@ -432,6 +438,16 @@ public class HiveCalciteUtil {
           .copyOf(projsFromRightPartOfJoinKeysInJoinSchema);
     }
 
+    public List<RexNode> getJoinKeyExprs(int input) {
+      if (input == 0) {
+        return this.joinKeyExprsFromLeft;
+      }
+      if (input == 1) {
+        return this.joinKeyExprsFromRight;
+      }
+      return null;
+    }
+
     public List<RexNode> getJoinKeyExprsFromLeft() {
       return this.joinKeyExprsFromLeft;
     }
@@ -461,7 +477,7 @@ public class HiveCalciteUtil {
       return this.projsFromRightPartOfJoinKeysInJoinSchema;
     }
 
-    private static JoinLeafPredicateInfo constructJoinLeafPredicateInfo(HiveJoin j, RexNode pe) {
+    private static JoinLeafPredicateInfo constructJoinLeafPredicateInfo(Join j, RexNode pe) {
       JoinLeafPredicateInfo jlpi = null;
       List<Integer> filterNulls = new ArrayList<Integer>();
       List<RexNode> joinKeyExprsFromLeft = new ArrayList<RexNode>();
@@ -472,7 +488,7 @@ public class HiveCalciteUtil {
       int rightOffSet = j.getLeft().getRowType().getFieldCount();
 
       // 1. Split leaf join predicate to expressions from left, right
-      RelOptUtil.splitJoinCondition(j.getSystemFieldList(), j.getLeft(), j.getRight(), pe,
+      HiveRelOptUtil.splitJoinCondition(j.getSystemFieldList(), j.getLeft(), j.getRight(), pe,
           joinKeyExprsFromLeft, joinKeyExprsFromRight, filterNulls, null);
 
       // 2. For left expressions, collect child projection indexes used
@@ -561,6 +577,107 @@ public class HiveCalciteUtil {
     return deterministic;
   }
 
+  public static <T> ImmutableMap<Integer, T> getColInfoMap(List<T> hiveCols,
+      int startIndx) {
+    Builder<Integer, T> bldr = ImmutableMap.<Integer, T> builder();
+
+    int indx = startIndx;
+    for (T ci : hiveCols) {
+      bldr.put(indx, ci);
+      indx++;
+    }
+
+    return bldr.build();
+  }
+  
+  public static ImmutableMap<Integer, VirtualColumn> shiftVColsMap(Map<Integer, VirtualColumn> hiveVCols,
+      int shift) {
+    Builder<Integer, VirtualColumn> bldr = ImmutableMap.<Integer, VirtualColumn> builder();
+
+    for (Integer pos : hiveVCols.keySet()) {
+      bldr.put(shift + pos, hiveVCols.get(pos));
+    }
+
+    return bldr.build();
+  }
+
+  public static ImmutableMap<Integer, VirtualColumn> getVColsMap(List<VirtualColumn> hiveVCols,
+      int startIndx) {
+    Builder<Integer, VirtualColumn> bldr = ImmutableMap.<Integer, VirtualColumn> builder();
+
+    int indx = startIndx;
+    for (VirtualColumn vc : hiveVCols) {
+      bldr.put(indx, vc);
+      indx++;
+    }
+
+    return bldr.build();
+  }
+
+  public static ImmutableMap<String, Integer> getColNameIndxMap(List<FieldSchema> tableFields) {
+    Builder<String, Integer> bldr = ImmutableMap.<String, Integer> builder();
+
+    int indx = 0;
+    for (FieldSchema fs : tableFields) {
+      bldr.put(fs.getName(), indx);
+      indx++;
+    }
+
+    return bldr.build();
+  }
+
+  public static ImmutableMap<String, Integer> getRowColNameIndxMap(List<RelDataTypeField> rowFields) {
+    Builder<String, Integer> bldr = ImmutableMap.<String, Integer> builder();
+
+    int indx = 0;
+    for (RelDataTypeField rdt : rowFields) {
+      bldr.put(rdt.getName(), indx);
+      indx++;
+    }
+
+    return bldr.build();
+  }
+
+  public static ImmutableList<RexNode> getInputRef(List<Integer> inputRefs, RelNode inputRel) {
+    ImmutableList.Builder<RexNode> bldr = ImmutableList.<RexNode> builder();
+    for (int i : inputRefs) {
+      bldr.add(new RexInputRef(i, (RelDataType) inputRel.getRowType().getFieldList().get(i).getType()));
+    }
+    return bldr.build();
+  }
+
+  public static ExprNodeDesc getExprNode(Integer inputRefIndx, RelNode inputRel,
+      ExprNodeConverter exprConv) {
+    ExprNodeDesc exprNode = null;
+    RexNode rexInputRef = new RexInputRef(inputRefIndx, (RelDataType) inputRel.getRowType()
+        .getFieldList().get(inputRefIndx).getType());
+    exprNode = rexInputRef.accept(exprConv);
+
+    return exprNode;
+  }
+
+  public static List<ExprNodeDesc> getExprNodes(List<Integer> inputRefs, RelNode inputRel,
+      String inputTabAlias) {
+    List<ExprNodeDesc> exprNodes = new ArrayList<ExprNodeDesc>();
+    List<RexNode> rexInputRefs = getInputRef(inputRefs, inputRel);
+    // TODO: Change ExprNodeConverter to be independent of Partition Expr
+    ExprNodeConverter exprConv = new ExprNodeConverter(inputTabAlias, inputRel.getRowType(), false, inputRel.getCluster().getTypeFactory());
+    for (RexNode iRef : rexInputRefs) {
+      exprNodes.add(iRef.accept(exprConv));
+    }
+    return exprNodes;
+  }
+  
+  public static List<String> getFieldNames(List<Integer> inputRefs, RelNode inputRel) {
+    List<String> fieldNames = new ArrayList<String>();
+    List<String> schemaNames = inputRel.getRowType().getFieldNames();
+    for (Integer iRef : inputRefs) {
+      fieldNames.add(schemaNames.get(iRef));
+    }
+    
+    return fieldNames;
+  }  
+
   /**
    * Walks over an expression and determines whether it is constant.
    */

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java Mon Apr 20 06:14:38 2015
@@ -20,21 +20,62 @@ package org.apache.hadoop.hive.ql.optimi
 import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
 import org.apache.calcite.rel.metadata.DefaultRelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelMetadataProvider;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCostModel;
+import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveDefaultCostModel;
+import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveOnTezCostModel;
+import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveRelMdCost;
+import org.apache.hadoop.hive.ql.optimizer.calcite.stats.HiveRelMdCollation;
 import org.apache.hadoop.hive.ql.optimizer.calcite.stats.HiveRelMdDistinctRowCount;
+import org.apache.hadoop.hive.ql.optimizer.calcite.stats.HiveRelMdDistribution;
+import org.apache.hadoop.hive.ql.optimizer.calcite.stats.HiveRelMdMemory;
+import org.apache.hadoop.hive.ql.optimizer.calcite.stats.HiveRelMdParallelism;
 import org.apache.hadoop.hive.ql.optimizer.calcite.stats.HiveRelMdRowCount;
 import org.apache.hadoop.hive.ql.optimizer.calcite.stats.HiveRelMdSelectivity;
+import org.apache.hadoop.hive.ql.optimizer.calcite.stats.HiveRelMdSize;
 import org.apache.hadoop.hive.ql.optimizer.calcite.stats.HiveRelMdUniqueKeys;
 
 import com.google.common.collect.ImmutableList;
 
 public class HiveDefaultRelMetadataProvider {
-  private HiveDefaultRelMetadataProvider() {
+
+  private final HiveConf hiveConf;
+
+
+  /** Creates a provider factory bound to the given Hive configuration. */
+  public HiveDefaultRelMetadataProvider(HiveConf hiveConf) {
+    this.hiveConf = hiveConf;
+  }
+
+  /**
+   * Assembles the chained metadata provider used by the CBO. The cost model is
+   * chosen from configuration: the Tez cost model when the execution engine is
+   * "tez" AND the extended CBO cost model is enabled, otherwise the default
+   * cost model. Hive-specific handlers are consulted first; the Calcite
+   * DefaultRelMetadataProvider is the final fallback in the chain.
+   */
+  public RelMetadataProvider getMetadataProvider() {
+
+    // Create cost metadata provider
+    final HiveCostModel cm;
+    if (HiveConf.getVar(this.hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")
+            && HiveConf.getBoolVar(this.hiveConf, HiveConf.ConfVars.HIVE_CBO_EXTENDED_COST_MODEL)) {
+      cm = HiveOnTezCostModel.getCostModel(hiveConf);
+    } else {
+      cm = HiveDefaultCostModel.getCostModel();
+    }
+
+    // Get max split size for HiveRelMdParallelism
+    final Double maxSplitSize = (double) HiveConf.getLongVar(
+            this.hiveConf,
+            HiveConf.ConfVars.MAPREDMAXSPLITSIZE);
+
+    // Return MD provider
+    return ChainedRelMetadataProvider.of(ImmutableList
+            .of(
+                    HiveRelMdDistinctRowCount.SOURCE,
+                    new HiveRelMdCost(cm).getMetadataProvider(),
+                    HiveRelMdSelectivity.SOURCE,
+                    HiveRelMdRowCount.SOURCE,
+                    HiveRelMdUniqueKeys.SOURCE,
+                    HiveRelMdSize.SOURCE,
+                    HiveRelMdMemory.SOURCE,
+                    new HiveRelMdParallelism(maxSplitSize).getMetadataProvider(),
+                    HiveRelMdDistribution.SOURCE,
+                    HiveRelMdCollation.SOURCE,
+                    new DefaultRelMetadataProvider()));
   }
 
-  public static final RelMetadataProvider INSTANCE = ChainedRelMetadataProvider.of(ImmutableList
-                                                       .of(HiveRelMdDistinctRowCount.SOURCE,
-                                                           HiveRelMdSelectivity.SOURCE,
-                                                           HiveRelMdRowCount.SOURCE,
-                                                           HiveRelMdUniqueKeys.SOURCE,
-                                                           new DefaultRelMetadataProvider()));
 }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java Mon Apr 20 06:14:38 2015
@@ -28,21 +28,31 @@ import java.util.concurrent.atomic.Atomi
 import org.apache.calcite.plan.RelOptAbstractTable;
 import org.apache.calcite.plan.RelOptSchema;
 import org.apache.calcite.plan.RelOptUtil.InputFinder;
+import org.apache.calcite.rel.RelCollation;
+import org.apache.calcite.rel.RelCollationTraitDef;
+import org.apache.calcite.rel.RelDistribution;
+import org.apache.calcite.rel.RelFieldCollation;
+import org.apache.calcite.rel.RelFieldCollation.Direction;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.logical.LogicalTableScan;
 import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ExprNodeConverter;
 import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.hadoop.hive.ql.plan.ColStatistics;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -51,15 +61,16 @@ import org.apache.hadoop.hive.ql.stats.S
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableMap.Builder;
+import com.google.common.collect.Lists;
 
 public class RelOptHiveTable extends RelOptAbstractTable {
   private final Table                             hiveTblMetadata;
-  private final String                            tblAlias;
   private final ImmutableList<ColumnInfo>         hiveNonPartitionCols;
+  private final ImmutableList<ColumnInfo>         hivePartitionCols;
   private final ImmutableMap<Integer, ColumnInfo> hiveNonPartitionColsMap;
   private final ImmutableMap<Integer, ColumnInfo> hivePartitionColsMap;
-  private final int                               noOfProjs;
+  private final ImmutableList<VirtualColumn>      hiveVirtualCols;
+  private final int                               noOfNonVirtualCols;
   final HiveConf                                  hiveConf;
 
   private double                                  rowCount        = -1;
@@ -68,36 +79,61 @@ public class RelOptHiveTable extends Rel
   Map<String, PrunedPartitionList>                partitionCache;
   AtomicInteger                                   noColsMissingStats;
 
-  protected static final Log                      LOG               = LogFactory
-                                                                        .getLog(RelOptHiveTable.class
-                                                                            .getName());
-
-  public RelOptHiveTable(RelOptSchema calciteSchema, String qualifiedTblName, String tblAlias, RelDataType rowType,
-      Table hiveTblMetadata, List<ColumnInfo> hiveNonPartitionCols,
-      List<ColumnInfo> hivePartitionCols, HiveConf hconf, Map<String, PrunedPartitionList> partitionCache, AtomicInteger noColsMissingStats) {
+  protected static final Log                      LOG             = LogFactory
+                                                                      .getLog(RelOptHiveTable.class
+                                                                          .getName());
+
+  public RelOptHiveTable(RelOptSchema calciteSchema, String qualifiedTblName,
+      RelDataType rowType, Table hiveTblMetadata, List<ColumnInfo> hiveNonPartitionCols,
+      List<ColumnInfo> hivePartitionCols, List<VirtualColumn> hiveVirtualCols, HiveConf hconf,
+      Map<String, PrunedPartitionList> partitionCache, AtomicInteger noColsMissingStats) {
     super(calciteSchema, qualifiedTblName, rowType);
     this.hiveTblMetadata = hiveTblMetadata;
-    this.tblAlias = tblAlias;
     this.hiveNonPartitionCols = ImmutableList.copyOf(hiveNonPartitionCols);
-    this.hiveNonPartitionColsMap = getColInfoMap(hiveNonPartitionCols, 0);
-    this.hivePartitionColsMap = getColInfoMap(hivePartitionCols, hiveNonPartitionColsMap.size());
-    this.noOfProjs = hiveNonPartitionCols.size() + hivePartitionCols.size();
+    this.hiveNonPartitionColsMap = HiveCalciteUtil.getColInfoMap(hiveNonPartitionCols, 0);
+    this.hivePartitionCols = ImmutableList.copyOf(hivePartitionCols);
+    this.hivePartitionColsMap = HiveCalciteUtil.getColInfoMap(hivePartitionCols, hiveNonPartitionColsMap.size());
+    this.noOfNonVirtualCols = hiveNonPartitionCols.size() + hivePartitionCols.size();
+    this.hiveVirtualCols = ImmutableList.copyOf(hiveVirtualCols);
     this.hiveConf = hconf;
     this.partitionCache = partitionCache;
     this.noColsMissingStats = noColsMissingStats;
   }
 
-  private static ImmutableMap<Integer, ColumnInfo> getColInfoMap(List<ColumnInfo> hiveCols,
-      int startIndx) {
-    Builder<Integer, ColumnInfo> bldr = ImmutableMap.<Integer, ColumnInfo> builder();
-
-    int indx = startIndx;
-    for (ColumnInfo ci : hiveCols) {
-      bldr.put(indx, ci);
-      indx++;
+  /**
+   * Creates a copy of this table restricted to the columns present in
+   * {@code newRowType}. Each new field is matched back to the original schema
+   * by name and classified as non-partition, partition, or virtual; an unknown
+   * name is a hard error.
+   *
+   * @param newRowType row type of the copy (subset of this table's columns)
+   * @return a new RelOptHiveTable sharing metadata, conf and caches with this one
+   * @throws RuntimeException if a field of newRowType is not in the original schema
+   */
+  public RelOptHiveTable copy(RelDataType newRowType) {
+    // 1. Build map of column name to col index of original schema
+    // Assumption: Hive Table can not contain duplicate column names
+    Map<String, Integer> nameToColIndxMap = new HashMap<String, Integer>();
+    for (RelDataTypeField f : this.rowType.getFieldList()) {
+      nameToColIndxMap.put(f.getName(), f.getIndex());
+    }
+
+    // 2. Build nonPart/Part/Virtual column info for new RowSchema
+    List<ColumnInfo> newHiveNonPartitionCols = new ArrayList<ColumnInfo>();
+    List<ColumnInfo> newHivePartitionCols = new ArrayList<ColumnInfo>();
+    List<VirtualColumn> newHiveVirtualCols = new ArrayList<VirtualColumn>();
+    Map<Integer, VirtualColumn> virtualColInfoMap = HiveCalciteUtil.getVColsMap(this.hiveVirtualCols,
+        this.noOfNonVirtualCols);
+    Integer originalColIndx;
+    ColumnInfo cInfo;
+    VirtualColumn vc;
+    for (RelDataTypeField f : newRowType.getFieldList()) {
+      originalColIndx = nameToColIndxMap.get(f.getName());
+      if ((cInfo = hiveNonPartitionColsMap.get(originalColIndx)) != null) {
+        newHiveNonPartitionCols.add(new ColumnInfo(cInfo));
+      } else if ((cInfo = hivePartitionColsMap.get(originalColIndx)) != null) {
+        newHivePartitionCols.add(new ColumnInfo(cInfo));
+      } else if ((vc = virtualColInfoMap.get(originalColIndx)) != null) {
+        newHiveVirtualCols.add(vc);
+      } else {
+        throw new RuntimeException("Copy encountered a column not seen in original TS");
+      }
     }
 
-    return bldr.build();
+    // 3. Build new Table
+    return new RelOptHiveTable(this.schema, this.name, newRowType,
+        this.hiveTblMetadata, newHiveNonPartitionCols, newHivePartitionCols, newHiveVirtualCols,
+        this.hiveConf, this.partitionCache, this.noColsMissingStats);
   }
 
   @Override
@@ -116,16 +152,57 @@ public class RelOptHiveTable extends Rel
   }
 
   @Override
+  /**
+   * Exposes the Hive table's SORTED BY specification as a Calcite collation.
+   * Each sort column is located by name in the storage-descriptor column list
+   * to obtain its ordinal position; the sort direction comes from the Order's
+   * ASC/DESC flag. Returns a single canonized HiveRelCollation (empty when the
+   * table declares no sort columns).
+   */
+  public List<RelCollation> getCollationList() {
+    ImmutableList.Builder<RelFieldCollation> collationList = new ImmutableList.Builder<RelFieldCollation>();
+    for (Order sortColumn : this.hiveTblMetadata.getSortCols()) {
+      for (int i=0; i<this.hiveTblMetadata.getSd().getCols().size(); i++) {
+        FieldSchema field = this.hiveTblMetadata.getSd().getCols().get(i);
+        if (field.getName().equals(sortColumn.getCol())) {
+          Direction direction;
+          if (sortColumn.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
+            direction = Direction.ASCENDING;
+          }
+          else {
+            direction = Direction.DESCENDING;
+          }
+          collationList.add(new RelFieldCollation(i,direction));
+          break;
+        }
+      }
+    }
+    return new ImmutableList.Builder<RelCollation>()
+            .add(RelCollationTraitDef.INSTANCE.canonize(
+                    new HiveRelCollation(collationList.build())))
+            .build();
+  }
+
+  /**
+   * Exposes the Hive table's CLUSTERED BY (bucketing) columns as a Calcite
+   * hash distribution. Bucket columns are resolved by name to their ordinal
+   * positions in the storage-descriptor column list.
+   */
+  @Override
+  public RelDistribution getDistribution() {
+    ImmutableList.Builder<Integer> columnPositions = new ImmutableList.Builder<Integer>();
+    for (String bucketColumn : this.hiveTblMetadata.getBucketCols()) {
+      for (int i=0; i<this.hiveTblMetadata.getSd().getCols().size(); i++) {
+        FieldSchema field = this.hiveTblMetadata.getSd().getCols().get(i);
+        if (field.getName().equals(bucketColumn)) {
+          columnPositions.add(i);
+          break;
+        }
+      }
+    }
+    return new HiveRelDistribution(RelDistribution.Type.HASH_DISTRIBUTED,
+            columnPositions.build());
+  }
+
+  @Override
   public double getRowCount() {
     if (rowCount == -1) {
       if (null == partitionList) {
-        // we are here either unpartitioned table or partitioned table with no predicates
+        // we are here either unpartitioned table or partitioned table with no
+        // predicates
         computePartitionList(hiveConf, null);
       }
       if (hiveTblMetadata.isPartitioned()) {
-        List<Long> rowCounts = StatsUtils.getBasicStatForPartitions(
-            hiveTblMetadata, partitionList.getNotDeniedPartns(),
-            StatsSetupConst.ROW_COUNT);
+        List<Long> rowCounts = StatsUtils.getBasicStatForPartitions(hiveTblMetadata,
+            partitionList.getNotDeniedPartns(), StatsSetupConst.ROW_COUNT);
         rowCount = StatsUtils.getSumIgnoreNegatives(rowCounts);
 
       } else {
@@ -143,19 +220,6 @@ public class RelOptHiveTable extends Rel
     return hiveTblMetadata;
   }
 
-  public String getTableAlias() {
-    // NOTE: Calcite considers tbls to be equal if their names are the same. Hence
-    // we need to provide Calcite the fully qualified table name (dbname.tblname)
-    // and not the user provided aliases.
-    // However in HIVE DB name can not appear in select list; in case of join
-    // where table names differ only in DB name, Hive would require user
-    // introducing explicit aliases for tbl.
-    if (tblAlias == null)
-      return hiveTblMetadata.getTableName();
-    else
-      return tblAlias;
-  }
-
   private String getColNamesForLogging(Set<String> colLst) {
     StringBuffer sb = new StringBuffer();
     boolean firstEntry = true;
@@ -173,22 +237,27 @@ public class RelOptHiveTable extends Rel
+  /**
+   * Populates {@code partitionList} via the partition pruner. When the table
+   * is unpartitioned, or there is no pruning predicate referencing any input
+   * column, all partitions are retrieved (null pruning expression); otherwise
+   * the Calcite predicate is converted to an ExprNodeDesc and only qualifying
+   * partitions are kept. HiveException is rethrown as RuntimeException.
+   */
   public void computePartitionList(HiveConf conf, RexNode pruneNode) {
 
     try {
-      if (!hiveTblMetadata.isPartitioned() || pruneNode == null || InputFinder.bits(pruneNode).length() == 0 ) {
-        // there is no predicate on partitioning column, we need all partitions in this case.
-        partitionList = PartitionPruner.prune(hiveTblMetadata, null, conf, getName(), partitionCache);
+      if (!hiveTblMetadata.isPartitioned() || pruneNode == null
+          || InputFinder.bits(pruneNode).length() == 0) {
+        // there is no predicate on partitioning column, we need all partitions
+        // in this case.
+        partitionList = PartitionPruner.prune(hiveTblMetadata, null, conf, getName(),
+            partitionCache);
         return;
       }
 
       // We have valid pruning expressions, only retrieve qualifying partitions
-      ExprNodeDesc pruneExpr = pruneNode.accept(new ExprNodeConverter(getName(), getRowType(), true, getRelOptSchema().getTypeFactory()));
+      ExprNodeDesc pruneExpr = pruneNode.accept(new ExprNodeConverter(getName(), getRowType(),
+          true, this.getRelOptSchema().getTypeFactory()));
 
-      partitionList = PartitionPruner.prune(hiveTblMetadata, pruneExpr, conf, getName(), partitionCache);
+      partitionList = PartitionPruner.prune(hiveTblMetadata, pruneExpr, conf, getName(),
+          partitionCache);
     } catch (HiveException he) {
       throw new RuntimeException(he);
     }
   }
 
-  private void updateColStats(Set<Integer> projIndxLst) {
+  private void updateColStats(Set<Integer> projIndxLst, boolean allowNullColumnForMissingStats) {
     List<String> nonPartColNamesThatRqrStats = new ArrayList<String>();
     List<Integer> nonPartColIndxsThatRqrStats = new ArrayList<Integer>();
     List<String> partColNamesThatRqrStats = new ArrayList<String>();
@@ -289,10 +358,10 @@ public class RelOptHiveTable extends Rel
     if (colNamesFailedStats.isEmpty() && !partColNamesThatRqrStats.isEmpty()) {
       ColStatistics cStats = null;
       for (int i = 0; i < partColNamesThatRqrStats.size(); i++) {
-        cStats = new ColStatistics(hiveTblMetadata.getTableName(),
-            partColNamesThatRqrStats.get(i), hivePartitionColsMap.get(
-                partColIndxsThatRqrStats.get(i)).getTypeName());
-        cStats.setCountDistint(getDistinctCount(partitionList.getPartitions(),partColNamesThatRqrStats.get(i)));
+        cStats = new ColStatistics(hiveTblMetadata.getTableName(), partColNamesThatRqrStats.get(i),
+            hivePartitionColsMap.get(partColIndxsThatRqrStats.get(i)).getTypeName());
+        cStats.setCountDistint(getDistinctCount(partitionList.getPartitions(),
+            partColNamesThatRqrStats.get(i)));
         hiveColStatsMap.put(partColIndxsThatRqrStats.get(i), cStats);
       }
     }
@@ -301,9 +370,13 @@ public class RelOptHiveTable extends Rel
     if (!colNamesFailedStats.isEmpty()) {
       String logMsg = "No Stats for " + hiveTblMetadata.getCompleteName() + ", Columns: "
           + getColNamesForLogging(colNamesFailedStats);
-      LOG.error(logMsg);
       noColsMissingStats.getAndAdd(colNamesFailedStats.size());
-      throw new RuntimeException(logMsg);
+      if (allowNullColumnForMissingStats) {
+        LOG.warn(logMsg);
+      } else {
+        LOG.error(logMsg);
+        throw new RuntimeException(logMsg);
+      }
     }
   }
 
@@ -316,32 +389,34 @@ public class RelOptHiveTable extends Rel
   }
 
+  /**
+   * Convenience overload: column stats for the given projection indexes,
+   * failing (throwing) when stats are missing for any required column.
+   */
   public List<ColStatistics> getColStat(List<Integer> projIndxLst) {
-    ImmutableList.Builder<ColStatistics> colStatsBldr = ImmutableList.<ColStatistics> builder();
+    return getColStat(projIndxLst, false);
+  }
+
+  /**
+   * Returns column statistics for the given projection indexes, or for all
+   * non-virtual columns when {@code projIndxLst} is null.
+   *
+   * @param projIndxLst column indexes to fetch stats for; null means all
+   *          non-virtual columns
+   * @param allowNullColumnForMissingStats when true, missing stats are only
+   *          logged and the corresponding list entries may be null (the stats
+   *          map has no entry for them); when false, missing stats raise a
+   *          RuntimeException from updateColStats
+   * @return stats in the same order as the requested indexes
+   */
+  public List<ColStatistics> getColStat(List<Integer> projIndxLst, boolean allowNullColumnForMissingStats) {
+    List<ColStatistics> colStatsBldr = Lists.newArrayList();
 
     if (projIndxLst != null) {
-      updateColStats(new HashSet<Integer>(projIndxLst));
+      updateColStats(new HashSet<Integer>(projIndxLst), allowNullColumnForMissingStats);
       for (Integer i : projIndxLst) {
         colStatsBldr.add(hiveColStatsMap.get(i));
       }
     } else {
       List<Integer> pILst = new ArrayList<Integer>();
-      for (Integer i = 0; i < noOfProjs; i++) {
+      for (Integer i = 0; i < noOfNonVirtualCols; i++) {
         pILst.add(i);
       }
-      updateColStats(new HashSet<Integer>(pILst));
+      updateColStats(new HashSet<Integer>(pILst), allowNullColumnForMissingStats);
       for (Integer pi : pILst) {
         colStatsBldr.add(hiveColStatsMap.get(pi));
       }
     }
 
-    return colStatsBldr.build();
+    return colStatsBldr;
   }
 
   /*
-   * use to check if a set of columns are all partition columns.
-   * true only if:
-   * - all columns in BitSet are partition
-   * columns.
+   * use to check if a set of columns are all partition columns. true only if: -
+   * all columns in BitSet are partition columns.
    */
   public boolean containsPartitionColumnsOnly(ImmutableBitSet cols) {
 
@@ -352,4 +427,28 @@ public class RelOptHiveTable extends Rel
     }
     return true;
   }
+
+  /** @return the table's virtual columns (immutable). */
+  public List<VirtualColumn> getVirtualCols() {
+    return this.hiveVirtualCols;
+  }
+
+  /** @return the table's partition columns (immutable). */
+  public List<ColumnInfo> getPartColumns() {
+    return this.hivePartitionCols;
+  }
+
+  /** @return the table's non-partition columns (immutable). */
+  public List<ColumnInfo> getNonPartColumns() {
+    return this.hiveNonPartitionCols;
+  }
+
+  /** @return count of non-partition plus partition (i.e. non-virtual) columns. */
+  public int getNoOfNonVirtualCols() {
+    return noOfNonVirtualCols;
+  }
+
+  /** @return partition columns keyed by their global column index. */
+  public Map<Integer, ColumnInfo> getPartColInfoMap() {
+    return hivePartitionColsMap;
+  }
+
+  /** @return non-partition columns keyed by their global column index. */
+  public Map<Integer, ColumnInfo> getNonPartColInfoMap() {
+    return hiveNonPartitionColsMap;
+  }
 }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java Mon Apr 20 06:14:38 2015
@@ -90,22 +90,17 @@ public class HiveCost implements RelOptC
     return io;
   }
 
-  // TODO: If two cost is equal, could we do any better than comparing
-  // cardinality (may be some other heuristics to break the tie)
+  // Less-or-equal: compares primarily by total cpu+io, breaking exact ties by
+  // row count (replaces the old rows-only comparison).
   public boolean isLe(RelOptCost other) {
-    return this == other || this.rowCount <= other.getRows();
-    /*
-     * if (((this.dCpu + this.dIo) < (other.getCpu() + other.getIo())) ||
-     * ((this.dCpu + this.dIo) == (other.getCpu() + other.getIo()) && this.dRows
-     * <= other.getRows())) { return true; } else { return false; }
-     */
+    if ( (this.cpu + this.io < other.getCpu() + other.getIo()) ||
+          ((this.cpu + this.io == other.getCpu() + other.getIo()) &&
+          (this.rowCount <= other.getRows()))) {
+      return true;
+    }
+    return false;
   }
 
+  // Strictly-less: defined in terms of isLe/equals so it stays consistent
+  // with the cpu+io-then-rows ordering above.
   public boolean isLt(RelOptCost other) {
-    return this.rowCount < other.getRows();
-    /*
-     * return isLe(other) && !equals(other);
-     */
+    return isLe(other) && !equals(other);
   }
 
   public double getRows() {
@@ -113,21 +108,14 @@ public class HiveCost implements RelOptC
   }
 
+  // Cost equality: same reference, or equal cpu+io total AND equal row count.
+  // NOTE(review): this overloads equals(RelOptCost); it does not override
+  // Object.equals(Object) — confirm callers never rely on Object.equals.
   public boolean equals(RelOptCost other) {
-    return (this == other) || ((this.rowCount) == (other.getRows()));
-
-    /*
-     * //TODO: should we consider cardinality as well? return (this == other) ||
-     * ((this.dCpu + this.dIo) == (other.getCpu() + other.getIo()));
-     */
+    return (this == other) ||
+            ((this.cpu + this.io == other.getCpu() + other.getIo()) &&
+            (this.rowCount == other.getRows()));
   }
 
+  // Approximate equality: cpu+io totals within RelOptUtil.EPSILON of each
+  // other (row count is intentionally not considered here).
   public boolean isEqWithEpsilon(RelOptCost other) {
-    return (this == other) || (Math.abs((this.rowCount) - (other.getRows())) < RelOptUtil.EPSILON);
-    // Turn this one once we do the Algorithm selection in CBO
-    /*
-     * return (this == other) || (Math.abs((this.dCpu + this.dIo) -
-     * (other.getCpu() + other.getIo())) < RelOptUtil.EPSILON);
-     */
+    return (this == other) || (Math.abs((this.cpu + this.io) -
+            (other.getCpu() + other.getIo())) < RelOptUtil.EPSILON);
   }
 
   public RelOptCost minus(RelOptCost other) {
@@ -135,8 +123,8 @@ public class HiveCost implements RelOptC
       return this;
     }
 
-    return new HiveCost(this.rowCount - other.getRows(), this.cpu - other.getCpu(), this.io
-        - other.getIo());
+    return new HiveCost(this.rowCount - other.getRows(), this.cpu - other.getCpu(),
+        this.io - other.getIo());
   }
 
   public RelOptCost multiplyBy(double factor) {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java Mon Apr 20 06:14:38 2015
@@ -22,6 +22,7 @@ import org.apache.calcite.plan.Conventio
 import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.volcano.VolcanoPlanner;
 import org.apache.calcite.rel.RelCollationTraitDef;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveConfigContext;
 
 /**
  * Refinement of {@link org.apache.calcite.plan.volcano.VolcanoPlanner} for Hive.
@@ -34,12 +35,12 @@ public class HiveVolcanoPlanner extends
   private static final boolean ENABLE_COLLATION_TRAIT = true;
 
   /** Creates a HiveVolcanoPlanner. */
-  public HiveVolcanoPlanner() {
-    super(HiveCost.FACTORY, null);
+  // NOTE(review): 'conf' is handed to VolcanoPlanner as its external Context
+  // (second ctor argument) — presumably retrieved later via getContext();
+  // confirm against the Calcite VolcanoPlanner API.
+  public HiveVolcanoPlanner(HiveConfigContext conf) {
+    super(HiveCost.FACTORY, conf);
   }
 
-  public static RelOptPlanner createPlanner() {
-    final VolcanoPlanner planner = new HiveVolcanoPlanner();
+  public static RelOptPlanner createPlanner(HiveConfigContext conf) {
+    final VolcanoPlanner planner = new HiveVolcanoPlanner(conf);
     planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
     if (ENABLE_COLLATION_TRAIT) {
       planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAggregate.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAggregate.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAggregate.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAggregate.java Mon Apr 20 06:14:38 2015
@@ -31,7 +31,6 @@ import org.apache.calcite.rel.core.RelFa
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
-import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCost;
 
 import com.google.common.collect.ImmutableList;
 
@@ -39,6 +38,8 @@ public class HiveAggregate extends Aggre
 
   public static final HiveAggRelFactory HIVE_AGGR_REL_FACTORY = new HiveAggRelFactory();
 
+
+
   public HiveAggregate(RelOptCluster cluster, RelTraitSet traitSet, RelNode child,
       boolean indicator, ImmutableBitSet groupSet, List<ImmutableBitSet> groupSets,
       List<AggregateCall> aggCalls) throws InvalidRelException {
@@ -66,7 +67,7 @@ public class HiveAggregate extends Aggre
 
+  // Self-cost now delegates to the metadata provider chain (non-cumulative
+  // cost handler) instead of returning a fixed zero cost.
   @Override
   public RelOptCost computeSelfCost(RelOptPlanner planner) {
-    return HiveCost.FACTORY.makeZeroCost();
+    return RelMetadataQuery.getNonCumulativeCost(this);
   }
 
   @Override
@@ -75,6 +76,11 @@ public class HiveAggregate extends Aggre
         .makeLiteral(true));
   }
 
+  /**
+   * True when the input's distribution keys contain every group-by column,
+   * i.e. the data arriving at this aggregate is already partitioned on the
+   * grouping keys.
+   */
+  public boolean isBucketedInput() {
+    return RelMetadataQuery.distribution(this.getInput()).getKeys().
+            containsAll(groupSet.asList());
+  }
+
   private static class HiveAggRelFactory implements AggregateFactory {
 
     @Override

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java Mon Apr 20 06:14:38 2015
@@ -24,9 +24,9 @@ import org.apache.calcite.plan.RelTraitS
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Filter;
 import org.apache.calcite.rel.core.RelFactories.FilterFactory;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rex.RexNode;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
-import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCost;
 
 public class HiveFilter extends Filter implements HiveRelNode {
 
@@ -48,7 +48,7 @@ public class HiveFilter extends Filter i
 
+  // Self-cost now delegates to the metadata provider chain (non-cumulative
+  // cost handler) instead of returning a fixed zero cost.
   @Override
   public RelOptCost computeSelfCost(RelOptPlanner planner) {
-    return HiveCost.FACTORY.makeZeroCost();
+    return RelMetadataQuery.getNonCumulativeCost(this);
   }
 
   /**

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java Mon Apr 20 06:14:38 2015
@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
 
+import java.util.ArrayList;
 import java.util.Collections;
+import java.util.List;
 import java.util.Set;
 
 import org.apache.calcite.plan.RelOptCluster;
@@ -25,7 +27,11 @@ import org.apache.calcite.plan.RelOptCos
 import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.InvalidRelException;
+import org.apache.calcite.rel.RelCollation;
+import org.apache.calcite.rel.RelCollations;
+import org.apache.calcite.rel.RelDistribution;
 import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.RelWriter;
 import org.apache.calcite.rel.core.Join;
 import org.apache.calcite.rel.core.JoinRelType;
 import org.apache.calcite.rel.core.RelFactories.JoinFactory;
@@ -33,38 +39,38 @@ import org.apache.calcite.rel.metadata.R
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.util.ImmutableBitSet;
+import org.apache.calcite.util.ImmutableIntList;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
-import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCost;
+import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCostModel.JoinAlgorithm;
+import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveDefaultCostModel.DefaultJoinAlgorithm;
+
+import com.google.common.collect.ImmutableList;
 
 //TODO: Should we convert MultiJoin to be a child of HiveJoin
 public class HiveJoin extends Join implements HiveRelNode {
-  // NOTE: COMMON_JOIN & SMB_JOIN are Sort Merge Join (in case of COMMON_JOIN
-  // each parallel computation handles multiple splits where as in case of SMB
-  // each parallel computation handles one bucket). MAP_JOIN and BUCKET_JOIN is
-  // hash joins where MAP_JOIN keeps the whole data set of non streaming tables
-  // in memory where as BUCKET_JOIN keeps only the b
-  public enum JoinAlgorithm {
-    NONE, COMMON_JOIN, MAP_JOIN, BUCKET_JOIN, SMB_JOIN
-  }
+  
+  public static final JoinFactory HIVE_JOIN_FACTORY = new HiveJoinFactoryImpl();
 
   public enum MapJoinStreamingRelation {
     NONE, LEFT_RELATION, RIGHT_RELATION
   }
 
-  public static final JoinFactory HIVE_JOIN_FACTORY = new HiveJoinFactoryImpl();
-
   private final boolean leftSemiJoin;
-  private final JoinAlgorithm      joinAlgorithm;
-  //This will be used once we do Join Algorithm selection
-  @SuppressWarnings("unused")
-  private final MapJoinStreamingRelation mapJoinStreamingSide = MapJoinStreamingRelation.NONE;
+  private final JoinPredicateInfo joinPredInfo;
+  private JoinAlgorithm joinAlgorithm;
+  private RelOptCost joinCost;
+
 
+  /**
+   * Static factory for HiveJoin. The join starts with the default join
+   * algorithm (DefaultJoinAlgorithm.INSTANCE); the checked InvalidRelException
+   * from the constructor is rethrown as RuntimeException.
+   */
   public static HiveJoin getJoin(RelOptCluster cluster, RelNode left, RelNode right,
       RexNode condition, JoinRelType joinType, boolean leftSemiJoin) {
     try {
       Set<String> variablesStopped = Collections.emptySet();
-      return new HiveJoin(cluster, null, left, right, condition, joinType, variablesStopped,
-          JoinAlgorithm.NONE, null, leftSemiJoin);
+      HiveJoin join = new HiveJoin(cluster, null, left, right, condition, joinType, variablesStopped,
+              DefaultJoinAlgorithm.INSTANCE, leftSemiJoin);
+      return join;
     } catch (InvalidRelException e) {
       throw new RuntimeException(e);
     }
   }
@@ -72,10 +78,10 @@ public class HiveJoin extends Join imple
 
   protected HiveJoin(RelOptCluster cluster, RelTraitSet traits, RelNode left, RelNode right,
       RexNode condition, JoinRelType joinType, Set<String> variablesStopped,
-      JoinAlgorithm joinAlgo, MapJoinStreamingRelation streamingSideForMapJoin, boolean leftSemiJoin)
-      throws InvalidRelException {
+      JoinAlgorithm joinAlgo, boolean leftSemiJoin) throws InvalidRelException {
     super(cluster, TraitsUtil.getDefaultTraitSet(cluster), left, right, condition, joinType,
         variablesStopped);
+    this.joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(this);
     this.joinAlgorithm = joinAlgo;
     this.leftSemiJoin = leftSemiJoin;
   }
@@ -90,7 +96,7 @@ public class HiveJoin extends Join imple
     try {
       Set<String> variablesStopped = Collections.emptySet();
       return new HiveJoin(getCluster(), traitSet, left, right, conditionExpr, joinType,
-          variablesStopped, JoinAlgorithm.NONE, null, leftSemiJoin);
+          variablesStopped, joinAlgorithm, leftSemiJoin);
     } catch (InvalidRelException e) {
       // Semantic error not possible. Must be a bug. Convert to
       // internal error.
@@ -98,8 +104,97 @@ public class HiveJoin extends Join imple
     }
   }
 
+  public JoinPredicateInfo getJoinPredicateInfo() {
+    return joinPredInfo;
+  }
+
+  public void setJoinAlgorithm(JoinAlgorithm joinAlgorithm) {
+    this.joinAlgorithm = joinAlgorithm;
+  }
+
   public JoinAlgorithm getJoinAlgorithm() {
-    return joinAlgorithm;
+    return this.joinAlgorithm;
+  }
+
+  public ImmutableList<RelCollation> getCollation() {
+    return joinAlgorithm.getCollation(this);
+  }
+
+  public RelDistribution getDistribution() {
+    return joinAlgorithm.getDistribution(this);
+  }
+
+  public Double getMemory() {
+    return joinAlgorithm.getMemory(this);
+  }
+
+  public Double getCumulativeMemoryWithinPhaseSplit() {
+    return joinAlgorithm.getCumulativeMemoryWithinPhaseSplit(this);
+  }
+
+  public Boolean isPhaseTransition() {
+    return joinAlgorithm.isPhaseTransition(this);
+  }
+
+  public Integer getSplitCount() {
+    return joinAlgorithm.getSplitCount(this);
+  }
+
+  public MapJoinStreamingRelation getStreamingSide() {
+    Double leftInputSize = RelMetadataQuery.memory(left);
+    Double rightInputSize = RelMetadataQuery.memory(right);
+    if (leftInputSize == null && rightInputSize == null) {
+      return MapJoinStreamingRelation.NONE;
+    } else if (leftInputSize != null &&
+            (rightInputSize == null ||
+            (leftInputSize < rightInputSize))) {
+      return MapJoinStreamingRelation.RIGHT_RELATION;
+    } else if (rightInputSize != null &&
+            (leftInputSize == null ||
+            (rightInputSize <= leftInputSize))) {
+      return MapJoinStreamingRelation.LEFT_RELATION;
+    }
+    return MapJoinStreamingRelation.NONE;
+  }
+
+  public RelNode getStreamingInput() {
+    MapJoinStreamingRelation mapJoinStreamingSide = getStreamingSide();
+    RelNode smallInput;
+    if (mapJoinStreamingSide == MapJoinStreamingRelation.LEFT_RELATION) {
+      smallInput = this.getRight();
+    } else if (mapJoinStreamingSide == MapJoinStreamingRelation.RIGHT_RELATION) {
+      smallInput = this.getLeft();
+    } else {
+      smallInput = null;
+    }
+    return smallInput;
+  }
+
+  public ImmutableBitSet getSortedInputs() {
+    ImmutableBitSet.Builder sortedInputsBuilder = new ImmutableBitSet.Builder();
+    JoinPredicateInfo joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.
+            constructJoinPredicateInfo(this);
+    List<ImmutableIntList> joinKeysInChildren = new ArrayList<ImmutableIntList>();
+    joinKeysInChildren.add(
+            ImmutableIntList.copyOf(
+                    joinPredInfo.getProjsFromLeftPartOfJoinKeysInChildSchema()));
+    joinKeysInChildren.add(
+            ImmutableIntList.copyOf(
+                    joinPredInfo.getProjsFromRightPartOfJoinKeysInChildSchema()));
+
+    for (int i=0; i<this.getInputs().size(); i++) {
+      boolean correctOrderFound = RelCollations.contains(
+              RelMetadataQuery.collations(this.getInputs().get(i)),
+              joinKeysInChildren.get(i));
+      if (correctOrderFound) {
+        sortedInputsBuilder.set(i);
+      }
+    }
+    return sortedInputsBuilder.build();
+  }
+
+  public void setJoinCost(RelOptCost joinCost) {
+    this.joinCost = joinCost;
   }
 
   public boolean isLeftSemiJoin() {
@@ -111,9 +206,16 @@ public class HiveJoin extends Join imple
    */
   @Override
   public RelOptCost computeSelfCost(RelOptPlanner planner) {
-    double leftRCount = RelMetadataQuery.getRowCount(getLeft());
-    double rightRCount = RelMetadataQuery.getRowCount(getRight());
-    return HiveCost.FACTORY.makeCost(leftRCount + rightRCount, 0.0, 0.0);
+    return RelMetadataQuery.getNonCumulativeCost(this);
+  }
+
+  @Override
+  public RelWriter explainTerms(RelWriter pw) {
+    return super.explainTerms(pw)
+        .item("algorithm", joinAlgorithm == null ?
+                "none" : joinAlgorithm)
+        .item("cost", joinCost == null ?
+                "not available" : joinCost);
   }
 
   /**
@@ -154,4 +256,5 @@ public class HiveJoin extends Join imple
       return getJoin(left.getCluster(), left, right, condition, joinType, false);
     }
   }
+
 }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveLimit.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveLimit.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveLimit.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveLimit.java Mon Apr 20 06:14:38 2015
@@ -25,9 +25,9 @@ import org.apache.calcite.plan.RelOptPla
 import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.SingleRel;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rex.RexNode;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
-import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCost;
 
 public class HiveLimit extends SingleRel implements HiveRelNode {
   private final RexNode offset;
@@ -52,6 +52,6 @@ public class HiveLimit extends SingleRel
 
   @Override
   public RelOptCost computeSelfCost(RelOptPlanner planner) {
-    return HiveCost.FACTORY.makeZeroCost();
+    return RelMetadataQuery.getNonCumulativeCost(this);
   }
 }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java Mon Apr 20 06:14:38 2015
@@ -29,6 +29,7 @@ import org.apache.calcite.rel.RelCollati
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Project;
 import org.apache.calcite.rel.core.RelFactories.ProjectFactory;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexBuilder;
@@ -42,7 +43,6 @@ import org.apache.hadoop.hive.ql.optimiz
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException.UnsupportedFeature;
 import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCost;
-
 import com.google.common.collect.ImmutableList;
 
 public class HiveProject extends Project implements HiveRelNode {
@@ -172,7 +172,7 @@ public class HiveProject extends Project
 
   @Override
   public RelOptCost computeSelfCost(RelOptPlanner planner) {
-    return HiveCost.FACTORY.makeZeroCost();
+    return RelMetadataQuery.getNonCumulativeCost(this);
   }
 
   @Override

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java Mon Apr 20 06:14:38 2015
@@ -17,20 +17,34 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
 
+import java.util.ArrayList;
+import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptCost;
 import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.RelWriter;
+import org.apache.calcite.rel.core.RelFactories;
 import org.apache.calcite.rel.core.TableScan;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.util.ImmutableBitSet;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
-import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCost;
 import org.apache.hadoop.hive.ql.plan.ColStatistics;
 
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableList.Builder;
+
 
 /**
  * Relational expression representing a scan of a HiveDB collection.
@@ -42,6 +56,20 @@ import org.apache.hadoop.hive.ql.plan.Co
  */
 public class HiveTableScan extends TableScan implements HiveRelNode {
 
+  private final RelDataType hiveTableScanRowType;
+  private final ImmutableList<Integer> neededColIndxsFrmReloptHT;
+  private final String tblAlias;
+  private final String concatQbIDAlias;
+  private final boolean useQBIdInDigest;
+
+  public String getTableAlias() {
+    return tblAlias;
+  }
+
+  public String getConcatQbIDAlias() {
+    return concatQbIDAlias;
+  }
+
   /**
    * Creates a HiveTableScan.
    *
@@ -55,9 +83,19 @@ public class HiveTableScan extends Table
    *          HiveDB table
    */
   public HiveTableScan(RelOptCluster cluster, RelTraitSet traitSet, RelOptHiveTable table,
-      RelDataType rowtype) {
+      String alias, String concatQbIDAlias, boolean useQBIdInDigest) {
+    this(cluster, traitSet, table, alias, concatQbIDAlias, table.getRowType(), useQBIdInDigest);
+  }
+
+  private HiveTableScan(RelOptCluster cluster, RelTraitSet traitSet, RelOptHiveTable table,
+      String alias, String concatQbIDAlias, RelDataType newRowtype, boolean useQBIdInDigest) {
     super(cluster, TraitsUtil.getDefaultTraitSet(cluster), table);
     assert getConvention() == HiveRelNode.CONVENTION;
+    this.tblAlias = alias;
+    this.concatQbIDAlias = concatQbIDAlias;
+    this.hiveTableScanRowType = newRowtype;
+    this.neededColIndxsFrmReloptHT = buildNeededColIndxsFrmReloptHT(table.getRowType(), newRowtype);
+    this.useQBIdInDigest = useQBIdInDigest;
   }
 
   @Override
@@ -66,9 +104,31 @@ public class HiveTableScan extends Table
     return this;
   }
 
+  /**
+   * Copy TableScan operator with a new Row Schema. The new Row Schema can only
+   * be a subset of this TS schema.
+   *
+   * @param newRowtype
+   * @return
+   */
+  public HiveTableScan copy(RelDataType newRowtype) {
+    return new HiveTableScan(getCluster(), getTraitSet(), ((RelOptHiveTable) table), this.tblAlias, this.concatQbIDAlias,
+            newRowtype, this.useQBIdInDigest);
+  }
+
   @Override
   public RelOptCost computeSelfCost(RelOptPlanner planner) {
-    return HiveCost.FACTORY.makeZeroCost();
+    return RelMetadataQuery.getNonCumulativeCost(this);
+  }
+
+  @Override public RelWriter explainTerms(RelWriter pw) {
+    if (this.useQBIdInDigest) {
+      // TODO: Only the qualified name should be left here
+      return super.explainTerms(pw)
+          .item("qbid:alias", concatQbIDAlias);
+    } else {
+      return super.explainTerms(pw);
+    }
   }
 
   @Override
@@ -89,4 +149,62 @@ public class HiveTableScan extends Table
   public List<ColStatistics> getColStat(List<Integer> projIndxLst) {
     return ((RelOptHiveTable) table).getColStat(projIndxLst);
   }
-}
\ No newline at end of file
+
+  @Override
+  public RelNode project(ImmutableBitSet fieldsUsed, Set<RelDataTypeField> extraFields,
+      RelFactories.ProjectFactory projectFactory) {
+
+    // 1. If the schema is the same then bail out
+    final int fieldCount = getRowType().getFieldCount();
+    if (fieldsUsed.equals(ImmutableBitSet.range(fieldCount)) && extraFields.isEmpty()) {
+      return this;
+    }
+
+    // 2. Make sure there is no dynamic addition of virtual cols
+    if (extraFields != null && !extraFields.isEmpty()) {
+      throw new RuntimeException("Hive TS does not support adding virtual columns dynamically");
+    }
+
+    // 3. Create new TS schema that is a subset of original
+    final List<RelDataTypeField> fields = getRowType().getFieldList();
+    List<RelDataType> fieldTypes = new LinkedList<RelDataType>();
+    List<String> fieldNames = new LinkedList<String>();
+    List<RexNode> exprList = new ArrayList<RexNode>();
+    RexBuilder rexBuilder = getCluster().getRexBuilder();
+    for (int i : fieldsUsed) {
+      RelDataTypeField field = fields.get(i);
+      fieldTypes.add(field.getType());
+      fieldNames.add(field.getName());
+      exprList.add(rexBuilder.makeInputRef(this, i));
+    }
+
+    // 4. Build new TS
+    HiveTableScan newHT = copy(getCluster().getTypeFactory().createStructType(fieldTypes,
+        fieldNames));
+
+    // 5. Add Proj on top of TS
+    return projectFactory.createProject(newHT, exprList, new ArrayList<String>(fieldNames));
+  }
+
+  public List<Integer> getNeededColIndxsFrmReloptHT() {
+    return neededColIndxsFrmReloptHT;
+  }
+
+  public RelDataType getPrunedRowType() {
+    return hiveTableScanRowType;
+  }
+
+  private static ImmutableList<Integer> buildNeededColIndxsFrmReloptHT(RelDataType htRowtype,
+      RelDataType scanRowType) {
+    Builder<Integer> neededColIndxsFrmReloptHTBldr = new ImmutableList.Builder<Integer>();
+    Map<String, Integer> colNameToPosInReloptHT = HiveCalciteUtil.getRowColNameIndxMap(htRowtype
+        .getFieldList());
+    List<String> colNamesInScanRowType = scanRowType.getFieldNames();
+
+    for (int i = 0; i < colNamesInScanRowType.size(); i++) {
+      neededColIndxsFrmReloptHTBldr.add(colNameToPosInReloptHT.get(colNamesInScanRowType.get(i)));
+    }
+
+    return neededColIndxsFrmReloptHTBldr.build();
+  }
+}

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdRowCount.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdRowCount.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdRowCount.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdRowCount.java Mon Apr 20 06:14:38 2015
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hive.ql.optimizer.calcite.stats;
 
 import java.util.ArrayList;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdUniqueKeys.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdUniqueKeys.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdUniqueKeys.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdUniqueKeys.java Mon Apr 20 06:14:38 2015
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hive.ql.optimizer.calcite.stats;
 
 import java.util.BitSet;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java Mon Apr 20 06:14:38 2015
@@ -30,6 +30,7 @@ import org.apache.calcite.sql.type.SqlTy
 import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
 import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
@@ -68,7 +69,7 @@ class ASTBuilder {
     // However in HIVE DB name can not appear in select list; in case of join
     // where table names differ only in DB name, Hive would require user
     // introducing explicit aliases for tbl.
-    b.add(HiveParser.Identifier, hTbl.getTableAlias());
+    b.add(HiveParser.Identifier, ((HiveTableScan)scan).getTableAlias());
     return b.node();
   }
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java?rev=1674738&r1=1674737&r2=1674738&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java Mon Apr 20 06:14:38 2015
@@ -54,11 +54,13 @@ import org.apache.calcite.util.Immutable
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.exec.RowSchema;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveGroupingID;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSort;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter.HiveToken;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
@@ -69,8 +71,8 @@ import com.google.common.collect.Iterabl
 public class ASTConverter {
   private static final Log LOG = LogFactory.getLog(ASTConverter.class);
 
-  private RelNode          root;
-  private HiveAST          hiveAST;
+  private final RelNode          root;
+  private final HiveAST          hiveAST;
   private RelNode          from;
   private Filter           where;
   private Aggregate        groupBy;
@@ -213,7 +215,7 @@ public class ASTConverter {
 
   private void convertLimitToASTNode(HiveSort limit) {
     if (limit != null) {
-      HiveSort hiveLimit = (HiveSort) limit;
+      HiveSort hiveLimit = limit;
       RexNode limitExpr = hiveLimit.getFetchExpr();
       if (limitExpr != null) {
         Object val = ((RexLiteral) limitExpr).getValue2();
@@ -224,12 +226,12 @@ public class ASTConverter {
 
   private void convertOBToASTNode(HiveSort order) {
     if (order != null) {
-      HiveSort hiveSort = (HiveSort) order;
+      HiveSort hiveSort = order;
       if (!hiveSort.getCollation().getFieldCollations().isEmpty()) {
         // 1 Add order by token
         ASTNode orderAst = ASTBuilder.createAST(HiveParser.TOK_ORDERBY, "TOK_ORDERBY");
 
-        schema = new Schema((HiveSort) hiveSort);
+        schema = new Schema(hiveSort);
         Map<Integer, RexNode> obRefToCallMap = hiveSort.getInputRefToCallMap();
         RexNode obExpr;
         ASTNode astCol;
@@ -370,7 +372,7 @@ public class ASTConverter {
   static class RexVisitor extends RexVisitorImpl<ASTNode> {
 
     private final Schema schema;
-    private boolean useTypeQualInLiteral;
+    private final boolean useTypeQualInLiteral;
 
     protected RexVisitor(Schema schema) {
       this(schema, false);
@@ -567,7 +569,7 @@ public class ASTConverter {
     private static final long serialVersionUID = 1L;
 
     Schema(TableScan scan) {
-      String tabName = ((RelOptHiveTable) scan.getTable()).getTableAlias();
+      String tabName = ((HiveTableScan) scan).getTableAlias();
       for (RelDataTypeField field : scan.getRowType().getFieldList()) {
         add(new ColumnInfo(tabName, field.getName()));
       }
@@ -630,7 +632,7 @@ public class ASTConverter {
      * 1. Project will always be child of Sort.<br>
      * 2. In Calcite every projection in Project is uniquely named
      * (unambigous) without using table qualifier (table name).<br>
-     * 
+     *
      * @param order
      *          Hive Sort Node
      * @return Schema
@@ -641,6 +643,12 @@ public class ASTConverter {
         add(new ColumnInfo(null, projName));
       }
     }
+
+    public Schema(String tabAlias, List<RelDataTypeField> fieldList) {
+      for (RelDataTypeField field : fieldList) {
+        add(new ColumnInfo(tabAlias, field.getName()));
+      }
+    }
   }
 
   /*



Mime
View raw message