hive-commits mailing list archives

From sze...@apache.org
Subject svn commit: r1649740 [3/10] - in /hive/branches/spark: ./ beeline/src/java/org/apache/hive/beeline/ common/src/java/org/apache/hadoop/hive/conf/ contrib/src/test/results/clientpositive/ data/files/ itests/util/src/main/java/org/apache/hadoop/hive/ql/ho...
Date Tue, 06 Jan 2015 06:58:44 GMT
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Tue Jan  6 06:58:42 2015
@@ -22,95 +22,26 @@ import static org.apache.hadoop.hive.con
 
 import java.io.IOException;
 import java.io.Serializable;
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.math.BigDecimal;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.UUID;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.regex.Pattern;
 import java.util.regex.PatternSyntaxException;
 
 import org.antlr.runtime.ClassicToken;
 import org.antlr.runtime.Token;
 import org.antlr.runtime.tree.Tree;
-import org.antlr.runtime.tree.TreeVisitor;
-import org.antlr.runtime.tree.TreeVisitorAction;
 import org.antlr.runtime.tree.TreeWizard;
 import org.antlr.runtime.tree.TreeWizard.ContextVisitor;
-import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.plan.RelOptPlanner;
-import org.apache.calcite.plan.RelOptQuery;
-import org.apache.calcite.plan.RelOptRule;
-import org.apache.calcite.plan.RelOptSchema;
-import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.plan.RelTraitSet;
-import org.apache.calcite.plan.hep.HepMatchOrder;
-import org.apache.calcite.plan.hep.HepPlanner;
-import org.apache.calcite.plan.hep.HepProgram;
-import org.apache.calcite.plan.hep.HepProgramBuilder;
-import org.apache.calcite.rel.InvalidRelException;
-import org.apache.calcite.rel.RelCollation;
-import org.apache.calcite.rel.RelCollationImpl;
-import org.apache.calcite.rel.RelFieldCollation;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.Aggregate;
-import org.apache.calcite.rel.core.AggregateCall;
-import org.apache.calcite.rel.core.Filter;
-import org.apache.calcite.rel.core.Join;
-import org.apache.calcite.rel.core.JoinRelType;
-import org.apache.calcite.rel.core.RelFactories;
-import org.apache.calcite.rel.core.SemiJoin;
-import org.apache.calcite.rel.metadata.CachingRelMetadataProvider;
-import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
-import org.apache.calcite.rel.metadata.RelMetadataProvider;
-import org.apache.calcite.rel.rules.FilterAggregateTransposeRule;
-import org.apache.calcite.rel.rules.FilterMergeRule;
-import org.apache.calcite.rel.rules.FilterProjectTransposeRule;
-import org.apache.calcite.rel.rules.FilterSetOpTransposeRule;
-import org.apache.calcite.rel.rules.JoinPushTransitivePredicatesRule;
-import org.apache.calcite.rel.rules.JoinToMultiJoinRule;
-import org.apache.calcite.rel.rules.LoptOptimizeJoinRule;
-import org.apache.calcite.rel.rules.SemiJoinFilterTransposeRule;
-import org.apache.calcite.rel.rules.SemiJoinJoinTransposeRule;
-import org.apache.calcite.rel.rules.SemiJoinProjectTransposeRule;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeFactory;
-import org.apache.calcite.rel.type.RelDataTypeField;
-import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexFieldCollation;
-import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.rex.RexWindowBound;
-import org.apache.calcite.schema.SchemaPlus;
-import org.apache.calcite.sql.SqlAggFunction;
-import org.apache.calcite.sql.SqlCall;
-import org.apache.calcite.sql.SqlExplainLevel;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.sql.SqlLiteral;
-import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.sql.SqlWindow;
-import org.apache.calcite.sql.parser.SqlParserPos;
-import org.apache.calcite.sql.type.SqlTypeName;
-import org.apache.calcite.sql2rel.RelFieldTrimmer;
-import org.apache.calcite.tools.Frameworks;
-import org.apache.calcite.util.CompositeList;
-import org.apache.calcite.util.ImmutableBitSet;
-import org.apache.calcite.util.ImmutableIntList;
-import org.apache.calcite.util.Pair;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -179,32 +110,9 @@ import org.apache.hadoop.hive.ql.metadat
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.optimizer.Optimizer;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveDefaultRelMetadataProvider;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveTypeSystemImpl;
-import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
-import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
-import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveVolcanoPlanner;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSort;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterJoinRule;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterProjectTransposeRule;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterSetOpTransposeRule;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePartitionPruneRule;
-import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter;
-import org.apache.hadoop.hive.ql.optimizer.calcite.translator.JoinCondTypeCheckProcFactory;
-import org.apache.hadoop.hive.ql.optimizer.calcite.translator.JoinTypeCheckCtx;
-import org.apache.hadoop.hive.ql.optimizer.calcite.translator.RexNodeConverter;
-import org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter;
-import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter;
 import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec.SpecType;
+import org.apache.hadoop.hive.ql.parse.CalcitePlanner.ASTSearcher;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderExpression;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PTFInputSpec;
@@ -288,7 +196,6 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
@@ -297,12 +204,6 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.OutputFormat;
 
-import com.google.common.base.Function;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableList.Builder;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
-
 /**
  * Implementation of the semantic analyzer. It generates the query plan.
  * There are other specific semantic analyzers for some hive operations such as
@@ -369,8 +270,7 @@ public class SemanticAnalyzer extends Ba
   //flag for partial scan during analyze ... compute statistics
   protected boolean partialscan;
 
-  private volatile boolean runCBO = true; // TODO: why is this volatile?
-  private volatile boolean disableJoinMerge = false;
+  protected volatile boolean disableJoinMerge = false;
 
   /*
    * Capture the CTE definitions in a Query.
@@ -382,18 +282,13 @@ public class SemanticAnalyzer extends Ba
   private ArrayList<String> ctesExpanded;
 
   /** Not thread-safe. */
-  private final ASTSearcher astSearcher = new ASTSearcher();
+  final ASTSearcher astSearcher = new ASTSearcher();
 
-  private static class Phase1Ctx {
+  static class Phase1Ctx {
     String dest;
     int nextNum;
   }
 
-  protected SemanticAnalyzer(HiveConf conf, boolean runCBO) throws SemanticException {
-    this(conf);
-    this.runCBO = runCBO;
-  }
-
   public SemanticAnalyzer(HiveConf conf) throws SemanticException {
     super(conf);
     opToPartPruner = new HashMap<TableScanOperator, ExprNodeDesc>();
@@ -408,7 +303,8 @@ public class SemanticAnalyzer extends Ba
     opParseCtx = new LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext>();
     joinContext = new HashMap<JoinOperator, QBJoinTree>();
     smbMapJoinContext = new HashMap<SMBMapJoinOperator, QBJoinTree>();
-    topToTable = new HashMap<TableScanOperator, Table>();
+    // Must be deterministic order map for consistent q-test output across Java versions
+    topToTable = new LinkedHashMap<TableScanOperator, Table>();
     fsopToTable = new HashMap<FileSinkOperator, Table>();
     reduceSinkOperatorsAddedByEnforceBucketingSorting = new ArrayList<ReduceSinkOperator>();
     topToTableProps = new HashMap<TableScanOperator, Map<String, String>>();
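The comment above motivates the HashMap -> LinkedHashMap switch: HashMap iteration order is unspecified and can differ across Java versions, while LinkedHashMap always iterates in insertion order, which keeps q-test output stable. A minimal self-contained illustration (not part of this commit):

    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class MapOrderDemo {
      public static void main(String[] args) {
        Map<String, Integer> hash = new HashMap<String, Integer>();
        Map<String, Integer> linked = new LinkedHashMap<String, Integer>();
        for (String alias : new String[] { "ts_3", "ts_1", "ts_2" }) {
          hash.put(alias, alias.length());
          linked.put(alias, alias.length());
        }
        System.out.println(hash.keySet());   // order unspecified, may vary by JDK
        System.out.println(linked.keySet()); // always [ts_3, ts_1, ts_2]
      }
    }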
@@ -674,6 +570,14 @@ public class SemanticAnalyzer extends Ba
     return sb.toString();
   }
 
+  ASTNode getAST() {
+    return this.ast;
+  }
+
+  protected void setAST(ASTNode newAST) {
+    this.ast = newAST;
+  }
+
   /**
    * Goes though the tabref tree and finds the alias for the table. Once found,
    * it records the table name-> alias association in aliasToTabs. It also makes
@@ -805,6 +709,10 @@ public class SemanticAnalyzer extends Ba
     return alias;
   }
 
+  Map<String, SplitSample> getNameToSplitSampleMap() {
+    return this.nameToSplitSample;
+  }
+
   // Generate a temp table out of a value clause
   private ASTNode genValuesTempTable(ASTNode originalFrom) throws SemanticException {
     // Pick a name for the table
@@ -1074,7 +982,7 @@ public class SemanticAnalyzer extends Ba
     qb.rewriteCTEToSubq(cteAlias, cteName, cteQBExpr);
   }
 
-  private boolean isJoinToken(ASTNode node) {
+  static boolean isJoinToken(ASTNode node) {
     if ((node.getToken().getType() == HiveParser.TOK_JOIN)
         || (node.getToken().getType() == HiveParser.TOK_CROSSJOIN)
         || isOuterJoinToken(node)
@@ -1086,7 +994,7 @@ public class SemanticAnalyzer extends Ba
     return false;
   }
 
-  private boolean isOuterJoinToken(ASTNode node) {
+  private static boolean isOuterJoinToken(ASTNode node) {
     return (node.getToken().getType() == HiveParser.TOK_LEFTOUTERJOIN)
       || (node.getToken().getType() == HiveParser.TOK_RIGHTOUTERJOIN)
       || (node.getToken().getType() == HiveParser.TOK_FULLOUTERJOIN);
@@ -1182,30 +1090,6 @@ public class SemanticAnalyzer extends Ba
     return alias;
   }
 
-  /** The context that doPhase1 uses to populate information pertaining
-   *  to CBO (currently, this is used for CTAS and insert-as-select). */
-  private static class PreCboCtx {
-    enum Type {
-      NONE,
-      INSERT,
-      CTAS,
-
-      UNEXPECTED
-    }
-    public ASTNode nodeOfInterest;
-    public Type type = Type.NONE;
-    public void set(Type type, ASTNode ast) {
-      if (this.type != Type.NONE) {
-        STATIC_LOG.warn("Setting " + type + " when already " + this.type
-            + "; node " + ast.dump() + " vs old node " + nodeOfInterest.dump());
-        this.type = Type.UNEXPECTED;
-        return;
-      }
-      this.type = type;
-      this.nodeOfInterest = ast;
-    }
-  }
-
   /**
    * Phase 1: (including, but not limited to):
    *
@@ -1223,7 +1107,7 @@ public class SemanticAnalyzer extends Ba
    * @throws SemanticException
    */
   @SuppressWarnings({"fallthrough", "nls"})
-  public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1, PreCboCtx cboCtx)
+  public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1, PlannerContext plannerCtx)
       throws SemanticException {
 
     boolean phase1Result = true;
@@ -1285,9 +1169,11 @@ public class SemanticAnalyzer extends Ba
         if (qbp.getIsSubQ() && !isTmpFileDest) {
           throw new SemanticException(ErrorMsg.NO_INSERT_INSUBQUERY.getMsg(ast));
         }
-        if (cboCtx != null && !isTmpFileDest) {
-          cboCtx.set(PreCboCtx.Type.INSERT, ast);
+
+        if (plannerCtx != null) {
+          plannerCtx.setInsertToken(ast, isTmpFileDest);
         }
+
         qbp.setDestForClause(ctx_1.dest, (ASTNode) ast.getChild(0));
 
         if (qbp.getClauseNamesForDest().size() > 1) {
@@ -1516,17 +1402,12 @@ public class SemanticAnalyzer extends Ba
       for (int child_pos = 0; child_pos < child_count && phase1Result; ++child_pos) {
         // Recurse
         phase1Result = phase1Result && doPhase1(
-            (ASTNode)ast.getChild(child_pos), qb, ctx_1, cboCtx);
+            (ASTNode)ast.getChild(child_pos), qb, ctx_1, plannerCtx);
       }
     }
     return phase1Result;
   }
 
-  private void traceLogAst(ASTNode ast, String what) {
-    if (!LOG.isTraceEnabled()) return;
-    LOG.trace(what + ast.dump());
-  }
-
   private void getMetaData(QBExpr qbexpr, ReadEntity parentInput)
       throws SemanticException {
     if (qbexpr.getOpcode() == QBExpr.Opcode.NULLOP) {
@@ -2761,7 +2642,7 @@ public class SemanticAnalyzer extends Ba
 
   @SuppressWarnings("nls")
   // TODO: make aliases unique, otherwise needless rewriting takes place
-  private Integer genColListRegex(String colRegex, String tabAlias, ASTNode sel,
+  Integer genColListRegex(String colRegex, String tabAlias, ASTNode sel,
     ArrayList<ExprNodeDesc> col_list, HashSet<ColumnInfo> excludeCols, RowResolver input,
     RowResolver colSrcRR, Integer pos, RowResolver output, List<String> aliases,
     boolean ensureUniqueCols) throws SemanticException {
@@ -2907,7 +2788,7 @@ public class SemanticAnalyzer extends Ba
     return (end == -1) ? "" : cmd.substring(end, cmd.length());
   }
 
-  private static int getPositionFromInternalName(String internalName) {
+  static int getPositionFromInternalName(String internalName) {
     return HiveConf.getPositionFromInternalName(internalName);
   }
 
@@ -3365,7 +3246,7 @@ public class SemanticAnalyzer extends Ba
     }
   }
 
-  private static String[] getColAlias(ASTNode selExpr, String defaultName,
+  static String[] getColAlias(ASTNode selExpr, String defaultName,
       RowResolver inputRR, boolean includeFuncName, int colNum) {
     String colAlias = null;
     String tabAlias = null;
@@ -3441,7 +3322,7 @@ public class SemanticAnalyzer extends Ba
    * Returns whether the pattern is a regex expression (instead of a normal
    * string). Normal string is a string with all alphabets/digits and "_".
    */
-  private static boolean isRegex(String pattern, HiveConf conf) {
+  static boolean isRegex(String pattern, HiveConf conf) {
     String qIdSupport = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUOTEDID_SUPPORT);
     if ( "column".equals(qIdSupport)) {
       return false;
@@ -3734,7 +3615,7 @@ public class SemanticAnalyzer extends Ba
     return output;
   }
 
-  private String recommendName(ExprNodeDesc exp, String colAlias) {
+  String recommendName(ExprNodeDesc exp, String colAlias) {
     if (!colAlias.startsWith(autogenColAliasPrfxLbl)) {
       return null;
     }
@@ -3745,6 +3626,14 @@ public class SemanticAnalyzer extends Ba
     return null;
   }
 
+  String getAutogenColAliasPrfxLbl() {
+    return this.autogenColAliasPrfxLbl;
+  }
+
+  boolean autogenColAliasPrfxIncludeFuncName() {
+    return this.autogenColAliasPrfxIncludeFuncName;
+  }
+
   /**
    * Class to store GenericUDAF related information.
    */
@@ -3851,7 +3740,7 @@ public class SemanticAnalyzer extends Ba
     return r;
   }
 
-  private static GenericUDAFEvaluator.Mode groupByDescModeToUDAFMode(
+  static GenericUDAFEvaluator.Mode groupByDescModeToUDAFMode(
       GroupByDesc.Mode mode, boolean isDistinct) {
     switch (mode) {
     case COMPLETE:
@@ -6292,9 +6181,7 @@ public class SemanticAnalyzer extends Ba
           if (!("".equals(nm[0])) && nm[1] != null) {
             colName = unescapeIdentifier(colInfo.getAlias()).toLowerCase(); // remove ``
           }
-          if (runCBO) {
-            colName = fixCtasColumnName(colName);
-          }
+          colName = fixCtasColumnName(colName);
           col.setName(colName);
           col.setType(colInfo.getType().getTypeName());
           field_schemas.add(col);
@@ -6376,7 +6263,7 @@ public class SemanticAnalyzer extends Ba
     } else {
       try {
         StructObjectInspector rowObjectInspector = (StructObjectInspector) table_desc
-            .getDeserializer().getObjectInspector();
+            .getDeserializer(conf).getObjectInspector();
         List<? extends StructField> fields = rowObjectInspector
             .getAllStructFieldRefs();
         for (int i = 0; i < fields.size(); i++) {
@@ -6473,12 +6360,8 @@ public class SemanticAnalyzer extends Ba
     return output;
   }
 
-  private static String fixCtasColumnName(String colName) {
-    int lastDot = colName.lastIndexOf('.');
-    if (lastDot < 0) return colName; // alias is not fully qualified
-    String nqColumnName = colName.substring(lastDot + 1);
-    STATIC_LOG.debug("Replacing " + colName + " (produced by CBO) by " + nqColumnName);
-    return nqColumnName;
+  String fixCtasColumnName(String colName) {
+    return colName;
   }
 
   // Check constraints on acid tables.  This includes
@@ -8334,6 +8217,10 @@ public class SemanticAnalyzer extends Ba
     return new ObjectPair(res, tgtToNodeExprMap);
   }
 
+  boolean continueJoinMerge() {
+    return true;
+  }
+
   // try merge join tree from inner most source
   // (it was merged from outer most to inner, which could be invalid)
   //
@@ -8374,7 +8261,7 @@ public class SemanticAnalyzer extends Ba
           if (!node.getNoOuterJoin() || !target.getNoOuterJoin()) {
             if (node.getRightAliases().length + target.getRightAliases().length + 1 > 16) {
               LOG.info(ErrorMsg.JOINNODE_OUTERJOIN_MORETHAN_16);
-              continueScanning = !runCBO;
+              continueScanning = continueJoinMerge();
               continue;
             }
           }
@@ -8386,7 +8273,7 @@ public class SemanticAnalyzer extends Ba
          * for CBO provided orderings, don't attempt to reorder joins.
          * only convert consecutive joins into n-way joins.
          */
-        continueScanning = !runCBO;
+        continueScanning = continueJoinMerge();
         if (prevType == null) {
           prevType = currType;
         }
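Both call sites above replace the old "continueScanning = !runCBO;" with the new continueJoinMerge() hook, which SemanticAnalyzer itself pins to true (keep scanning). A hedged sketch of how a Calcite-based subclass could restore the old short-circuit:

    // Hypothetical override in a CBO-based subclass: once the join ordering
    // comes from the cost-based optimizer, stop the merge scan, exactly
    // where the deleted "continueScanning = !runCBO" used to stop it.
    @Override
    boolean continueJoinMerge() {
      return false;
    }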
@@ -9550,7 +9437,7 @@ public class SemanticAnalyzer extends Ba
     return output;
   }
 
-  private boolean isSkewedCol(String alias, QB qb, String colName) {
+  static boolean isSkewedCol(String alias, QB qb, String colName) {
     boolean isSkewedCol = false;
     List<String> skewedCols = qb.getSkewedColumnNames(alias);
     for (String skewedCol : skewedCols) {
@@ -9759,7 +9646,7 @@ public class SemanticAnalyzer extends Ba
       rewriteRRForSubQ(qb.getAlias(), bodyOpInfo, skipAmbiguityCheck);
     }
 
-    this.qb = qb;
+    setQB(qb);
     return bodyOpInfo;
   }
 
@@ -9990,174 +9877,134 @@ public class SemanticAnalyzer extends Ba
     this.qb = qb;
   }
 
+  boolean analyzeCreateTable(ASTNode child) throws SemanticException {
+    if (ast.getToken().getType() == HiveParser.TOK_CREATETABLE) {
+      // if it is not CTAS, we don't need to go further and just return
+      if ((child = analyzeCreateTable(ast, qb, null)) == null) {
+        return true;
+      }
+    } else {
+      SessionState.get().setCommandType(HiveOperation.QUERY);
+    }
+
+    return false;
+  }
+
   @Override
   @SuppressWarnings("nls")
   public void analyzeInternal(ASTNode ast) throws SemanticException {
+    analyzeInternal(ast, new PlannerContext());
+  }
+
+  /**
+   * Planner-specific stuff goes in here.
+   */
+  static class PlannerContext {
+    protected ASTNode   child;
+    protected Phase1Ctx ctx_1;
+
+    void setParseTreeAttr(ASTNode child, Phase1Ctx ctx_1) {
+      this.child = child;
+      this.ctx_1 = ctx_1;
+    }
+
+    void setCTASToken(ASTNode child) {
+    }
+
+    void setInsertToken(ASTNode ast, boolean isTmpFileDest) {
+    }
+  }
+
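PlannerContext takes over from the deleted PreCboCtx as the planner-specific carrier: setCTASToken and setInsertToken are deliberate no-ops here, so doPhase1 and analyzeCreateTable can report the interesting nodes without knowing whether a cost-based planner is listening. A sketch, assuming a subclass shaped like the deleted PreCboCtx (the real one presumably lives in the new CalcitePlanner, outside this diff):

    // Hypothetical planner-specific context mirroring the deleted PreCboCtx:
    static class CboPlannerContext extends PlannerContext {
      ASTNode nodeOfInterest;

      @Override
      void setCTASToken(ASTNode child) {
        this.nodeOfInterest = child;  // remember the CTAS query subtree
      }

      @Override
      void setInsertToken(ASTNode ast, boolean isTmpFileDest) {
        if (!isTmpFileDest) {         // same guard the deleted code applied
          this.nodeOfInterest = ast;  // remember the insert destination
        }
      }
    }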
+  boolean genResolvedParseTree(ASTNode ast, PlannerContext plannerCtx) throws SemanticException {
     ASTNode child = ast;
     this.ast = ast;
     viewsExpanded = new ArrayList<String>();
     ctesExpanded = new ArrayList<String>();
 
-    LOG.info("Starting Semantic Analysis");
-
-    // analyze and process the position alias
+    // 1. analyze and process the position alias
     processPositionAlias(ast);
-    // Check configuration for CBO first.
-    runCBO = runCBO && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CBO_ENABLED);
 
-    // analyze create table command
-    PreCboCtx cboCtx = runCBO ? new PreCboCtx() : null;
+    // 2. analyze create table command
     if (ast.getToken().getType() == HiveParser.TOK_CREATETABLE) {
       // if it is not CTAS, we don't need to go further and just return
-      if ((child = analyzeCreateTable(ast, qb, cboCtx)) == null) {
-        return;
+      if ((child = analyzeCreateTable(ast, qb, plannerCtx)) == null) {
+        return false;
       }
     } else {
       SessionState.get().setCommandType(HiveOperation.QUERY);
     }
 
-    // analyze create view command
-    if (ast.getToken().getType() == HiveParser.TOK_CREATEVIEW ||
-        (ast.getToken().getType() == HiveParser.TOK_ALTERVIEW
-          && ast.getChild(1).getType() == HiveParser.TOK_QUERY)) {
+    // 3. analyze create view command
+    if (ast.getToken().getType() == HiveParser.TOK_CREATEVIEW
+        || (ast.getToken().getType() == HiveParser.TOK_ALTERVIEW && ast.getChild(1).getType() == HiveParser.TOK_QUERY)) {
       child = analyzeCreateView(ast, qb);
       SessionState.get().setCommandType(HiveOperation.CREATEVIEW);
       if (child == null) {
-        return;
+        return false;
       }
       viewSelect = child;
       // prevent view from referencing itself
       viewsExpanded.add(createVwDesc.getViewName());
     }
 
-    // continue analyzing from the child ASTNode.
+    // 4. continue analyzing from the child ASTNode.
     Phase1Ctx ctx_1 = initPhase1Ctx();
-    if (!doPhase1(child, qb, ctx_1, cboCtx)) {
+    if (!doPhase1(child, qb, ctx_1, plannerCtx)) {
       // if phase1Result false return
-      return;
+      return false;
     }
-
     LOG.info("Completed phase 1 of Semantic Analysis");
 
+    // 5. Resolve Parse Tree
     getMetaData(qb);
     LOG.info("Completed getting MetaData in Semantic Analysis");
 
-    // Note: for now, we don't actually pass the queryForCbo to CBO, because it accepts qb, not
-    //    AST, and can also access all the private stuff in SA. We rely on the fact that CBO
-    //    ignores the unknown tokens (create table, destination), so if the query is otherwise ok,
-    //    it is as if we did remove those and gave CBO the proper AST. That is kinda hacky.
-    if (runCBO) {
-      ASTNode queryForCbo = ast;
-      if (cboCtx.type == PreCboCtx.Type.CTAS) {
-        queryForCbo = cboCtx.nodeOfInterest; // nodeOfInterest is the query
-      }
-      runCBO = canHandleAstForCbo(queryForCbo, qb, cboCtx);
-    }
-
-    // Save the result schema derived from the sink operator produced
-    // by genPlan. This has the correct column names, which clients
-    // such as JDBC would prefer instead of the c0, c1 we'll end
-    // up with later.
-    Operator sinkOp = null;
-
-    if (runCBO) {
-      disableJoinMerge = true;
-      CalciteBasedPlanner calcitePlanner = new CalciteBasedPlanner();
-      boolean reAnalyzeAST = false;
+    plannerCtx.setParseTreeAttr(child, ctx_1);
 
-      try {
-        // 1. Gen Optimized AST
-        ASTNode newAST = calcitePlanner.getOptimizedAST(prunedPartitions);
-
-        // 1.1. Fix up the query for insert/ctas
-        newAST = fixUpCtasAndInsertAfterCbo(ast, newAST, cboCtx);
+    return true;
+  }
 
-        // 2. Regen OP plan from optimized AST
-        init(false);
-        if (cboCtx.type == PreCboCtx.Type.CTAS) {
-          // Redo create-table analysis, because it's not part of doPhase1.
-          newAST = reAnalyzeCtasAfterCbo(newAST);
-        }
-        ctx_1 = initPhase1Ctx();
-        if (!doPhase1(newAST, qb, ctx_1, null)) {
-          throw new RuntimeException(
-              "Couldn't do phase1 on CBO optimized query plan");
-        }
-        // unfortunately making prunedPartitions immutable is not possible here
-        // with SemiJoins not all tables are costed in CBO,
-        // so their PartitionList is not evaluated until the run phase.
-        //prunedPartitions = ImmutableMap.copyOf(prunedPartitions);
-        getMetaData(qb);
-
-        disableJoinMerge = false;
-        sinkOp = genPlan(qb);
-        LOG.info("CBO Succeeded; optimized logical plan.");
-        LOG.debug(newAST.dump());
+  Operator genOPTree(ASTNode ast, PlannerContext plannerCtx) throws SemanticException {
+    return genPlan(qb);
+  }
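genOPTree is the other main extension point: the base implementation keeps the classic genPlan(qb) path, while a cost-based subclass can override it to run the Calcite pipeline being deleted below and then regenerate the operator tree. A rough outline of such an override (method names borrowed from the deleted code; the wiring is an assumption):

    // Hypothetical override in a Calcite-based subclass:
    @Override
    Operator genOPTree(ASTNode ast, PlannerContext plannerCtx) throws SemanticException {
      // 1. produce a Calcite-optimized AST (as getOptimizedAST() did below),
      // 2. re-run doPhase1/getMetaData on it (as the deleted block did),
      // 3. install it through the new setAST() accessor, then
      // 4. fall through to the classic operator-tree generation:
      return super.genOPTree(ast, plannerCtx);  // i.e. genPlan(qb)
    }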
 
-        /*
-         * Use non CBO Result Set Schema so as to preserve user specified names.
-         * Hive seems to have bugs with OB/LIMIT in sub queries. // 3. Reset
-         * result set schema resultSchema =
-         * convertRowSchemaToResultSetSchema(opParseCtx.get(sinkOp)
-         * .getRowResolver(), true);
-         */
-      } catch (Exception e) {
-        boolean isMissingStats = calcitePlanner.noColsMissingStats.get() > 0;
-        if (isMissingStats) {
-          LOG.error("CBO failed due to missing column stats (see previous errors), skipping CBO");
-        } else {
-          LOG.error("CBO failed, skipping CBO. ", e);
-        }
-        if (!conf.getBoolVar(ConfVars.HIVE_IN_TEST) || isMissingStats
-            || e instanceof CalciteSemanticException) {
-          reAnalyzeAST = true;
-        } else if (e instanceof SemanticException) {
-          throw (SemanticException)e;
-        } else if (e instanceof RuntimeException) {
-          throw (RuntimeException)e;
-        } else {
-          throw new SemanticException(e);
-        }
-      } finally {
-        runCBO = false;
-        disableJoinMerge = false;
-        if (reAnalyzeAST) {
-          init(true);
-          prunedPartitions.clear();
-          analyzeInternal(ast);
-          return;
-        }
-      }
-    } else {
-      sinkOp = genPlan(qb);
+  void analyzeInternal(ASTNode ast, PlannerContext plannerCtx) throws SemanticException {
+    // 1. Generate Resolved Parse tree from syntax tree
+    LOG.info("Starting Semantic Analysis");
+    if (!genResolvedParseTree(ast, plannerCtx)) {
+      return;
     }
 
-    if (createVwDesc != null)
+    // 2. Gen OP Tree from resolved Parse Tree
+    Operator sinkOp = genOPTree(ast, plannerCtx);
+
+    // 3. Deduce Resultset Schema
+    if (createVwDesc != null) {
       resultSchema = convertRowSchemaToViewSchema(opParseCtx.get(sinkOp).getRowResolver());
-    else
+    } else {
       resultSchema = convertRowSchemaToResultSetSchema(opParseCtx.get(sinkOp).getRowResolver(),
           HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES));
+    }
 
-    ParseContext pCtx = new ParseContext(conf, qb, child, opToPartPruner,
-        opToPartList, topOps, topSelOps, opParseCtx, joinContext, smbMapJoinContext,
-        topToTable, topToTableProps, fsopToTable,
-        loadTableWork, loadFileWork, ctx, idToTableNameMap, destTableId, uCtx,
-        listMapJoinOpsNoReducer, groupOpToInputTables, prunedPartitions,
-        opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks,
-        opToPartToSkewedPruner, viewAliasToInput,
-        reduceSinkOperatorsAddedByEnforceBucketingSorting, queryProperties);
+    // 4. Generate Parse Context for Optimizer & Physical compiler
+    ParseContext pCtx = new ParseContext(conf, qb, plannerCtx.child, opToPartPruner, opToPartList,
+        topOps, topSelOps, opParseCtx, joinContext, smbMapJoinContext, topToTable, topToTableProps,
+        fsopToTable, loadTableWork, loadFileWork, ctx, idToTableNameMap, destTableId, uCtx,
+        listMapJoinOpsNoReducer, groupOpToInputTables, prunedPartitions, opToSamplePruner,
+        globalLimitCtx, nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner,
+        viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting, queryProperties);
 
+    // 5. Take care of view creation
     if (createVwDesc != null) {
       saveViewDefinition();
 
-      // validate the create view statement
-      // at this point, the createVwDesc gets all the information for semantic check
+      // validate the create view statement; at this point, the createVwDesc gets
+      // all the information for semantic check
       validateCreateView(createVwDesc);
 
-      // Since we're only creating a view (not executing it), we
-      // don't need to optimize or translate the plan (and in fact, those
-      // procedures can interfere with the view creation). So
-      // skip the rest of this method.
+      // Since we're only creating a view (not executing it), we don't need to
+      // optimize or translate the plan (and in fact, those procedures can
+      // interfere with the view creation). So skip the rest of this method.
       ctx.setResDir(null);
       ctx.setResFile(null);
 
@@ -10169,176 +10016,58 @@ public class SemanticAnalyzer extends Ba
       return;
     }
 
-    // Generate table access stats if required
+    // 6. Generate table access stats if required
     if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_TABLEKEYS) == true) {
       TableAccessAnalyzer tableAccessAnalyzer = new TableAccessAnalyzer(pCtx);
       setTableAccessInfo(tableAccessAnalyzer.analyzeTableAccess());
     }
 
+    // 7. Perform Logical optimization
     if (LOG.isDebugEnabled()) {
       LOG.debug("Before logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
     }
-
     Optimizer optm = new Optimizer();
     optm.setPctx(pCtx);
     optm.initialize(conf);
     pCtx = optm.optimize();
-
     FetchTask origFetchTask = pCtx.getFetchTask();
-
     if (LOG.isDebugEnabled()) {
       LOG.debug("After logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
     }
 
-    // Generate column access stats if required - wait until column pruning takes place
-    // during optimization
+    // 8. Generate column access stats if required - wait until column pruning
+    // takes place during optimization
     boolean isColumnInfoNeedForAuth = SessionState.get().isAuthorizationModeV2()
         && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED);
-
     if (isColumnInfoNeedForAuth
         || HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS) == true) {
       ColumnAccessAnalyzer columnAccessAnalyzer = new ColumnAccessAnalyzer(pCtx);
       setColumnAccessInfo(columnAccessAnalyzer.analyzeColumnAccess());
     }
 
+    // 9. Optimize Physical op tree & Translate to target execution engine (MR,
+    // TEZ..)
     if (!ctx.getExplainLogical()) {
-      // At this point we have the complete operator tree
-      // from which we want to create the map-reduce plan
       TaskCompiler compiler = TaskCompilerFactory.getCompiler(conf, pCtx);
       compiler.init(conf, console, db);
       compiler.compile(pCtx, rootTasks, inputs, outputs);
       fetchTask = pCtx.getFetchTask();
     }
-
     LOG.info("Completed plan generation");
 
-    // put accessed columns to readEntity
+    // 10. put accessed columns to readEntity
     if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS)) {
       putAccessedColumnsToReadEntity(inputs, columnAccessInfo);
     }
 
+    // 11. if desired, check we're not going over partition scan limits
     if (!ctx.getExplain()) {
-      // if desired check we're not going over partition scan limits
       enforceScanLimits(pCtx, origFetchTask);
     }
 
     return;
   }
 
-  private ASTNode fixUpCtasAndInsertAfterCbo(
-      ASTNode originalAst, ASTNode newAst, PreCboCtx cboCtx) throws SemanticException {
-    switch (cboCtx.type) {
-    case NONE: return newAst; // nothing to do
-    case CTAS: {
-      // Patch the optimized query back into original CTAS AST, replacing the original query.
-      replaceASTChild(cboCtx.nodeOfInterest, newAst);
-      return originalAst;
-    }
-    case INSERT: {
-      // We need to patch the dest back to original into new query.
-      // This makes assumptions about the structure of the AST.
-      ASTNode newDest = astSearcher.simpleBreadthFirstSearch(
-          newAst, HiveParser.TOK_QUERY, HiveParser.TOK_INSERT, HiveParser.TOK_DESTINATION);
-      if (newDest == null) {
-        LOG.error("Cannot find destination after CBO; new ast is "+ newAst.dump());
-        throw new SemanticException("Cannot find destination after CBO");
-      }
-      replaceASTChild(newDest, cboCtx.nodeOfInterest);
-      return newAst;
-    }
-    default: throw new AssertionError("Unexpected type " + cboCtx.type);
-    }
-  }
-
-  private ASTNode reAnalyzeCtasAfterCbo(ASTNode newAst) throws SemanticException {
-    // analyzeCreateTable uses this.ast, but doPhase1 doesn't, so only reset it here.
-    this.ast = newAst;
-    newAst = analyzeCreateTable(newAst, qb, null);
-    if (newAst == null) {
-      LOG.error("analyzeCreateTable failed to initialize CTAS after CBO;"
-          + " new ast is " + this.ast.dump());
-      throw new SemanticException("analyzeCreateTable failed to initialize CTAS after CBO");
-    }
-    return newAst;
-  }
-
-  private boolean canHandleAstForCbo(ASTNode ast, QB qb, PreCboCtx cboCtx) {
-     int root = ast.getToken().getType();
-     boolean needToLogMessage = LOG.isInfoEnabled();
-     boolean isSupportedRoot =
-         root == HiveParser.TOK_QUERY || root == HiveParser.TOK_EXPLAIN || qb.isCTAS();
-     // Check AST.
-     // Assumption: If top level QB is query then everything below it must also be Query
-     // Can there be an insert or CTAS that wouldn't
-     //        be supported and would require additional checks similar to IsQuery?
-     boolean isSupportedType =
-         qb.getIsQuery() || qb.isCTAS() || cboCtx.type == PreCboCtx.Type.INSERT;
-     boolean noBadTokens = HiveCalciteUtil.validateASTForUnsupportedTokens(ast);
-     boolean result = isSupportedRoot && isSupportedType && createVwDesc == null && noBadTokens;
-     if (!result) {
-       if (needToLogMessage) {
-         String msg = "";
-         if (!isSupportedRoot) msg += "doesn't have QUERY or EXPLAIN as root and not a CTAS; ";
-         if (!isSupportedType) msg += "is not a query, CTAS, or insert; ";
-         if (createVwDesc != null) msg += "has create view; ";
-         if (!noBadTokens) msg += "has unsupported tokens; ";
-
-         if (msg.isEmpty()) msg += "has some unspecified limitations; ";
-         LOG.info("Not invoking CBO because the statement " + msg.substring(0, msg.length() - 2));
-       }
-       return false;
-     }
-     // Now check QB in more detail. canHandleQbForCbo returns null if query can be handled.
-     String msg = canHandleQbForCbo(qb, true, needToLogMessage);
-     if (msg == null) {
-       return true;
-     }
-     if (needToLogMessage) {
-       LOG.info("Not invoking CBO because the statement " + msg.substring(0, msg.length() - 2));
-     }
-     return false;
-  }
-
-  private class ASTSearcher {
-    private final LinkedList<ASTNode> searchQueue = new LinkedList<ASTNode>();
-    /**
-     * Performs breadth-first search of the AST for a nested set of tokens. Tokens don't have to be
-     * each other's direct children; they can be separated by layers of other tokens. For each token
-     * in the list, the first one found is matched and there's no backtracking; thus, if AST has
-     * multiple instances of some token, of which only one matches, it is not guaranteed to be found.
-     * We use this for simple things.
-     * Not thread-safe - reuses searchQueue.
-     */
-    public ASTNode simpleBreadthFirstSearch(ASTNode ast, int... tokens) {
-      searchQueue.clear();
-      searchQueue.add(ast);
-      for (int i = 0; i < tokens.length; ++i) {
-        boolean found = false;
-        int token = tokens[i];
-        while (!searchQueue.isEmpty() && !found) {
-          ASTNode next = searchQueue.poll();
-          found = next.getType() == token;
-          if (found) {
-            if (i == tokens.length - 1) return next;
-            searchQueue.clear();
-          }
-          for (int j = 0; j < next.getChildCount(); ++j) {
-            searchQueue.add((ASTNode)next.getChild(j));
-          }
-        }
-        if (!found) return null;
-      }
-      return null;
-    }
-  }
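The searcher matches each token in turn with no backtracking, clearing its queue after every hit; its one use in this file was the deleted INSERT fix-up above, which located the destination node like so:

    // Usage (from the deleted fixUpCtasAndInsertAfterCbo): descend through
    // TOK_QUERY, then TOK_INSERT, then TOK_DESTINATION, one layer at a time.
    ASTNode newDest = astSearcher.simpleBreadthFirstSearch(
        newAst, HiveParser.TOK_QUERY, HiveParser.TOK_INSERT, HiveParser.TOK_DESTINATION);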
-
-  private void replaceASTChild(ASTNode child, ASTNode newChild) {
-    ASTNode parent = (ASTNode)child.parent;
-    int childIndex = child.childIndex;
-    parent.deleteChild(childIndex);
-    parent.insertChild(childIndex, newChild);
-  }
-
   private void putAccessedColumnsToReadEntity(HashSet<ReadEntity> inputs, ColumnAccessInfo columnAccessInfo) {
     Map<String, List<String>> tableToColumnAccessMap = columnAccessInfo.getTableToColumnAccessMap();
     if (tableToColumnAccessMap != null && !tableToColumnAccessMap.isEmpty()) {
@@ -10514,13 +10243,13 @@ public class SemanticAnalyzer extends Ba
     createVwDesc.setViewExpandedText(expandedText);
   }
 
-  private List<FieldSchema> convertRowSchemaToViewSchema(RowResolver rr) throws SemanticException {
+  static List<FieldSchema> convertRowSchemaToViewSchema(RowResolver rr) throws SemanticException {
     List<FieldSchema> fieldSchema = convertRowSchemaToResultSetSchema(rr, false);
     ParseUtils.validateColumnNameUniqueness(fieldSchema);
     return fieldSchema;
   }
 
-  private List<FieldSchema> convertRowSchemaToResultSetSchema(RowResolver rr,
+  static List<FieldSchema> convertRowSchemaToResultSetSchema(RowResolver rr,
       boolean useTabAliasIfAvailable) {
     List<FieldSchema> fieldSchemas = new ArrayList<FieldSchema>();
     String[] qualifiedColName;
@@ -10835,8 +10564,8 @@ public class SemanticAnalyzer extends Ba
   * the semantic analyzer needs to deal with the select statement with respect
    * to the SerDe and Storage Format.
    */
-  private ASTNode analyzeCreateTable(
-      ASTNode ast, QB qb, PreCboCtx cboCtx) throws SemanticException {
+  ASTNode analyzeCreateTable(
+      ASTNode ast, QB qb, PlannerContext plannerCtx) throws SemanticException {
     String[] qualifiedTabName = getQualifiedTableName((ASTNode) ast.getChild(0));
     String dbDotTab = getDotName(qualifiedTabName);
 
@@ -10926,8 +10655,8 @@ public class SemanticAnalyzer extends Ba
           throw new SemanticException(ErrorMsg.CTAS_EXTTBL_COEXISTENCE.getMsg());
         }
         command_type = CTAS;
-        if (cboCtx != null) {
-          cboCtx.set(PreCboCtx.Type.CTAS, child);
+        if (plannerCtx != null) {
+          plannerCtx.setCTASToken(child);
         }
         selectStmt = child;
         break;
@@ -11184,6 +10913,10 @@ public class SemanticAnalyzer extends Ba
     return selectStmt;
   }
 
+  CreateViewDesc getCreateViewDesc() {
+    return this.createVwDesc;
+  }
+
   // validate the create view statement
   // the statement could be CREATE VIEW, REPLACE VIEW, or ALTER VIEW AS SELECT
   // check semantic conditions
@@ -12495,2160 +12228,4 @@ public class SemanticAnalyzer extends Ba
   protected boolean deleting() {
     return false;
   }
-
-  /**** Temporary Place Holder For Calcite plan Gen, Optimizer ****/
-
-  /**
-   * Entry point to Optimizations using Calcite. Checks whether Calcite can handle the query.
-   * @param qbToChk Query block to check.
-   * @param verbose Whether return value should be verbose in case of failure.
-   * @return null if the query can be handled; non-null reason string if it cannot be.
-   */
-  private String canHandleQbForCbo(QB qbToChk, boolean topLevelQB, boolean verbose) {
-    // Assumption:
-    // 1. If top level QB is query then everything below it must also be Query
-    // 2. Nested Subquery will return false for qbToChk.getIsQuery()
-    boolean isInTest = conf.getBoolVar(ConfVars.HIVE_IN_TEST);
-    boolean isStrictTest = isInTest
-        && !conf.getVar(ConfVars.HIVEMAPREDMODE).equalsIgnoreCase("nonstrict");
-    boolean hasEnoughJoins = !topLevelQB || (queryProperties.getJoinCount() > 1) || isInTest;
-    if (!isStrictTest && hasEnoughJoins && !queryProperties.hasClusterBy()
-        && !queryProperties.hasDistributeBy() && !queryProperties.hasSortBy()
-        && !queryProperties.hasPTF() && !queryProperties.usesScript()
-        && !queryProperties.hasMultiDestQuery() && !queryProperties.hasLateralViews()) {
-      return null; // Ok to run CBO.
-    }
-
-    // Not ok to run CBO, build error message.
-    String msg = "";
-    if (verbose) {
-      if (isStrictTest) msg += "is in test running in mode other than nonstrict; ";
-      if (!hasEnoughJoins) msg += "has too few joins; ";
-      if (queryProperties.hasClusterBy()) msg += "has cluster by; ";
-      if (queryProperties.hasDistributeBy()) msg += "has distribute by; ";
-      if (queryProperties.hasSortBy()) msg += "has sort by; ";
-      if (queryProperties.hasPTF()) msg += "has PTF; ";
-      if (queryProperties.usesScript()) msg += "uses scripts; ";
-      if (queryProperties.hasMultiDestQuery()) msg += "is a multi-destination query; ";
-      if (queryProperties.hasLateralViews()) msg += "has lateral views; ";
-
-      if (msg.isEmpty()) msg += "has some unspecified limitations; ";
-    }
-    return msg;
-  }
-
-  private class CalciteBasedPlanner implements Frameworks.PlannerAction<RelNode> {
-    private RelOptCluster                                 cluster;
-    private RelOptSchema                                  relOptSchema;
-    private SemanticException                             semanticException;
-    private Map<String, PrunedPartitionList>              partitionCache;
-    private final AtomicInteger                           noColsMissingStats = new AtomicInteger(0);
-    List<FieldSchema>                                     topLevelFieldSchema;
-
-    // TODO: Do we need to keep track of RR, ColNameToPosMap for every op or
-    // just last one.
-    LinkedHashMap<RelNode, RowResolver>                   relToHiveRR                 = new LinkedHashMap<RelNode, RowResolver>();
-    LinkedHashMap<RelNode, ImmutableMap<String, Integer>> relToHiveColNameCalcitePosMap = new LinkedHashMap<RelNode, ImmutableMap<String, Integer>>();
-
-    private ASTNode getOptimizedAST(Map<String, PrunedPartitionList> partitionCache)
-        throws SemanticException {
-      ASTNode calciteOptimizedAST = null;
-      RelNode optimizedCalcitePlan = null;
-      this.partitionCache = partitionCache;
-
-      try {
-        optimizedCalcitePlan = Frameworks.withPlanner(this,
-            Frameworks.newConfigBuilder().typeSystem(new HiveTypeSystemImpl()).build());
-      } catch (Exception e) {
-        rethrowCalciteException(e);
-        throw new AssertionError("rethrowCalciteException didn't throw for " + e.getMessage());
-      }
-      calciteOptimizedAST = ASTConverter.convert(optimizedCalcitePlan, topLevelFieldSchema);
-
-      return calciteOptimizedAST;
-    }
-
-    /*
-     * Unwraps a chain of useless UndeclaredThrowableException-s, InvocationTargetException-s
-     * and RuntimeException-s potentially coming from CBO/Calcite code.
-     */
-    private void rethrowCalciteException(Exception e) throws SemanticException {
-      Throwable first = (semanticException != null) ? semanticException : e,
-          current = first, cause = current.getCause();
-      while (cause != null) {
-        Throwable causeOfCause = cause.getCause();
-        if (current == first && causeOfCause == null && isUselessCause(first)) {
-          // "cause" is a root cause, and "e"/"first" is a useless exception it's wrapped in.
-          first = cause;
-          break;
-        } else if (causeOfCause != null && isUselessCause(cause)
-            && ExceptionHelper.resetCause(current, causeOfCause)) {
-          // "cause" was a useless intermediate cause, so we replace it with its own cause.
-          cause = causeOfCause;
-          continue; // do loop once again with the new cause of "current"
-        }
-        current = cause;
-        cause = current.getCause();
-      }
-
-      if (first instanceof RuntimeException) {
-        throw (RuntimeException)first;
-      } else if (first instanceof SemanticException) {
-        throw (SemanticException)first;
-      }
-      throw new RuntimeException(first);
-    }
-
-    private boolean isUselessCause(Throwable t) {
-      return t instanceof RuntimeException || t instanceof InvocationTargetException
-          || t instanceof UndeclaredThrowableException;
-    }
-
-    @Override
-    public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlus rootSchema) {
-      RelNode calciteGenPlan = null;
-      RelNode calcitePreCboPlan = null;
-      RelNode calciteOptimizedPlan = null;
-
-      /*
-       * recreate cluster, so that it picks up the additional traitDef
-       */
-      RelOptPlanner planner = HiveVolcanoPlanner.createPlanner();
-      final RelOptQuery query = new RelOptQuery(planner);
-      final RexBuilder rexBuilder = cluster.getRexBuilder();
-      cluster = query.createCluster(rexBuilder.getTypeFactory(), rexBuilder);
-
-      this.cluster = cluster;
-      this.relOptSchema = relOptSchema;
-
-      try {
-        calciteGenPlan = genLogicalPlan(qb, true);
-        topLevelFieldSchema = convertRowSchemaToResultSetSchema(relToHiveRR.get(calciteGenPlan),
-            HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES));
-      } catch (SemanticException e) {
-        semanticException = e;
-        throw new RuntimeException(e);
-      }
-
-      calcitePreCboPlan = applyPreCBOTransforms(calciteGenPlan, HiveDefaultRelMetadataProvider.INSTANCE);
-      List<RelMetadataProvider> list = Lists.newArrayList();
-      list.add(HiveDefaultRelMetadataProvider.INSTANCE);
-      RelTraitSet desiredTraits = cluster.traitSetOf(HiveRelNode.CONVENTION, RelCollationImpl.EMPTY);
-
-      HepProgram hepPgm = null;
-      HepProgramBuilder hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP)
-          .addRuleInstance(new JoinToMultiJoinRule(HiveJoin.class));
-      hepPgmBldr.addRuleInstance(new LoptOptimizeJoinRule(HiveJoin.HIVE_JOIN_FACTORY,
-          HiveProject.DEFAULT_PROJECT_FACTORY, HiveFilter.DEFAULT_FILTER_FACTORY));
-
-      hepPgm = hepPgmBldr.build();
-      HepPlanner hepPlanner = new HepPlanner(hepPgm);
-
-      hepPlanner.registerMetadataProviders(list);
-      RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list);
-      cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner));
-
-      RelNode rootRel = calcitePreCboPlan;
-      hepPlanner.setRoot(rootRel);
-      if (!calcitePreCboPlan.getTraitSet().equals(desiredTraits)) {
-        rootRel = hepPlanner.changeTraits(calcitePreCboPlan, desiredTraits);
-      }
-      hepPlanner.setRoot(rootRel);
-
-      calciteOptimizedPlan = hepPlanner.findBestExp();
-
-      if (LOG.isDebugEnabled() && !conf.getBoolVar(ConfVars.HIVE_IN_TEST)) {
-        LOG.debug("CBO Planning details:\n");
-        LOG.debug("Original Plan:\n" + RelOptUtil.toString(calciteGenPlan));
-        LOG.debug("Plan After PPD, PartPruning, ColumnPruning:\n"
-            + RelOptUtil.toString(calcitePreCboPlan));
-        LOG.debug("Plan After Join Reordering:\n"
-            + RelOptUtil.toString(calciteOptimizedPlan, SqlExplainLevel.ALL_ATTRIBUTES));
-      }
-
-      return calciteOptimizedPlan;
-    }
-
-    public RelNode applyPreCBOTransforms(RelNode basePlan, RelMetadataProvider mdProvider) {
-
-      // TODO: Decorrelation of subquery should be done before attempting
-      // Partition Pruning; otherwise Expression evaluation may try to execute
-      // a correlated subquery.
-
-      // Push Down Semi Joins
-      basePlan = hepPlan(basePlan, true, mdProvider,
-              SemiJoinJoinTransposeRule.INSTANCE,
-              SemiJoinFilterTransposeRule.INSTANCE,
-              SemiJoinProjectTransposeRule.INSTANCE);
-
-      basePlan = hepPlan(basePlan, true, mdProvider,
-          new HiveFilterProjectTransposeRule(
-          Filter.class, HiveFilter.DEFAULT_FILTER_FACTORY, HiveProject.class,
-          HiveProject.DEFAULT_PROJECT_FACTORY), new HiveFilterSetOpTransposeRule(
-          HiveFilter.DEFAULT_FILTER_FACTORY), new FilterMergeRule(
-          HiveFilter.DEFAULT_FILTER_FACTORY), HiveFilterJoinRule.JOIN,
-          HiveFilterJoinRule.FILTER_ON_JOIN,
-          new FilterAggregateTransposeRule(
-              Filter.class,
-                    HiveFilter.DEFAULT_FILTER_FACTORY,
-                    Aggregate.class));
-
-      basePlan = hepPlan(basePlan, false, mdProvider, new JoinPushTransitivePredicatesRule(
-          Join.class, HiveFilter.DEFAULT_FILTER_FACTORY),
-          // TODO: Enable it after CALCITE-407 is fixed
-          //RemoveTrivialProjectRule.INSTANCE,
-          new HivePartitionPruneRule(SemanticAnalyzer.this.conf));
-
-      RelFieldTrimmer fieldTrimmer = new RelFieldTrimmer(null, HiveProject.DEFAULT_PROJECT_FACTORY,
-          HiveFilter.DEFAULT_FILTER_FACTORY, HiveJoin.HIVE_JOIN_FACTORY, RelFactories.DEFAULT_SEMI_JOIN_FACTORY,
-          HiveSort.HIVE_SORT_REL_FACTORY, HiveAggregate.HIVE_AGGR_REL_FACTORY, HiveUnion.UNION_REL_FACTORY);
-      basePlan = fieldTrimmer.trim(basePlan);
-
-      basePlan = hepPlan(basePlan, true, mdProvider,
-          new FilterProjectTransposeRule(Filter.class,
-              HiveFilter.DEFAULT_FILTER_FACTORY, HiveProject.class,
-              HiveProject.DEFAULT_PROJECT_FACTORY));
-
-      return basePlan;
-    }
-
-    private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges,
-        RelMetadataProvider mdProvider, RelOptRule... rules) {
-
-      RelNode optimizedRelNode = basePlan;
-      HepProgramBuilder programBuilder = new HepProgramBuilder();
-      if (followPlanChanges) {
-        programBuilder.addMatchOrder(HepMatchOrder.TOP_DOWN);
-        programBuilder = programBuilder.addRuleCollection(ImmutableList.copyOf(rules));
-      } else {
-        // TODO: Should this be also TOP_DOWN?
-        for (RelOptRule r : rules)
-          programBuilder.addRuleInstance(r);
-      }
-
-      HepPlanner planner = new HepPlanner(programBuilder.build());
-      List<RelMetadataProvider> list = Lists.newArrayList();
-      list.add(mdProvider);
-      planner.registerMetadataProviders(list);
-      RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list);
-      basePlan.getCluster().setMetadataProvider(
-          new CachingRelMetadataProvider(chainedProvider, planner));
-
-      planner.setRoot(basePlan);
-      optimizedRelNode = planner.findBestExp();
-
-      return optimizedRelNode;
-    }
-
-    @SuppressWarnings("nls")
-    private RelNode genUnionLogicalPlan(String unionalias, String leftalias, RelNode leftRel,
-        String rightalias, RelNode rightRel) throws SemanticException {
-      HiveUnion unionRel = null;
-
-      // 1. Get Row Resolvers, Column map for original left and right input of
-      // Union Rel
-      RowResolver leftRR = this.relToHiveRR.get(leftRel);
-      RowResolver rightRR = this.relToHiveRR.get(rightRel);
-      HashMap<String, ColumnInfo> leftmap = leftRR.getFieldMap(leftalias);
-      HashMap<String, ColumnInfo> rightmap = rightRR.getFieldMap(rightalias);
-
-      // 2. Validate that Union is feasible according to Hive (by using type
-      // info from RR)
-      if (leftmap.size() != rightmap.size()) {
-        throw new SemanticException("Schema of both sides of union should match.");
-      }
-
-      ASTNode tabref = qb.getAliases().isEmpty() ? null : qb.getParseInfo().getSrcForAlias(
-          qb.getAliases().get(0));
-      for (Map.Entry<String, ColumnInfo> lEntry : leftmap.entrySet()) {
-        String field = lEntry.getKey();
-        ColumnInfo lInfo = lEntry.getValue();
-        ColumnInfo rInfo = rightmap.get(field);
-        if (rInfo == null) {
-          throw new SemanticException(generateErrorMessage(tabref,
-              "Schema of both sides of union should match. " + rightalias
-                  + " does not have the field " + field));
-        }
-        if (lInfo == null) {
-          throw new SemanticException(generateErrorMessage(tabref,
-              "Schema of both sides of union should match. " + leftalias
-                  + " does not have the field " + field));
-        }
-        if (!lInfo.getInternalName().equals(rInfo.getInternalName())) {
-          throw new CalciteSemanticException(generateErrorMessage(tabref,
-              "Schema of both sides of union should match: field " + field + ":"
-                  + " appears on the left side of the UNION at column position: "
-                  + getPositionFromInternalName(lInfo.getInternalName())
-                  + ", and on the right side of the UNION at column position: "
-                  + getPositionFromInternalName(rInfo.getInternalName())
-                  + ". Column positions should match for a UNION"));
-        }
-        // try widening conversion, otherwise fail the union
-        TypeInfo commonTypeInfo = FunctionRegistry.getCommonClassForUnionAll(lInfo.getType(),
-            rInfo.getType());
-        if (commonTypeInfo == null) {
-          throw new CalciteSemanticException(generateErrorMessage(tabref,
-              "Schema of both sides of union should match: Column " + field + " is of type "
-                  + lInfo.getType().getTypeName() + " on first table and type "
-                  + rInfo.getType().getTypeName() + " on second table"));
-        }
-      }
-
-      // 3. construct Union Output RR using original left & right Input
-      RowResolver unionoutRR = new RowResolver();
-      for (Map.Entry<String, ColumnInfo> lEntry : leftmap.entrySet()) {
-        String field = lEntry.getKey();
-        ColumnInfo lInfo = lEntry.getValue();
-        ColumnInfo rInfo = rightmap.get(field);
-        ColumnInfo unionColInfo = new ColumnInfo(lInfo);
-        unionColInfo.setTabAlias(unionalias);
-        unionColInfo.setType(FunctionRegistry.getCommonClassForUnionAll(lInfo.getType(),
-            rInfo.getType()));
-        unionoutRR.put(unionalias, field, unionColInfo);
-      }
-
-      // 4. Determine which columns requires cast on left/right input (Calcite
-      // requires exact types on both sides of union)
-      boolean leftNeedsTypeCast = false;
-      boolean rightNeedsTypeCast = false;
-      List<RexNode> leftProjs = new ArrayList<RexNode>();
-      List<RexNode> rightProjs = new ArrayList<RexNode>();
-      List<RelDataTypeField> leftRowDT = leftRel.getRowType().getFieldList();
-      List<RelDataTypeField> rightRowDT = rightRel.getRowType().getFieldList();
-
-      RelDataType leftFieldDT;
-      RelDataType rightFieldDT;
-      RelDataType unionFieldDT;
-      for (int i = 0; i < leftRowDT.size(); i++) {
-        leftFieldDT = leftRowDT.get(i).getType();
-        rightFieldDT = rightRowDT.get(i).getType();
-        if (!leftFieldDT.equals(rightFieldDT)) {
-          unionFieldDT = TypeConverter.convert(unionoutRR.getColumnInfos().get(i).getType(),
-              cluster.getTypeFactory());
-          if (!unionFieldDT.equals(leftFieldDT)) {
-            leftNeedsTypeCast = true;
-          }
-          leftProjs.add(cluster.getRexBuilder().ensureType(unionFieldDT,
-              cluster.getRexBuilder().makeInputRef(leftFieldDT, i), true));
-
-          if (!unionFieldDT.equals(rightFieldDT)) {
-            rightNeedsTypeCast = true;
-          }
-          rightProjs.add(cluster.getRexBuilder().ensureType(unionFieldDT,
-              cluster.getRexBuilder().makeInputRef(rightFieldDT, i), true));
-        } else {
-          leftProjs.add(cluster.getRexBuilder().ensureType(leftFieldDT,
-              cluster.getRexBuilder().makeInputRef(leftFieldDT, i), true));
-          rightProjs.add(cluster.getRexBuilder().ensureType(rightFieldDT,
-              cluster.getRexBuilder().makeInputRef(rightFieldDT, i), true));
-        }
-      }
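-      // Note: RexBuilder.ensureType only wraps the input ref in a CAST when the
-      // types differ, so in the int/bigint example above the narrower side becomes
-      // CAST($i):BIGINT while the other side stays a plain input ref.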
-
-      // 5. Introduce Project Rel above original left/right inputs if cast is
-      // needed for type parity
-      RelNode unionLeftInput = leftRel;
-      RelNode unionRightInput = rightRel;
-      if (leftNeedsTypeCast) {
-        unionLeftInput = HiveProject.create(leftRel, leftProjs, leftRel.getRowType()
-            .getFieldNames());
-      }
-      if (rightNeedsTypeCast) {
-        unionRightInput = HiveProject.create(rightRel, rightProjs, rightRel.getRowType()
-            .getFieldNames());
-      }
-
-      // 6. Construct Union Rel
-      ImmutableList.Builder<RelNode> bldr = new ImmutableList.Builder<RelNode>();
-      bldr.add(unionLeftInput);
-      bldr.add(unionRightInput);
-      unionRel = new HiveUnion(cluster, TraitsUtil.getDefaultTraitSet(cluster),
-          bldr.build());
-
-      relToHiveRR.put(unionRel, unionoutRR);
-      relToHiveColNameCalcitePosMap.put(unionRel,
-          this.buildHiveToCalciteColumnMap(unionoutRR, unionRel));
-
-      return unionRel;
-    }
-
-    private RelNode genJoinRelNode(RelNode leftRel, RelNode rightRel, JoinType hiveJoinType,
-        ASTNode joinCond) throws SemanticException {
-      RelNode joinRel = null;
-
-      // 1. construct the RowResolver for the new Join Node by combining row
-      // resolvers from left, right
-      RowResolver leftRR = this.relToHiveRR.get(leftRel);
-      RowResolver rightRR = this.relToHiveRR.get(rightRel);
-      RowResolver joinRR = null;
-
-      if (hiveJoinType != JoinType.LEFTSEMI) {
-        joinRR = RowResolver.getCombinedRR(leftRR, rightRR);
-      } else {
-        joinRR = new RowResolver();
-        if (!RowResolver.add(joinRR, leftRR)) {
-          LOG.warn("Duplicates detected when adding columns to RR: see previous message");
-        }
-      }
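-      // A LEFT SEMI JOIN exposes only the left input's columns downstream, so its
-      // RowResolver is built from leftRR alone; every other join type combines
-      // the resolvers of both sides.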
-
-      // 2. Construct ExpressionNodeDesc representing Join Condition
-      RexNode calciteJoinCond = null;
-      if (joinCond != null) {
-        JoinTypeCheckCtx jCtx = new JoinTypeCheckCtx(leftRR, rightRR, hiveJoinType);
-        Map<ASTNode, ExprNodeDesc> exprNodes = JoinCondTypeCheckProcFactory.genExprNode(joinCond,
-            jCtx);
-        if (jCtx.getError() != null)
-          throw new SemanticException(SemanticAnalyzer.generateErrorMessage(jCtx.getErrorSrcNode(),
-              jCtx.getError()));
-
-        ExprNodeDesc joinCondnExprNode = exprNodes.get(joinCond);
-
-        List<RelNode> inputRels = new ArrayList<RelNode>();
-        inputRels.add(leftRel);
-        inputRels.add(rightRel);
-        calciteJoinCond = RexNodeConverter.convert(cluster, joinCondnExprNode, inputRels,
-            relToHiveRR, relToHiveColNameCalcitePosMap, false);
-      } else {
-        calciteJoinCond = cluster.getRexBuilder().makeLiteral(true);
-      }
-
-      // 3. Validate that the join condition is legal (i.e. no function referring
-      // to both sides of the join; only equi-joins)
-      // TODO: Join filter handling (only supported for OJ by the runtime, or is
-      // it supported for IJ as well?)
-
-      // 4. Construct Join Rel Node
-      boolean leftSemiJoin = false;
-      JoinRelType calciteJoinType;
-      switch (hiveJoinType) {
-      case LEFTOUTER:
-        calciteJoinType = JoinRelType.LEFT;
-        break;
-      case RIGHTOUTER:
-        calciteJoinType = JoinRelType.RIGHT;
-        break;
-      case FULLOUTER:
-        calciteJoinType = JoinRelType.FULL;
-        break;
-      case LEFTSEMI:
-        calciteJoinType = JoinRelType.INNER;
-        leftSemiJoin = true;
-        break;
-      case INNER:
-      default:
-        calciteJoinType = JoinRelType.INNER;
-        break;
-      }
-
-      if (leftSemiJoin) {
-        List<RelDataTypeField> sysFieldList = new ArrayList<RelDataTypeField>();
-        List<RexNode> leftJoinKeys = new ArrayList<RexNode>();
-        List<RexNode> rightJoinKeys = new ArrayList<RexNode>();
-
-        RexNode nonEquiConds = RelOptUtil.splitJoinCondition(sysFieldList, leftRel, rightRel,
-            calciteJoinCond, leftJoinKeys, rightJoinKeys, null, null);
-
-        if (!nonEquiConds.isAlwaysTrue()) {
-          throw new SemanticException("Non equality condition not supported in Semi-Join"
-              + nonEquiConds);
-        }
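-        // RelOptUtil.splitJoinCondition pulls the equi-join predicates into the
-        // key lists and returns the residual; e.g. for ON t1.x = t2.y AND t1.z > 5
-        // the residual t1.z > 5 is not always-true, so the semi-join is rejected.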
-
-        RelNode[] inputRels = new RelNode[] { leftRel, rightRel };
-        final List<Integer> leftKeys = new ArrayList<Integer>();
-        final List<Integer> rightKeys = new ArrayList<Integer>();
-        calciteJoinCond = HiveCalciteUtil.projectNonColumnEquiConditions(
-            HiveProject.DEFAULT_PROJECT_FACTORY, inputRels, leftJoinKeys, rightJoinKeys, 0,
-            leftKeys, rightKeys);
-
-        joinRel = new SemiJoin(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
-            inputRels[0], inputRels[1], calciteJoinCond, ImmutableIntList.copyOf(leftKeys),
-            ImmutableIntList.copyOf(rightKeys));
-      } else {
-        joinRel = HiveJoin.getJoin(cluster, leftRel, rightRel, calciteJoinCond, calciteJoinType,
-            leftSemiJoin);
-      }
-      // 5. Add new JoinRel & its RR to the maps
-      relToHiveColNameCalcitePosMap.put(joinRel, this.buildHiveToCalciteColumnMap(joinRR, joinRel));
-      relToHiveRR.put(joinRel, joinRR);
-
-      return joinRel;
-    }
-
-    /**
-     * Generate the join logical-plan RelNode by walking through the join AST.
-     *
-     * @param joinParseTree
-     *          join AST node to walk
-     * @param aliasToRel
-     *          alias (table/relation alias) to RelNode; only read, never
-     *          written to, by this method
-     * @return the join RelNode
-     * @throws SemanticException
-     */
-    private RelNode genJoinLogicalPlan(ASTNode joinParseTree, Map<String, RelNode> aliasToRel)
-        throws SemanticException {
-      RelNode leftRel = null;
-      RelNode rightRel = null;
-      JoinType hiveJoinType = null;
-
-      if (joinParseTree.getToken().getType() == HiveParser.TOK_UNIQUEJOIN) {
-        String msg = String.format("UNIQUE JOIN is currently not supported in CBO,"
-            + " turn off cbo to use UNIQUE JOIN.");
-        LOG.debug(msg);
-        throw new CalciteSemanticException(msg);
-      }
-
-      // 1. Determine Join Type
-      // TODO: What about TOK_CROSSJOIN, TOK_MAPJOIN
-      switch (joinParseTree.getToken().getType()) {
-      case HiveParser.TOK_LEFTOUTERJOIN:
-        hiveJoinType = JoinType.LEFTOUTER;
-        break;
-      case HiveParser.TOK_RIGHTOUTERJOIN:
-        hiveJoinType = JoinType.RIGHTOUTER;
-        break;
-      case HiveParser.TOK_FULLOUTERJOIN:
-        hiveJoinType = JoinType.FULLOUTER;
-        break;
-      case HiveParser.TOK_LEFTSEMIJOIN:
-        hiveJoinType = JoinType.LEFTSEMI;
-        break;
-      default:
-        hiveJoinType = JoinType.INNER;
-        break;
-      }
-
-      // 2. Get Left Table Alias
-      ASTNode left = (ASTNode) joinParseTree.getChild(0);
-      if ((left.getToken().getType() == HiveParser.TOK_TABREF)
-          || (left.getToken().getType() == HiveParser.TOK_SUBQUERY)
-          || (left.getToken().getType() == HiveParser.TOK_PTBLFUNCTION)) {
-        String tableName = getUnescapedUnqualifiedTableName((ASTNode) left.getChild(0))
-            .toLowerCase();
-        String leftTableAlias = left.getChildCount() == 1 ? tableName : unescapeIdentifier(left
-            .getChild(left.getChildCount() - 1).getText().toLowerCase());
-        // ptf node form is: ^(TOK_PTBLFUNCTION $name $alias?
-        // partitionTableFunctionSource partitioningSpec? expression*)
-        // guaranteed to have an alias here: check done in processJoin
-        leftTableAlias = (left.getToken().getType() == HiveParser.TOK_PTBLFUNCTION) ? unescapeIdentifier(left
-            .getChild(1).getText().toLowerCase())
-            : leftTableAlias;
-        leftRel = aliasToRel.get(leftTableAlias);
-      } else if (isJoinToken(left)) {
-        leftRel = genJoinLogicalPlan(left, aliasToRel);
-      } else {
-        assert (false);
-      }
-
-      // 3. Get Right Table Alias
-      ASTNode right = (ASTNode) joinParseTree.getChild(1);
-      if ((right.getToken().getType() == HiveParser.TOK_TABREF)
-          || (right.getToken().getType() == HiveParser.TOK_SUBQUERY)
-          || (right.getToken().getType() == HiveParser.TOK_PTBLFUNCTION)) {
-        String tableName = getUnescapedUnqualifiedTableName((ASTNode) right.getChild(0))
-            .toLowerCase();
-        String rightTableAlias = right.getChildCount() == 1 ? tableName : unescapeIdentifier(right
-            .getChild(right.getChildCount() - 1).getText().toLowerCase());
-        // ptf node form is: ^(TOK_PTBLFUNCTION $name $alias?
-        // partitionTableFunctionSource partitioningSpec? expression*)
-        // guaranteed to have an alias here: check done in processJoin
-        rightTableAlias = (right.getToken().getType() == HiveParser.TOK_PTBLFUNCTION) ? unescapeIdentifier(right
-            .getChild(1).getText().toLowerCase())
-            : rightTableAlias;
-        rightRel = aliasToRel.get(rightTableAlias);
-      } else {
-        assert (false);
-      }
-
-      // 4. Get Join Condn
-      ASTNode joinCond = (ASTNode) joinParseTree.getChild(2);
-
-      // 5. Create Join rel
-      return genJoinRelNode(leftRel, rightRel, hiveJoinType, joinCond);
-    }
-
-    private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticException {
-      RowResolver rr = new RowResolver();
-      HiveTableScan tableRel = null;
-
-      try {
-
-        // 1. If the table has a Sample specified, bail from Calcite path.
-        if (qb.getParseInfo().getTabSample(tableAlias) != null ||
-            SemanticAnalyzer.this.nameToSplitSample.containsKey(tableAlias)) {
-          String msg = String.format("Table Sample specified for %s."
-              + " Currently we don't support Table Sample clauses in CBO;"
-              + " turn off CBO for queries with table samples.", tableAlias);
-          LOG.debug(msg);
-          throw new CalciteSemanticException(msg);
-        }
-
-        // 2. Get Table Metadata
-        Table tab = qb.getMetaData().getSrcForAlias(tableAlias);
-
-        // 3. Get Table Logical Schema (Row Type)
-        // NOTE: Table logical schema = Non Partition Cols + Partition Cols +
-        // Virtual Cols
-
-        // 3.1 Add column info for non-partition cols (ObjectInspector fields)
-        StructObjectInspector rowObjectInspector = (StructObjectInspector) tab.getDeserializer()
-            .getObjectInspector();
-        List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
-        ColumnInfo colInfo;
-        String colName;
-        ArrayList<ColumnInfo> cInfoLst = new ArrayList<ColumnInfo>();
-        for (int i = 0; i < fields.size(); i++) {
-          colName = fields.get(i).getFieldName();
-          colInfo = new ColumnInfo(
-              fields.get(i).getFieldName(),
-              TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()),
-              tableAlias, false);
-          colInfo.setSkewedCol(isSkewedCol(tableAlias, qb, colName));
-          rr.put(tableAlias, colName, colInfo);
-          cInfoLst.add(colInfo);
-        }
-        // TODO: Fix this
-        ArrayList<ColumnInfo> nonPartitionColumns = new ArrayList<ColumnInfo>(cInfoLst);
-        ArrayList<ColumnInfo> partitionColumns = new ArrayList<ColumnInfo>();
-
-        // 3.2 Add column info corresponding to partition columns
-        for (FieldSchema part_col : tab.getPartCols()) {
-          colName = part_col.getName();
-          colInfo = new ColumnInfo(colName,
-              TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), tableAlias, true);
-          rr.put(tableAlias, colName, colInfo);
-          cInfoLst.add(colInfo);
-          partitionColumns.add(colInfo);
-        }
-
-        // 3.3 Add column info corresponding to virtual columns
-        Iterator<VirtualColumn> vcs = VirtualColumn.getRegistry(conf).iterator();
-        while (vcs.hasNext()) {
-          VirtualColumn vc = vcs.next();
-          colInfo = new ColumnInfo(vc.getName(), vc.getTypeInfo(), tableAlias, true,
-              vc.getIsHidden());
-          rr.put(tableAlias, vc.getName(), colInfo);
-          cInfoLst.add(colInfo);
-        }
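-        // At this point cInfoLst follows the logical schema noted above; e.g. for
-        // CREATE TABLE t (a int, b string) PARTITIONED BY (ds string) it holds
-        // [a, b, ds, virtual cols such as INPUT__FILE__NAME].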
-
-        // 3.4 Build row type from field <type, name>
-        RelDataType rowType = TypeConverter.getType(cluster, rr, null);
-
-        // 4. Build RelOptAbstractTable
-        String fullyQualifiedTabName = tab.getDbName();
-        if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty())
-          fullyQualifiedTabName = fullyQualifiedTabName + "." + tab.getTableName();
-        else
-          fullyQualifiedTabName = tab.getTableName();
-        RelOptHiveTable optTable = new RelOptHiveTable(relOptSchema, fullyQualifiedTabName,
-            tableAlias, rowType, tab, nonPartitionColumns, partitionColumns, conf, partitionCache,
-            noColsMissingStats);
-
-        // 5. Build Hive Table Scan Rel
-        tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
-            rowType);
-
-        // 6. Add Schema(RR) to RelNode-Schema map
-        ImmutableMap<String, Integer> hiveToCalciteColMap = buildHiveToCalciteColumnMap(rr, tableRel);
-        relToHiveRR.put(tableRel, rr);
-        relToHiveColNameCalcitePosMap.put(tableRel, hiveToCalciteColMap);
-      } catch (Exception e) {
-        if (e instanceof SemanticException) {
-          throw (SemanticException) e;
-        } else {
-          throw (new RuntimeException(e));
-        }
-      }
-
-      return tableRel;
-    }
-
-    private RelNode genFilterRelNode(ASTNode filterExpr, RelNode srcRel) throws SemanticException {
-      ExprNodeDesc filterCondn = genExprNodeDesc(filterExpr, relToHiveRR.get(srcRel));
-      if (filterCondn instanceof ExprNodeConstantDesc &&
-        !filterCondn.getTypeString().equals(serdeConstants.BOOLEAN_TYPE_NAME)) {
-        // queries like select * from t1 where 'foo';
-        // Calcite's rule PushFilterThroughProject chokes on it. Arguably, we could insert a cast
-        // to boolean in such cases, but since Postgres, Oracle and MS SQL Server fail at compile
-        // time for such queries, it's an arcane corner case, not worth adding that complexity.
-        throw new CalciteSemanticException("Filter expression with non-boolean return type.");
-      }
-      ImmutableMap<String, Integer> hiveColNameCalcitePosMap = this.relToHiveColNameCalcitePosMap
-          .get(srcRel);
-      RexNode convertedFilterExpr = new RexNodeConverter(cluster, srcRel.getRowType(),
-          hiveColNameCalcitePosMap, 0, true).convert(filterCondn);
-      RexNode factoredFilterExpr = RexUtil.pullFactors(cluster.getRexBuilder(), convertedFilterExpr);
-      RelNode filterRel = new HiveFilter(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
-          srcRel, factoredFilterExpr);
-      this.relToHiveColNameCalcitePosMap.put(filterRel, hiveColNameCalcitePosMap);
-      relToHiveRR.put(filterRel, relToHiveRR.get(srcRel));
-
-      return filterRel;
-    }
-
-    private RelNode genFilterRelNode(QB qb, ASTNode searchCond, RelNode srcRel,
-        Map<String, RelNode> aliasToRel, boolean forHavingClause) throws SemanticException {
-      /*
-       * Handle Subquery predicates.
-       *
-       * Notes (8/22/14 hb): Why is this a copy of the code from {@link
-       * #genFilterPlan}? For now we support the same behavior as the non-CBO
-       * route, but plan to allow nested SubQueries (Restriction.9.m) and
-       * multiple SubQuery expressions (Restriction.8.m). This requires us to
-       * utilize Calcite's decorrelation mechanics, and for Calcite to flesh out
-       * null semantics (CALCITE-373). Only the driving code has been copied;
-       * most of the code in SubQueryUtils and QBSubQuery is reused.
-       */
-      int numSrcColumns = srcRel.getRowType().getFieldCount();
-      List<ASTNode> subQueriesInOriginalTree = SubQueryUtils.findSubQueries(searchCond);
-      if (subQueriesInOriginalTree.size() > 0) {
-
-        /*
-         * Restriction.9.m :: disallow nested SubQuery expressions.
-         */
-        if (qb.getSubQueryPredicateDef() != null) {
-          throw new SemanticException(ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION.getMsg(
-              subQueriesInOriginalTree.get(0), "Nested SubQuery expressions are not supported."));
-        }
-
-        /*
-         * Restriction.8.m :: We allow only 1 SubQuery expression per Query.
-         */
-        if (subQueriesInOriginalTree.size() > 1) {
-          throw new SemanticException(ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION.getMsg(
-              subQueriesInOriginalTree.get(1), "Only 1 SubQuery expression is supported."));
-        }
-
-        /*
-         * Clone the Search AST; apply all rewrites on the clone.
-         */
-        ASTNode clonedSearchCond = (ASTNode) SubQueryUtils.adaptor.dupTree(searchCond);
-        List<ASTNode> subQueries = SubQueryUtils.findSubQueries(clonedSearchCond);
-
-        RowResolver inputRR = relToHiveRR.get(srcRel);
-        RowResolver outerQBRR = inputRR;
-        ImmutableMap<String, Integer> outerQBPosMap =
-            relToHiveColNameCalcitePosMap.get(srcRel);
-
-        for (int i = 0; i < subQueries.size(); i++) {
-          ASTNode subQueryAST = subQueries.get(i);
-          ASTNode originalSubQueryAST = subQueriesInOriginalTree.get(i);
-
-          int sqIdx = qb.incrNumSubQueryPredicates();
-          clonedSearchCond = SubQueryUtils.rewriteParentQueryWhere(clonedSearchCond, subQueryAST);
-
-          QBSubQuery subQuery = SubQueryUtils.buildSubQuery(qb.getId(), sqIdx, subQueryAST,
-              originalSubQueryAST, ctx);
-
-          if (!forHavingClause) {
-            qb.setWhereClauseSubQueryPredicate(subQuery);
-          } else {
-            qb.setHavingClauseSubQueryPredicate(subQuery);
-          }
-          String havingInputAlias = null;
-
-          if (forHavingClause) {
-            havingInputAlias = "gby_sq" + sqIdx;
-            aliasToRel.put(havingInputAlias, srcRel);
-          }
-
-          subQuery.validateAndRewriteAST(inputRR, forHavingClause, havingInputAlias,
-              aliasToRel.keySet());
-
-          QB qbSQ = new QB(subQuery.getOuterQueryId(), subQuery.getAlias(), true);
-          qbSQ.setSubQueryDef(subQuery.getSubQuery());
-          Phase1Ctx ctx_1 = initPhase1Ctx();
-          doPhase1(subQuery.getSubQueryAST(), qbSQ, ctx_1, null);
-          getMetaData(qbSQ);
-          RelNode subQueryRelNode = genLogicalPlan(qbSQ, false);
-          aliasToRel.put(subQuery.getAlias(), subQueryRelNode);
-          RowResolver sqRR = relToHiveRR.get(subQueryRelNode);
-
-          /*
-           * Check.5.h :: For In and Not In the SubQuery must implicitly or
-           * explicitly only contain one select item.
-           */
-          if (subQuery.getOperator().getType() != SubQueryType.EXISTS
-              && subQuery.getOperator().getType() != SubQueryType.NOT_EXISTS
-              && sqRR.getColumnInfos().size() - subQuery.getNumOfCorrelationExprsAddedToSQSelect() > 1) {
-            throw new SemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(subQueryAST,
-                "SubQuery can contain only 1 item in Select List."));
-          }
-
-          /*
-           * If this is a Not In SubQuery Predicate then Join in the Null Check
-           * SubQuery. See QBSubQuery.NotInCheck for details on why and how this
-           * is constructed.
-           */
-          if (subQuery.getNotInCheck() != null) {
-            QBSubQuery.NotInCheck notInCheck = subQuery.getNotInCheck();
-            notInCheck.setSQRR(sqRR);
-            QB qbSQ_nic = new QB(subQuery.getOuterQueryId(), notInCheck.getAlias(), true);
-            qbSQ_nic.setSubQueryDef(notInCheck.getSubQuery());
-            ctx_1 = initPhase1Ctx();
-            doPhase1(notInCheck.getSubQueryAST(), qbSQ_nic, ctx_1, null);
-            getMetaData(qbSQ_nic);
-            RelNode subQueryNICRelNode = genLogicalPlan(qbSQ_nic, false);
-            aliasToRel.put(notInCheck.getAlias(), subQueryNICRelNode);
-            // set explicitly to INNER until we figure out SemiJoin use
-            // (instead of notInCheck.getJoinType())
-            srcRel = genJoinRelNode(srcRel, subQueryNICRelNode,
-                JoinType.INNER, notInCheck.getJoinConditionAST());
-            inputRR = relToHiveRR.get(srcRel);
-            if (forHavingClause) {
-              aliasToRel.put(havingInputAlias, srcRel);
-            }
-          }
-
-          /*
-           * Gen Join between outer Operator and SQ op
-           */
-          subQuery.buildJoinCondition(inputRR, sqRR, forHavingClause, havingInputAlias);
-          srcRel = genJoinRelNode(srcRel, subQueryRelNode, subQuery.getJoinType(),
-              subQuery.getJoinConditionAST());
-          searchCond = subQuery.updateOuterQueryFilter(clonedSearchCond);
-
-          srcRel = genFilterRelNode(searchCond, srcRel);
-
-          /*
-           * For Not Exists and Not In, add a projection on top of the Left
-           * Outer Join.
-           */
-          if (subQuery.getOperator().getType() == SubQueryType.NOT_EXISTS
-              || subQuery.getOperator().getType() == SubQueryType.NOT_IN) {
-            srcRel = projectLeftOuterSide(srcRel, numSrcColumns);
-          }
-        }
-        relToHiveRR.put(srcRel, outerQBRR);
-        relToHiveColNameCalcitePosMap.put(srcRel, outerQBPosMap);
-        return srcRel;
-      }
-
-      return genFilterRelNode(searchCond, srcRel);
-    }
-
-    private RelNode projectLeftOuterSide(RelNode srcRel, int numColumns) throws SemanticException {
-      RowResolver iRR = relToHiveRR.get(srcRel);
-      RowResolver oRR = new RowResolver();
-      RowResolver.add(oRR, iRR, numColumns);
-
-      List<RexNode> calciteColLst = new ArrayList<RexNode>();
-      List<String> oFieldNames = new ArrayList<String>();
-      RelDataType iType = srcRel.getRowType();
-
-      for (int i = 0; i < iType.getFieldCount(); i++) {
-        RelDataTypeField fType = iType.getFieldList().get(i);
-        String fName = iType.getFieldNames().get(i);
-        calciteColLst.add(cluster.getRexBuilder().makeInputRef(fType.getType(), i));
-        oFieldNames.add(fName);
-      }
-
-      HiveRelNode selRel = HiveProject.create(srcRel, calciteColLst, oFieldNames);
-
-      this.relToHiveColNameCalcitePosMap.put(selRel, buildHiveToCalciteColumnMap(oRR, selRel));
-      this.relToHiveRR.put(selRel, oRR);
-      return selRel;
-    }
-
-    private RelNode genFilterLogicalPlan(QB qb, RelNode srcRel, Map<String, RelNode> aliasToRel,
-        boolean forHavingClause) throws SemanticException {
-      RelNode filterRel = null;
-
-      Iterator<ASTNode> whereClauseIterator = getQBParseInfo(qb).getDestToWhereExpr().values()
-          .iterator();
-      if (whereClauseIterator.hasNext()) {
-        filterRel = genFilterRelNode(qb, (ASTNode) whereClauseIterator.next().getChild(0), srcRel,
-            aliasToRel, forHavingClause);
-      }
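-      // Note: the CBO path handles only single-destination queries, so at most
-      // one WHERE expression is expected from the dest-to-where map here.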
-
-      return filterRel;
-    }
-
-    /**
-     * Class to store GenericUDAF related information.
-     */
-    private class AggInfo {
-      private final List<ExprNodeDesc> m_aggParams;
-      private final TypeInfo           m_returnType;
-      private final String             m_udfName;
-      private final boolean            m_distinct;
-
-      private AggInfo(List<ExprNodeDesc> aggParams, TypeInfo returnType, String udfName,
-          boolean isDistinct) {
-        m_aggParams = aggParams;
-        m_returnType = returnType;
-        m_udfName = udfName;
-        m_distinct = isDistinct;
-      }
-    }
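-    // For example, count(DISTINCT b) would be captured roughly as
-    // AggInfo(aggParams=[b], returnType=bigint, udfName="count", isDistinct=true).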
-
-    private AggregateCall convertGBAgg(AggInfo agg, RelNode input, List<RexNode> gbChildProjLst,
-        RexNodeConverter converter, HashMap<String, Integer> rexNodeToPosMap,
-        Integer childProjLstIndx) throws SemanticException {
-
-      // 1. Get agg fn ret type in Calcite
-      RelDataType aggFnRetType = TypeConverter.convert(agg.m_returnType,
-          this.cluster.getTypeFactory());
-
-      // 2. Convert Agg Fn args and type of args to Calcite
-      // TODO: Does HQL allow expressions as aggregate args, or can they only be
-      // projections from the child?
-      Integer inputIndx;
-      List<Integer> argList = new ArrayList<Integer>();
-      RexNode rexNd = null;
-      RelDataTypeFactory dtFactory = this.cluster.getTypeFactory();
-      ImmutableList.Builder<RelDataType> aggArgRelDTBldr = new ImmutableList.Builder<RelDataType>();
-      for (ExprNodeDesc expr : agg.m_aggParams) {
-        rexNd = converter.convert(expr);
-        inputIndx = rexNodeToPosMap.get(rexNd.toString());
-        if (inputIndx == null) {
-          gbChildProjLst.add(rexNd);
-          rexNodeToPosMap.put(rexNd.toString(), childProjLstIndx);
-          inputIndx = childProjLstIndx;
-          childProjLstIndx++;
-        }
-        argList.add(inputIndx);
-
-        // TODO: does arg need type cast?
-        aggArgRelDTBldr.add(TypeConverter.convert(expr.getTypeInfo(), dtFactory));
-      }
-
-      // 3. Get Aggregation FN from Calcite given name, ret type and input arg
-      // type
-      final SqlAggFunction aggregation = SqlFunctionConverter.getCalciteAggFn(agg.m_udfName,
-          aggArgRelDTBldr.build(), aggFnRetType);
-
-      return new AggregateCall(aggregation, agg.m_distinct, argList, aggFnRetType, null);
-    }
-
-    private RelNode genGBRelNode(List<ExprNodeDesc> gbExprs, List<AggInfo> aggInfoLst,
-        RelNode srcRel) throws SemanticException {
-      RowResolver gbInputRR = this.relToHiveRR.get(srcRel);
-      ImmutableMap<String, Integer> posMap = this.relToHiveColNameCalcitePosMap.get(srcRel);
-      RexNodeConverter converter = new RexNodeConverter(this.cluster, srcRel.getRowType(),
-          posMap, 0, false);
-
-      final List<RexNode> gbChildProjLst = Lists.newArrayList();
-      final HashMap<String, Integer> rexNodeToPosMap = new HashMap<String, Integer>();
-      final List<Integer> groupSetPositions = Lists.newArrayList();
-      Integer gbIndx = 0;
-      RexNode rnd;
-      for (ExprNodeDesc key : gbExprs) {
-        rnd = converter.convert(key);
-        gbChildProjLst.add(rnd);
-        groupSetPositions.add(gbIndx);
-        rexNodeToPosMap.put(rnd.toString(), gbIndx);
-        gbIndx++;
-      }
-      final ImmutableBitSet groupSet = ImmutableBitSet.of(groupSetPositions);
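-      // E.g. for GROUP BY a, b the keys land in child-projection slots 0 and 1,
-      // so groupSet is {0, 1}; aggregate arguments are appended after the keys
-      // by convertGBAgg below.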
-
-      List<AggregateCall> aggregateCalls = Lists.newArrayList();
-      for (AggInfo agg : aggInfoLst) {
-        aggregateCalls.add(convertGBAgg(agg, srcRel, gbChildProjLst, converter, rexNodeToPosMap,
-            gbChildProjLst.size()));
-      }
-
-      if (gbChildProjLst.isEmpty()) {
-        // This happens for count(*); in such cases we arbitrarily pick the
-        // first column from srcRel
-        gbChildProjLst.add(this.cluster.getRexBuilder().makeInputRef(srcRel, 0));
-      }
-      RelNode gbInputRel = HiveProject.create(srcRel, gbChildProjLst, null);
-
-      HiveRelNode aggregateRel = null;
-      try {
-        aggregateRel = new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
-            gbInputRel, false, groupSet, null, aggregateCalls);
-      } catch (InvalidRelException e) {
-        throw new SemanticException(e);
-      }
-
-      return aggregateRel;
-    }
-
-    private void addAlternateGByKeyMappings(ASTNode gByExpr, ColumnInfo colInfo,
-        RowResolver gByInputRR, RowResolver gByRR) {
-      if (gByExpr.getType() == HiveParser.DOT
-          && gByExpr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL) {
-        String tab_alias = BaseSemanticAnalyzer.unescapeIdentifier(gByExpr.getChild(0).getChild(0)
-            .getText());
-        String col_alias = BaseSemanticAnalyzer.unescapeIdentifier(gByExpr.getChild(1).getText());
-        gByRR.put(tab_alias, col_alias, colInfo);
-      } else if (gByExpr.getType() == HiveParser.TOK_TABLE_OR_COL) {
-        String col_alias = BaseSemanticAnalyzer.unescapeIdentifier(gByExpr.getChild(0).getText());
-        String tab_alias = null;
-        /*
-         * If the input to the GBy has a tab alias for the column, then add an
-         * entry based on that tab_alias. For example, the query "select b.x,
-         * count(*) from t1 b group by x" needs (tab_alias=b, col_alias=x) in the
-         * GBy RR. tab_alias=b comes from looking at the RowResolver that is the
-         * ancestor before any GBy/ReduceSinks added for the GBY operation.
-         */
-        try {
-          ColumnInfo pColInfo = gByInputRR.get(tab_alias, col_alias);
-          tab_alias = pColInfo == null ? null : pColInfo.getTabAlias();
-        } catch (SemanticException se) {
-          // ignore: leave tab_alias as null if the lookup fails
-        }
-        gByRR.put(tab_alias, col_alias, colInfo);
-      }
-    }
-
-    private void addToGBExpr(RowResolver groupByOutputRowResolver,
-        RowResolver groupByInputRowResolver, ASTNode grpbyExpr, ExprNodeDesc grpbyExprNDesc,
-        List<ExprNodeDesc> gbExprNDescLst, List<String> outputColumnNames) {
-      // TODO: Should we use grpbyExprNDesc.getTypeInfo()? what if expr is
-      // UDF
-      int i = gbExprNDescLst.size();
-      String field = getColumnInternalName(i);
-      outputColumnNames.add(field);
-      gbExprNDescLst.add(grpbyExprNDesc);
-
-      ColumnInfo oColInfo = new ColumnInfo(field, grpbyExprNDesc.getTypeInfo(), null, false);
-      groupByOutputRowResolver.putExpression(grpbyExpr, oColInfo);
-
-      addAlternateGByKeyMappings(grpbyExpr, oColInfo, groupByInputRowResolver,
-          groupByOutputRowResolver);
-    }
-
-    private AggInfo getHiveAggInfo(ASTNode aggAst, int aggFnLstArgIndx, RowResolver inputRR)
-        throws SemanticException {
-      AggInfo aInfo = null;
-
-      // 1 Convert UDAF Params to ExprNodeDesc
-      ArrayList<ExprNodeDesc> aggParameters = new ArrayList<ExprNodeDesc>();
-      for (int i = 1; i <= aggFnLstArgIndx; i++) {
-        ASTNode paraExpr = (ASTNode) aggAst.getChild(i);
-        ExprNodeDesc paraExprNode = genExprNodeDesc(paraExpr, inputRR);
-        aggParameters.add(paraExprNode);
-      }
-
-      // 2. Is this a distinct UDAF?
-      boolean isDistinct = aggAst.getType() == HiveParser.TOK_FUNCTIONDI;
-
-      // 3. Determine type of UDAF
-      TypeInfo udafRetType = null;
-
-      // 3.1 Obtain UDAF name
-      String aggName = unescapeIdentifier(aggAst.getChild(0).getText());
-
-      // 3.2 Ranking functions return 'int' ('double' for percent_rank)
-      if (FunctionRegistry.isRankingFunction(aggName)) {
-        if (aggName.equalsIgnoreCase("percent_rank"))
-          udafRetType = TypeInfoFactory.doubleTypeInfo;
-        else
-          udafRetType = TypeInfoFactory.intTypeInfo;
-      } else {
-        // 3.3 Try obtaining UDAF evaluators to determine the ret type
-        try {
-          boolean isAllColumns = aggAst.getType() == HiveParser.TOK_FUNCTIONSTAR;
-
-          // 3.3.1 Get UDAF Evaluator
-          Mode amode = groupByDescModeToUDAFMode(GroupByDesc.Mode.COMPLETE, isDistinct);
-
-          GenericUDAFEvaluator genericUDAFEvaluator = null;
-          if (aggName.equalsIgnoreCase(FunctionRegistry.LEAD_FUNC_NAME)
-              || aggName.equalsIgnoreCase(FunctionRegistry.LAG_FUNC_NAME)) {
-            ArrayList<ObjectInspector> originalParameterTypeInfos =
-                getWritableObjectInspector(aggParameters);
-            genericUDAFEvaluator =
-                FunctionRegistry.getGenericWindowingEvaluator(aggName,
-                    originalParameterTypeInfos, isDistinct, isAllColumns);
-            GenericUDAFInfo udaf = getGenericUDAFInfo(genericUDAFEvaluator, amode, aggParameters);
-            udafRetType = ((ListTypeInfo)udaf.returnType).getListElementTypeInfo();
-          } else {
-            genericUDAFEvaluator = getGenericUDAFEvaluator(aggName,
-              aggParameters, aggAst, isDistinct, isAllColumns);
-            assert (genericUDAFEvaluator != null);
-
-            // 3.3.2 Get UDAF Info using UDAF Evaluator
-            GenericUDAFInfo udaf = getGenericUDAFInfo(genericUDAFEvaluator, amode, aggParameters);
-            udafRetType = udaf.returnType;
-          }
-        } catch (Exception e) {
-          LOG.debug("CBO: Couldn't Obtain UDAF evaluators for " + aggName

[... 1085 lines stripped ...]

