hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From hashut...@apache.org
Subject svn commit: r1617743 - in /hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql: optimizer/optiq/RelOptHiveTable.java optimizer/optiq/translator/RelNodeConverter.java parse/SemanticAnalyzer.java
Date Wed, 13 Aug 2014 15:53:04 GMT
Author: hashutosh
Date: Wed Aug 13 15:53:03 2014
New Revision: 1617743

URL: http://svn.apache.org/r1617743
Log:
HIVE-7703 : Don't recompute partition list between optiq and hive planning (Ashutosh Chauhan
via Harish Butani)

Modified:
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java?rev=1617743&r1=1617742&r2=1617743&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java
(original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java
Wed Aug 13 15:53:03 2014
@@ -48,6 +48,7 @@ public class RelOptHiveTable extends Rel
   Map<Integer, ColStatistics>                     m_hiveColStatsMap = new HashMap<Integer,
ColStatistics>();
   private Integer                                 m_numPartitions;
   PrunedPartitionList                             partitionList;
+  Map<String, PrunedPartitionList>                partitionCache;
 
   protected static final Log                      LOG               = LogFactory
                                                                         .getLog(RelOptHiveTable.class
@@ -55,7 +56,7 @@ public class RelOptHiveTable extends Rel
 
   public RelOptHiveTable(RelOptSchema optiqSchema, String name, RelDataType rowType,
       Table hiveTblMetadata, List<ColumnInfo> hiveNonPartitionCols,
-      List<ColumnInfo> hivePartitionCols, HiveConf hconf) {
+      List<ColumnInfo> hivePartitionCols, HiveConf hconf, Map<String, PrunedPartitionList>
partitionCache) {
     super(optiqSchema, name, rowType);
     m_hiveTblMetadata = hiveTblMetadata;
     m_hiveNonPartitionCols = ImmutableList.copyOf(hiveNonPartitionCols);
@@ -63,6 +64,7 @@ public class RelOptHiveTable extends Rel
     m_hivePartitionColsMap = getColInfoMap(hivePartitionCols, m_hiveNonPartitionColsMap.size());
     m_noOfProjs = hiveNonPartitionCols.size() + hivePartitionCols.size();
     m_hiveConf = hconf;
+    this.partitionCache = partitionCache;
   }
 
   private static ImmutableMap<Integer, ColumnInfo> getColInfoMap(List<ColumnInfo>
hiveCols,
@@ -96,25 +98,13 @@ public class RelOptHiveTable extends Rel
   @Override
   public double getRowCount() {
     if (m_rowCount == -1) {
-
       if (m_hiveTblMetadata.isPartitioned()) {
-        if (partitionList == null) {
-          try {
-            List<Partition> parts = new ArrayList<Partition>(
-                Hive.get().getAllPartitionsOf(m_hiveTblMetadata));
+        computePartitionList(m_hiveConf, null);
             List<Long> rowCounts = StatsUtils.getBasicStatForPartitions(
-                m_hiveTblMetadata, parts, StatsSetupConst.ROW_COUNT);
-            m_rowCount = StatsUtils.getSumIgnoreNegatives(rowCounts);
-
-          } catch (HiveException he) {
-            throw new RuntimeException(he);
-          }
-        } else {
-          List<Long> rowCounts = StatsUtils.getBasicStatForPartitions(
               m_hiveTblMetadata, partitionList.getNotDeniedPartns(),
               StatsSetupConst.ROW_COUNT);
           m_rowCount = StatsUtils.getSumIgnoreNegatives(rowCounts);
-        }
+
       } else {
         m_rowCount = StatsUtils.getNumRows(m_hiveTblMetadata);
       }
@@ -152,16 +142,14 @@ public class RelOptHiveTable extends Rel
     try {
       if (pruneNode == null || InputFinder.bits(pruneNode).length() == 0 ) {
         // there is no predicate on partitioning column, we need all partitions in this case.
-        partitionList = PartitionPruner.prune(m_hiveTblMetadata, null, conf, getName(),
-            new HashMap<String, PrunedPartitionList>());
+        partitionList = PartitionPruner.prune(m_hiveTblMetadata, null, conf, getName(), partitionCache);
         return;
       }
 
       // We have valid pruning expressions, only retrieve qualifying partitions
       ExprNodeDesc pruneExpr = pruneNode.accept(new ExprNodeConverter(getName(), getRowType(),
true));
 
-      partitionList = PartitionPruner.prune(m_hiveTblMetadata, pruneExpr, conf, getName(),
-          new HashMap<String, PrunedPartitionList>());
+      partitionList = PartitionPruner.prune(m_hiveTblMetadata, pruneExpr, conf, getName(),
partitionCache);
     } catch (HiveException he) {
       throw new RuntimeException(he);
     }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java?rev=1617743&r1=1617742&r2=1617743&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java
(original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java
Wed Aug 13 15:53:03 2014
@@ -87,7 +87,7 @@ public class RelNodeConverter {
                                                             .<String, Aggregation>
builder()
                                                             .put(
                                                                 "count",
-                                                                (Aggregation) SqlStdOperatorTable.COUNT)
+                                                                SqlStdOperatorTable.COUNT)
                                                             .put("sum", SqlStdOperatorTable.SUM)
                                                             .put("min", SqlStdOperatorTable.MIN)
                                                             .put("max", SqlStdOperatorTable.MAX)
@@ -254,6 +254,7 @@ public class RelNodeConverter {
   }
 
   static class JoinProcessor implements NodeProcessor {
+    @Override
     @SuppressWarnings("unchecked")
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
@@ -408,6 +409,7 @@ public class RelNodeConverter {
   }
 
   static class FilterProcessor implements NodeProcessor {
+    @Override
     @SuppressWarnings("unchecked")
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
@@ -434,6 +436,7 @@ public class RelNodeConverter {
   }
 
   static class SelectProcessor implements NodeProcessor {
+    @Override
     @SuppressWarnings("unchecked")
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
@@ -454,6 +457,7 @@ public class RelNodeConverter {
        */
       List<String> oFieldNames = Lists.transform(selectOp.getConf().getOutputColumnNames(),
           new Function<String, String>() {
+            @Override
             public String apply(String hName) {
               return "_o_" + hName;
             }
@@ -467,6 +471,7 @@ public class RelNodeConverter {
   }
 
   static class LimitProcessor implements NodeProcessor {
+    @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
       Context ctx = (Context) procCtx;
@@ -492,6 +497,7 @@ public class RelNodeConverter {
   }
 
   static class GroupByProcessor implements NodeProcessor {
+    @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
       Context ctx = (Context) procCtx;
@@ -524,6 +530,7 @@ public class RelNodeConverter {
         // noinspection unchecked
         input = HiveProjectRel.create(input, CompositeList.of(Lists.transform(input.getRowType()
             .getFieldList(), new Function<RelDataTypeField, RexNode>() {
+          @Override
           public RexNode apply(RelDataTypeField input) {
             return new RexInputRef(input.getIndex(), input.getType());
           }
@@ -542,6 +549,7 @@ public class RelNodeConverter {
   }
 
   static class ReduceSinkProcessor implements NodeProcessor {
+    @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
       Context ctx = (Context) procCtx;
@@ -581,6 +589,7 @@ public class RelNodeConverter {
         // noinspection unchecked
         input = HiveProjectRel.create(input, CompositeList.of(Lists.transform(input.getRowType()
             .getFieldList(), new Function<RelDataTypeField, RexNode>() {
+          @Override
           public RexNode apply(RelDataTypeField input) {
             return new RexInputRef(input.getIndex(), input.getType());
           }
@@ -612,6 +621,7 @@ public class RelNodeConverter {
   }
 
   static class TableScanProcessor implements NodeProcessor {
+    @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
       Context ctx = (Context) procCtx;
@@ -634,7 +644,7 @@ public class RelNodeConverter {
       }
       RelDataType rowType = TypeConverter.getType(ctx.cluster, rr, neededCols);
       RelOptHiveTable optTable = new RelOptHiveTable(ctx.schema, tableScanOp.getConf().getAlias(),
-          rowType, ctx.sA.getTable(tableScanOp), null, null, null);
+          rowType, ctx.sA.getTable(tableScanOp), null, null, null, null);
       TableAccessRelBase tableRel = new HiveTableScanRel(ctx.cluster,
           ctx.cluster.traitSetOf(HiveRel.CONVENTION), optTable, rowType);
       ctx.buildColumnMap(tableScanOp, tableRel);
@@ -673,12 +683,13 @@ public class RelNodeConverter {
   }
 
   static class DefaultProcessor implements NodeProcessor {
+    @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
       @SuppressWarnings("unchecked")
       Operator<? extends OperatorDesc> op = (Operator<? extends OperatorDesc>)
nd;
       Context ctx = (Context) procCtx;
-      RelNode node = (HiveRel) ctx.getParentNode(op, 0);
+      RelNode node = ctx.getParentNode(op, 0);
       ctx.hiveOpToRelNode.put(op, node);
       return node;
     }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1617743&r1=1617742&r2=1617743&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Wed
Aug 13 15:53:03 2014
@@ -9569,7 +9569,7 @@ public class SemanticAnalyzer extends Ba
 
       try {
         // 1. Gen Optimized AST
-        ASTNode newAST = new OptiqBasedPlanner().getOptimizedAST();
+        ASTNode newAST = new OptiqBasedPlanner().getOptimizedAST(prunedPartitions);
 
         // 2. Regen OP plan from optimized AST
         init();
@@ -11786,17 +11786,18 @@ public class SemanticAnalyzer extends Ba
   private class OptiqBasedPlanner implements Frameworks.PlannerAction<RelNode> {
     RelOptCluster                                         m_cluster;
     RelOptSchema                                          m_relOptSchema;
-    SchemaPlus                                            m_rootSchema;
     SemanticException                                     m_semanticException;
+    Map<String, PrunedPartitionList>                      partitionCache;
 
     // TODO: Do we need to keep track of RR, ColNameToPosMap for every op or
     // just last one.
     LinkedHashMap<RelNode, RowResolver>                   m_relToHiveRR           
     = new LinkedHashMap<RelNode, RowResolver>();
     LinkedHashMap<RelNode, ImmutableMap<String, Integer>> m_relToHiveColNameOptiqPosMap
= new LinkedHashMap<RelNode, ImmutableMap<String, Integer>>();
 
-    private ASTNode getOptimizedAST() throws SemanticException {
+    private ASTNode getOptimizedAST(Map<String, PrunedPartitionList> partitionCache)
throws SemanticException {
       ASTNode optiqOptimizedAST = null;
       RelNode optimizedOptiqPlan = null;
+      this.partitionCache = partitionCache;
 
       try {
         optimizedOptiqPlan = Frameworks.withPlanner(this);
@@ -11827,7 +11828,6 @@ public class SemanticAnalyzer extends Ba
 
       m_cluster = cluster;
       m_relOptSchema = relOptSchema;
-      m_rootSchema = rootSchema;
 
       try {
         optiqGenPlan = genLogicalPlan(qb);
@@ -12148,7 +12148,7 @@ public class SemanticAnalyzer extends Ba
 
         // 4. Build RelOptAbstractTable
         RelOptHiveTable optTable = new RelOptHiveTable(m_relOptSchema,
-            tableAlias, rowType, tab, nonPartitionColumns, partitionColumns, conf);
+            tableAlias, rowType, tab, nonPartitionColumns, partitionColumns, conf, partitionCache);
 
         // 5. Build Hive Table Scan Rel
         tableRel = new HiveTableScanRel(m_cluster,



Mime
View raw message