hive-issues mailing list archives

From "ASF GitHub Bot (JIRA)" <j...@apache.org>
Subject [jira] [Work logged] (HIVE-21382) Group by keys reduction optimization - keys are not reduced in query23
Date Wed, 13 Mar 2019 18:52:02 GMT

     [ https://issues.apache.org/jira/browse/HIVE-21382?focusedWorklogId=212606&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-212606 ]

ASF GitHub Bot logged work on HIVE-21382:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 13/Mar/19 18:51
            Start Date: 13/Mar/19 18:51
    Worklog Time Spent: 10m 
      Work Description: jcamachor commented on pull request #567: HIVE-21382: Group by keys reduction optimization - keys are not reduced in query23
URL: https://github.com/apache/hive/pull/567#discussion_r265274642
 
 

 ##########
 File path: ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
 ##########
 @@ -315,48 +322,128 @@ private boolean isRexLiteral(final RexNode rexNode) {
   }
 
 
+  private static class TableRefFinder extends RexVisitorImpl<Void> {
+    private Set<RexTableInputRef> tableRefs = null;
+    TableRefFinder() {
+      super(true);
+      this.tableRefs = new HashSet<>();
+    }
+
+    public Set<RexTableInputRef> getTableRefs() {
+      return this.tableRefs;
+    }
+
+    @Override
+    public Void visitTableInputRef(RexTableInputRef ref) {
+      this.tableRefs.add(ref);
+      return null;
+    }
+  }
+
   // Given a groupset this tries to find out if the cardinality of the grouping columns could have changed
   // because if not and it consists of keys (unique + not null OR pk), we can safely remove the
   // rest of the columns if those columns are not being used further up
   private ImmutableBitSet generateGroupSetIfCardinalitySame(final Aggregate aggregate,
                                         final ImmutableBitSet originalGroupSet, final ImmutableBitSet fieldsUsed) {
-    Pair<RelOptTable, List<Integer>> tabToOrgCol = HiveRelOptUtil.getColumnOriginSet(aggregate.getInput(),
-                                                                                     originalGroupSet);
-    if(tabToOrgCol == null) {
-      return originalGroupSet;
-    }
-    RelOptHiveTable tbl = (RelOptHiveTable)tabToOrgCol.left;
-    List<Integer> backtrackedGBList = tabToOrgCol.right;
-    ImmutableBitSet backtrackedGBSet = ImmutableBitSet.builder().addAll(backtrackedGBList).build();
 
-    List<ImmutableBitSet> allKeys = tbl.getNonNullableKeys();
-    ImmutableBitSet currentKey = null;
-    for(ImmutableBitSet key:allKeys) {
-      if(backtrackedGBSet.contains(key)) {
-        // only if grouping sets consist of keys
-        currentKey = key;
-        break;
+    RexBuilder rexBuilder = aggregate.getCluster().getRexBuilder();
+    RelMetadataQuery mq = aggregate.getCluster().getMetadataQuery();
+
+    Iterator<Integer> iterator = originalGroupSet.iterator();
+    Map<Pair<RelOptTable, Integer>, Pair<List<Integer>, List<Integer>>> mapGBKeysLineage = new HashMap<>();
+
+    Map<Pair<RelOptTable, Integer>, List<Integer>> candidateKeys = new HashMap<>();
+
+    while(iterator.hasNext()) {
+      Integer key = iterator.next();
+      RexNode inputRef = rexBuilder.makeInputRef(aggregate.getInput(), key.intValue());
+      Set<RexNode> exprLineage = mq.getExpressionLineage(aggregate, inputRef);
+      if(exprLineage != null && exprLineage.size() == 1){
+        RexNode expr = exprLineage.iterator().next();
+        if(expr instanceof RexTableInputRef) {
+          RexTableInputRef tblRef = (RexTableInputRef)expr;
+          Pair<RelOptTable, Integer> baseTable = Pair.of(tblRef.getTableRef().getTable(), tblRef.getTableRef().getEntityNumber());
+          if(mapGBKeysLineage.containsKey(baseTable)) {
+            List<Integer> baseCol = mapGBKeysLineage.get(baseTable).left;
+            baseCol.add(tblRef.getIndex());
+            List<Integer> gbKey = mapGBKeysLineage.get(baseTable).right;
+            gbKey.add(key);
+          } else {
+            List<Integer> baseCol = new ArrayList<>();
+            baseCol.add(tblRef.getIndex());
+            List<Integer> gbKey = new ArrayList<>();
+            gbKey.add(key);
+            mapGBKeysLineage.put(baseTable, Pair.of(baseCol, gbKey));
+          }
+        } else if(RexUtil.isDeterministic(expr)){
+          // even though we weren't able to backtrack this key it could still be a candidate for removal
+          // if the rest of the columns contain a pk/unique key
+          TableRefFinder finder = new TableRefFinder();
+          expr.accept(finder);
+          Set<RexTableInputRef> tableRefs = finder.getTableRefs();
+          if(tableRefs.size() == 1) {
+            RexTableInputRef tblRef = tableRefs.iterator().next();
+            Pair<RelOptTable, Integer> baseTable = Pair.of(tblRef.getTableRef().getTable(), tblRef.getTableRef().getEntityNumber());
+            if(candidateKeys.containsKey(baseTable)) {
+              List<Integer> candidateGBKeys = candidateKeys.get(baseTable);
+              candidateGBKeys.add(key);
+            } else {
+              List<Integer> candidateGBKeys =  new ArrayList<>();
+              candidateGBKeys.add(key);
+              candidateKeys.put(baseTable, candidateGBKeys);
+            }
+          }
+        }
       }
     }
-    if(currentKey == null || currentKey.isEmpty()) {
-      return originalGroupSet;
-    }
 
     // we want to delete all columns in original GB set except the key
     ImmutableBitSet.Builder builder = ImmutableBitSet.builder();
 
-    // we have established that this gb set contains keys and it is safe to remove rest of the columns
-    for(int i=0; i<backtrackedGBList.size(); i++) {
-      Integer backtrackedCol = backtrackedGBList.get(i);
-      int orgCol = originalGroupSet.nth(i);
-      if(fieldsUsed.get((orgCol))
-          || currentKey.get(backtrackedCol)) {
-        // keep the columns which are being used or are part of keys
-        builder.set(orgCol);
+    for(Map.Entry<Pair<RelOptTable, Integer>, Pair<List<Integer>, List<Integer>>> entry : mapGBKeysLineage.entrySet()) {
+      RelOptHiveTable tbl = (RelOptHiveTable)entry.getKey().left;
+      List<Integer> backtrackedGBList = entry.getValue().left;
+      List<Integer> gbKeys = entry.getValue().right;
+
+      ImmutableBitSet backtrackedGBSet = ImmutableBitSet.builder().addAll(backtrackedGBList).build();
+
+      List<ImmutableBitSet> allKeys = tbl.getNonNullableKeys();
+      ImmutableBitSet currentKey = null;
+      for(ImmutableBitSet key:allKeys) {
+        if(backtrackedGBSet.contains(key)) {
+          // only if grouping sets consist of keys
+          currentKey = key;
+          break;
+        }
+      }
+      if(currentKey == null || currentKey.isEmpty()) {
+        continue;
+      }
+
+      // we have established that this gb set contains keys and it is safe to remove rest of the columns
+      for(int i=0; i<backtrackedGBList.size(); i++) {
+        Integer backtrackedCol = backtrackedGBList.get(i);
+        int orgCol = gbKeys.get(i);
+        if(!fieldsUsed.get((orgCol))
+            && !currentKey.get(backtrackedCol)) {
+          // keep the columns which are being used or are part of keys
 
 Review comment:
   The comment probably needs to be updated; it does not seem to be in accordance with the if clause condition.
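
   For illustration only, here is a minimal standalone sketch of the retention logic in question (hypothetical class name; Calcite's ImmutableBitSet assumed on the classpath, and grouping columns simplified to map to themselves rather than through lineage). The new loop negates the test to decide which columns to drop, which is why the inherited "keep" comment no longer matches:

{code:java}
import org.apache.calcite.util.ImmutableBitSet;

// Hypothetical sketch, not part of the patch.
public class GroupByTrimSketch {
  public static void main(String[] args) {
    ImmutableBitSet fieldsUsed = ImmutableBitSet.of(0);       // only column 0 is used further up
    ImmutableBitSet currentKey = ImmutableBitSet.of(1);       // column 1 is the unique/pk key
    ImmutableBitSet groupSet   = ImmutableBitSet.of(0, 1, 2); // original group by columns

    ImmutableBitSet.Builder kept = ImmutableBitSet.builder();
    for (int col : groupSet) {
      // "keep" form; the patch uses the negation (!fieldsUsed.get(col) && !currentKey.get(col))
      // to decide which columns to drop
      if (fieldsUsed.get(col) || currentKey.get(col)) {
        kept.set(col);
      }
    }
    System.out.println(kept.build()); // {0, 1} -- column 2 is trimmed from the group by
  }
}
{code}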
 
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 212606)

> Group by keys reduction optimization - keys are not reduced in query23
> ----------------------------------------------------------------------
>
>                 Key: HIVE-21382
>                 URL: https://issues.apache.org/jira/browse/HIVE-21382
>             Project: Hive
>          Issue Type: Improvement
>            Reporter: Vineet Garg
>            Assignee: Vineet Garg
>            Priority: Major
>              Labels: pull-request-available
>         Attachments: HIVE-21382.1.patch, HIVE-21382.2.patch, HIVE-21382.2.patch, HIVE-21382.3.patch
>
>          Time Spent: 1h
>  Remaining Estimate: 0h
>
> {code:sql}
> explain cbo with frequent_ss_items as 
>  (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt
>   from store_sales
>       ,date_dim 
>       ,item
>   where ss_sold_date_sk = d_date_sk
>     and ss_item_sk = i_item_sk 
>     and d_year in (1999,1999+1,1999+2,1999+3)
>   group by substr(i_item_desc,1,30),i_item_sk,d_date
>   having count(*) >4)
> select  sum(sales)
>  from ((select cs_quantity*cs_list_price sales
>        from catalog_sales
>            ,date_dim 
>        where d_year = 1999 
>          and d_moy = 1 
>          and cs_sold_date_sk = d_date_sk 
>          and cs_item_sk in (select item_sk from frequent_ss_items))) subq limit 100;
> {code}
> {code:sql}
> HiveSortLimit(fetch=[100])
>   HiveProject($f0=[$0])
>     HiveAggregate(group=[{}], agg#0=[sum($0)])
>       HiveProject(sales=[*(CAST($2):DECIMAL(10, 0), $3)])
>         HiveSemiJoin(condition=[=($1, $5)], joinType=[inner])
>           HiveJoin(condition=[=($0, $4)], joinType=[inner], algorithm=[none], cost=[{2.0 rows, 0.0 cpu, 0.0 io}])
>             HiveProject(cs_sold_date_sk=[$0], cs_item_sk=[$15], cs_quantity=[$18], cs_list_price=[$20])
>               HiveFilter(condition=[IS NOT NULL($0)])
>                 HiveTableScan(table=[[perf_constraints, catalog_sales]], table:alias=[catalog_sales])
>             HiveProject(d_date_sk=[$0])
>               HiveFilter(condition=[AND(=($6, 1999), =($8, 1))])
>                 HiveTableScan(table=[[perf_constraints, date_dim]], table:alias=[date_dim])
>           HiveProject(i_item_sk=[$1])
>             HiveFilter(condition=[>($3, 4)])
>               HiveProject(substr=[$2], i_item_sk=[$1], d_date=[$0], $f3=[$3])
>                 HiveAggregate(group=[{3, 4, 5}], agg#0=[count()])
>                   HiveJoin(condition=[=($1, $4)], joinType=[inner], algorithm=[none], cost=[{2.0 rows, 0.0 cpu, 0.0 io}])
>                     HiveJoin(condition=[=($0, $2)], joinType=[inner], algorithm=[none], cost=[{2.0 rows, 0.0 cpu, 0.0 io}])
>                       HiveProject(ss_sold_date_sk=[$0], ss_item_sk=[$2])
>                         HiveFilter(condition=[IS NOT NULL($0)])
>                           HiveTableScan(table=[[perf_constraints, store_sales]], table:alias=[store_sales])
>                       HiveProject(d_date_sk=[$0], d_date=[$2])
>                         HiveFilter(condition=[IN($6, 1999, 2000, 2001, 2002)])
>                           HiveTableScan(table=[[perf_constraints, date_dim]], table:alias=[date_dim])
>                     HiveProject(i_item_sk=[$0], substr=[substr($4, 1, 30)])
>                       HiveTableScan(table=[[perf_constraints, item]], table:alias=[item])
> {code}
> The right side of the HiveSemiJoin has an aggregate which could be reduced to have only {{i_item_sk}} as the group by key, since {{i_item_sk}} is the primary key.
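> As an illustration (not from the issue itself), the reduction relies on functional dependency on a key: once the group by set contains a table's primary key, the other grouping columns from that table are determined by it and can be dropped when they are not referenced further up. A hedged sketch of the rewrite:
> {code:sql}
> -- i_item_sk is the primary key of item, so substr(i_item_desc,1,30) is
> -- functionally dependent on it
> select i_item_sk
> from item
> group by substr(i_item_desc,1,30), i_item_sk;
> -- can be reduced to
> select i_item_sk
> from item
> group by i_item_sk;
> {code}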



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)
