hive-issues mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From "ASF GitHub Bot (Jira)" <j...@apache.org>
Subject [jira] [Work logged] (HIVE-23716) Support Anti Join in Hive
Date Tue, 21 Jul 2020 14:58:00 GMT

     [ https://issues.apache.org/jira/browse/HIVE-23716?focusedWorklogId=461627&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-461627
]

ASF GitHub Bot logged work on HIVE-23716:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 21/Jul/20 14:57
            Start Date: 21/Jul/20 14:57
    Worklog Time Spent: 10m 
      Work Description: pgaref commented on a change in pull request #1147:
URL: https://github.com/apache/hive/pull/1147#discussion_r458162559



##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinAntiJoinLongOperator.java
##########
@@ -0,0 +1,315 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector.mapjoin;
+
+import org.apache.hadoop.hive.ql.CompilationOpContext;
+import org.apache.hadoop.hive.ql.exec.JoinUtil;
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashSet;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.ql.plan.VectorDesc;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Arrays;
+
+// TODO : Duplicate codes need to merge with semi join.
+// Single-Column Long hash table import.
+// Single-Column Long specific imports.
+
+/*
+ * Specialized class for doing a vectorized map join that is an anti join on a Single-Column
Long
+ * using a hash set.
+ */
+public class VectorMapJoinAntiJoinLongOperator extends VectorMapJoinAntiJoinGenerateResultOperator
{
+
+  private static final long serialVersionUID = 1L;
+  private static final String CLASS_NAME = VectorMapJoinAntiJoinLongOperator.class.getName();
+  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
+  // Returns the logging prefix for this operator, built by the superclass from this class's name.
+  protected String getLoggingPrefix() {
+    return super.getLoggingPrefix(CLASS_NAME);
+  }
+
+  // The above members are initialized by the constructor and must not be
+  // transient.
+
+  // The hash map for this specialized class.
+  private transient VectorMapJoinLongHashSet hashSet;
+
+  // Single-Column Long specific members.
+  // For integers, we have optional min/max filtering.
+  private transient boolean useMinMax;
+  private transient long min;
+  private transient long max;
+
+  // The column number for this one column join specialization.
+  private transient int singleJoinColumn;
+
+  // Pass-thru constructors.
+  /** Kryo ctor. No-arg constructor for deserialization; delegates entirely to the superclass. */
+  protected VectorMapJoinAntiJoinLongOperator() {
+    super();
+  }
+
+  /** Pass-thru constructor: forwards the compilation context to the superclass. */
+  public VectorMapJoinAntiJoinLongOperator(CompilationOpContext ctx) {
+    super(ctx);
+  }
+
+  /**
+   * Full pass-thru constructor used when building the vectorized operator tree;
+   * forwards the operator descriptor and vectorization context to the superclass.
+   */
+  public VectorMapJoinAntiJoinLongOperator(CompilationOpContext ctx, OperatorDesc conf,
+                                           VectorizationContext vContext, VectorDesc vectorDesc)
throws HiveException {
+    super(ctx, conf, vContext, vectorDesc);
+  }
+
+  // One-time setup of the Single-Column Long members for this specialized class.
+  // NOTE(review): the comment previously here ("Process Single-Column Long Anti Join on a
+  // vectorized row batch") describes processBatch, not this method.
+  @Override
+  protected void commonSetup() throws HiveException {
+    super.commonSetup();
+
+    // Initialize Single-Column Long members for this specialized class.
+    // The single join key column is the first (and only) entry of the big-table key column map.
+    singleJoinColumn = bigTableKeyColumnMap[0];
+  }
+
+  // Caches the specialized single-column long hash set and, when the set provides them,
+  // its min/max key bounds so processBatch can short-circuit keys that are out of range
+  // without probing the hash set.
+  @Override
+  public void hashTableSetup() throws HiveException {
+    super.hashTableSetup();
+
+    // Get our Single-Column Long hash set information for this specialized class.
+    hashSet = (VectorMapJoinLongHashSet) vectorMapJoinHashTable;
+    useMinMax = hashSet.useMinMax();
+    if (useMinMax) {
+      // min/max are only meaningful when the hash set reports useMinMax().
+      min = hashSet.min();
+      max = hashSet.max();
+    }
+  }
+
+  @Override
+  public void processBatch(VectorizedRowBatch batch) throws HiveException {
+
+    try {
+      // (Currently none)
+      // antiPerBatchSetup(batch);
+
+      // For anti joins, we may apply the filter(s) now.
+      for(VectorExpression ve : bigTableFilterExpressions) {
+        ve.evaluate(batch);
+      }
+
+      final int inputLogicalSize = batch.size;
+      if (inputLogicalSize == 0) {
+        return;
+      }
+
+      // Perform any key expressions.  Results will go into scratch columns.
+      if (bigTableKeyExpressions != null) {
+        for (VectorExpression ve : bigTableKeyExpressions) {
+          ve.evaluate(batch);
+        }
+      }
+
+      // The one join column for this specialized class.
+      LongColumnVector joinColVector = (LongColumnVector) batch.cols[singleJoinColumn];
+      long[] vector = joinColVector.vector;
+
+      // Check single column for repeating.
+      boolean allKeyInputColumnsRepeating = joinColVector.isRepeating;
+
+      if (allKeyInputColumnsRepeating) {
+        // All key input columns are repeating.  Generate key once.  Lookup once.
+        // Since the key is repeated, we must use entry 0 regardless of selectedInUse.
+        JoinUtil.JoinResult joinResult;
+        if (!joinColVector.noNulls && joinColVector.isNull[0]) {
+          // For anti join, if the right side is null then its a match.
+          joinResult = JoinUtil.JoinResult.MATCH;
+        } else {
+          long key = vector[0];
+          if (useMinMax && (key < min || key > max)) {
+            // Out of range for whole batch. Its a match for anti join. We can emit the row.
+            joinResult = JoinUtil.JoinResult.MATCH;
+          } else {
+            joinResult = hashSet.contains(key, hashSetResults[0]);
+            // reverse the join result for anti join.
+            if (joinResult == JoinUtil.JoinResult.NOMATCH) {
+              joinResult = JoinUtil.JoinResult.MATCH;
+            } else if (joinResult == JoinUtil.JoinResult.MATCH) {
+              joinResult = JoinUtil.JoinResult.NOMATCH;
+            }
+          }
+        }
+
+        // Common repeated join result processing.
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name());
+        }
+        finishAntiRepeated(batch, joinResult, hashSetResults[0]);
+      } else {
+        // NOT Repeating.
+
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated");
+        }
+
+        // We remember any matching rows in matches / matchSize.  At the end of the loop,
+        // selected / batch.size will represent both matching and non-matching rows for outer
join.
+        // Only deferred rows will have been removed from selected.
+        int selected[] = batch.selected;
+        boolean selectedInUse = batch.selectedInUse;
+
+        int hashSetResultCount = 0;
+        int allMatchCount = 0;
+        int spillCount = 0;
+        long saveKey = 0;
+
+        // We optimize performance by only looking up the first key in a series of equal
keys.
+        boolean haveSaveKey = false;

Review comment:
       Maybe rename to haveExistingKey? or HaveCurrentKey?




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 461627)
    Time Spent: 3h 20m  (was: 3h 10m)

> Support Anti Join in Hive 
> --------------------------
>
>                 Key: HIVE-23716
>                 URL: https://issues.apache.org/jira/browse/HIVE-23716
>             Project: Hive
>          Issue Type: Bug
>            Reporter: mahesh kumar behera
>            Assignee: mahesh kumar behera
>            Priority: Major
>              Labels: pull-request-available
>         Attachments: HIVE-23716.01.patch
>
>          Time Spent: 3h 20m
>  Remaining Estimate: 0h
>
> Currently hive does not support Anti join. The query for anti join is converted to left
outer join and null filter on right side join key is added to get the desired result. This
is causing
>  # Extra computation — The left outer join projects the redundant columns from right
side. Along with that, filtering is done to remove the redundant rows. This can be avoided
in case of anti join as anti join will project only the required columns and rows from the
left side table.
>  # Extra shuffle — In case of anti join the duplicate records moved to join node can
be avoided from the child node. This can reduce significant amount of data movement if the
number of distinct rows (join keys) is significant.
>  # Extra Memory Usage - In case of map based anti join , hash set is sufficient as just
the key is required to check if a record matches the join condition. In case of a left
join, we need the key and the non key columns also and thus a hash table will be required.
> For a query like
> {code:java}
>  select wr_order_number FROM web_returns LEFT JOIN web_sales  ON wr_order_number = ws_order_number
WHERE ws_order_number IS NULL;{code}
> The number of distinct ws_order_number in web_sales table in a typical 10TB TPCDS set
up is just 10% of total records. So when we convert this query to anti join, instead of 7
billion rows, only 600 million rows are moved to join node.
> In the current patch, just one conversion is done. The pattern of project->filter->left-join
is converted to project->anti-join. This will take care of sub queries with “not exists”
clause. The queries with “not exists” are converted first to filter + left-join and then
it is converted to an anti join. The queries with “not in” are not handled in the current patch.
> From the execution side, both merge join and map join with vectorized execution are supported
for anti join.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

Mime
View raw message