hive-commits mailing list archives

From prasan...@apache.org
Subject [09/52] [abbrv] hive git commit: HIVE-10546 : genFileSinkPlan should use the generated SEL's RR for the partition col of FS (Pengcheng Xiong via Ashutosh Chauhan)
Date Thu, 07 May 2015 01:21:03 GMT
HIVE-10546 : genFileSinkPlan should use the generated SEL's RR for the partition col of FS
(Pengcheng Xiong via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <hashutosh@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/50704eec
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/50704eec
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/50704eec

Branch: refs/heads/llap
Commit: 50704eec0aaaf865532e2cdbbd9682a86288fc51
Parents: 59ecdd3
Author: Pengcheng Xiong <pxiong@hortonworks.com>
Authored: Wed Apr 29 21:36:00 2015 -0700
Committer: Ashutosh Chauhan <hashutosh@apache.org>
Committed: Fri May 1 12:09:17 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 42 +++++++++++++++++---
 1 file changed, 36 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
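Context for the diff below: in genFileSinkPlan, Hive may generate a conversion SELECT (SEL) between the query's last operator and the FileSink (FS) to cast the selected expressions to the destination table's column types. Before this change, the FS's partition columns (partnColsNoConvert) were computed in genBucketingSortingDest, i.e. against the row schema that existed before that SEL was generated, so the resulting column references could point at an operator that is no longer the FS's parent. The fix factors the computation out into a new genPartnCols method and invokes it from genFileSinkPlan after the SEL exists, so the columns are resolved against the generated SEL's RowResolver (RR). What follows is a minimal, self-contained Java sketch of the failure mode; the schema lists and the resolve helper are hypothetical stand-ins, not Hive's classes:

import java.util.Arrays;
import java.util.List;

// Toy illustration of HIVE-10546 (hypothetical names, not Hive's API):
// a sink's column references must be resolved against the row schema of
// its *immediate* parent, not against a schema captured before an
// intermediate conversion SELECT was inserted into the plan.
public class SelRrSketch {

  // Resolve a column name to its position in an operator's output schema;
  // a stand-in for looking a column up in a RowResolver.
  static int resolve(List<String> schema, String col) {
    int idx = schema.indexOf(col);
    if (idx < 0) {
      throw new IllegalStateException(col + " not found in " + schema);
    }
    return idx;
  }

  public static void main(String[] args) {
    // Schema feeding the sink before the conversion SELECT is generated...
    List<String> beforeSel = Arrays.asList("key", "value", "ds");
    // ...and the generated SEL's schema, whose internal "_colN" names are
    // what the FileSink actually sees as its parent's output.
    List<String> generatedSel = Arrays.asList("_col0", "_col1", "_col2");

    // Correct: capture the position via the pre-SEL schema, then read the
    // corresponding column of the SEL's own schema (its RR, in Hive terms).
    int pos = resolve(beforeSel, "ds");
    System.out.println("partition col resolves to " + generatedSel.get(pos)); // _col2

    // Stale: a reference built from the old name no longer matches the new
    // parent's schema once the SEL has been interposed.
    try {
      resolve(generatedSel, "ds");
    } catch (IllegalStateException e) {
      System.out.println("stale reference fails: " + e.getMessage());
    }
  }
}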


http://git-wip-us.apache.org/repos/asf/hive/blob/50704eec/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 1d2c764..dec0e38 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6043,7 +6043,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     boolean enforceBucketing = false;
     boolean enforceSorting = false;
     ArrayList<ExprNodeDesc> partnCols = new ArrayList<ExprNodeDesc>();
-    ArrayList<ExprNodeDesc> partnColsNoConvert = new ArrayList<ExprNodeDesc>();
     ArrayList<ExprNodeDesc> sortCols = new ArrayList<ExprNodeDesc>();
     ArrayList<Integer> sortOrders = new ArrayList<Integer>();
     boolean multiFileSpray = false;
@@ -6055,11 +6054,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       enforceBucketing = true;
       if (updating() || deleting()) {
         partnCols = getPartitionColsFromBucketColsForUpdateDelete(input, true);
-        partnColsNoConvert = getPartitionColsFromBucketColsForUpdateDelete(input, false);
       } else {
         partnCols = getPartitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input, true);
-        partnColsNoConvert = getPartitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input,
-            false);
       }
     }
 
@@ -6071,7 +6067,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       sortOrders = getSortOrders(dest, qb, dest_tab, input);
       if (!enforceBucketing) {
         partnCols = sortCols;
-        partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, input, false);
       }
     }
 
@@ -6107,12 +6102,41 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       reduceSinkOperatorsAddedByEnforceBucketingSorting.add((ReduceSinkOperator)input.getParentOperators().get(0));
       ctx.setMultiFileSpray(multiFileSpray);
       ctx.setNumFiles(numFiles);
-      ctx.setPartnCols(partnColsNoConvert);
       ctx.setTotalFiles(totalFiles);
     }
     return input;
   }
 
+  private void genPartnCols(String dest, Operator input, QB qb,
+      TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException {
+    boolean enforceBucketing = false;
+    boolean enforceSorting = false;
+    ArrayList<ExprNodeDesc> partnColsNoConvert = new ArrayList<ExprNodeDesc>();
+
+    if ((dest_tab.getNumBuckets() > 0) &&
+        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
+      enforceBucketing = true;
+      if (updating() || deleting()) {
+        partnColsNoConvert = getPartitionColsFromBucketColsForUpdateDelete(input, false);
+      } else {
+        partnColsNoConvert = getPartitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input,
+            false);
+      }
+    }
+
+    if ((dest_tab.getSortCols() != null) &&
+        (dest_tab.getSortCols().size() > 0) &&
+        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTING))) {
+      enforceSorting = true;
+      if (!enforceBucketing) {
+        partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, input, false);
+      }
+    }
+
+    if (enforceBucketing || enforceSorting) {
+      ctx.setPartnCols(partnColsNoConvert);
+    }
+  }
   /**
    * Check for HOLD_DDLTIME hint.
    *
@@ -6556,6 +6580,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     // If this table is working with ACID semantics, turn off merging
     canBeMerged &= !destTableIsAcid;
 
+    // Generate the partition columns from the parent input
+    if (dest_type.intValue() == QBMetaData.DEST_TABLE
+        || dest_type.intValue() == QBMetaData.DEST_PARTITION) {
+      genPartnCols(dest, input, qb, table_desc, dest_tab, rsCtx);
+    }
+
     FileSinkDesc fileSinkDesc = new FileSinkDesc(
       queryTmpdir,
       table_desc,

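A note on the call site added above: genPartnCols is invoked only for DEST_TABLE and DEST_PARTITION destinations, and only immediately before the FileSinkDesc is built, by which point the conversion SELECT (if any) has already replaced input; the non-converting (false) variants of getPartitionColsFromBucketCols, getSortCols, and getPartitionColsFromBucketColsForUpdateDelete therefore resolve against the FS's actual parent, the generated SEL. Duplicating the enforce-bucketing/enforce-sorting guards from genBucketingSortingDest keeps the two computations independent, at the cost of evaluating the same conditions twice.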
