hive-commits mailing list archives

From: vik...@apache.org
Subject: svn commit: r1665379 - in /hive/trunk: itests/src/test/resources/ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/ ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/ ql/src/java/o...
Date: Mon, 09 Mar 2015 23:00:47 GMT
Author: vikram
Date: Mon Mar  9 23:00:47 2015
New Revision: 1665379

URL: http://svn.apache.org/r1665379
Log:
Revert HIVE-9886: Missing files (Vikram Dixit K)

Modified:
    hive/trunk/itests/src/test/resources/testconfiguration.properties
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java

Modified: hive/trunk/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/trunk/itests/src/test/resources/testconfiguration.properties?rev=1665379&r1=1665378&r2=1665379&view=diff
==============================================================================
--- hive/trunk/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/trunk/itests/src/test/resources/testconfiguration.properties Mon Mar  9 23:00:47 2015
@@ -305,8 +305,7 @@ minitez.query.files=bucket_map_join_tez1
   tez_smb_main.q,\
   tez_smb_1.q,\
   vectorized_dynamic_partition_pruning.q,\
-  tez_multi_union.q,\
-  tez_join.q
+  tez_multi_union.q
 
 encrypted.query.files=encryption_join_unencrypted_tbl.q,\
   encryption_insert_partition_static.q,\

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java?rev=1665379&r1=1665378&r2=1665379&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java Mon Mar  9 23:00:47 2015
@@ -111,7 +111,7 @@ public class ConvertJoinMapJoin implemen
         }
 
         if (parentOp instanceof ReduceSinkOperator) {
-          ReduceSinkOperator rs = (ReduceSinkOperator) parentOp;
+          ReduceSinkOperator rs = (ReduceSinkOperator)parentOp;
           estimatedBuckets = (estimatedBuckets < rs.getConf().getNumReducers()) ?
               rs.getConf().getNumReducers() : estimatedBuckets;
         }
@@ -133,10 +133,10 @@ public class ConvertJoinMapJoin implemen
       if (retval == null) {
         return retval;
       } else {
-        // only case is full outer join with SMB enabled which is not possible. Convert to regular
-        // join.
-        convertJoinSMBJoin(joinOp, context, 0, 0, false, false);
-        return null;
+          // only case is full outer join with SMB enabled which is not possible. Convert to regular
+          // join.
+          convertJoinSMBJoin(joinOp, context, 0, 0, false, false);
+          return null;
       }
     }
 
@@ -160,10 +160,8 @@ public class ConvertJoinMapJoin implemen
     }
 
     MapJoinOperator mapJoinOp = convertJoinMapJoin(joinOp, context, mapJoinConversionPos);
-    // map join operator by default has no bucket cols and num of reduce sinks
-    // reduced by 1
-    mapJoinOp
-        .setOpTraits(new OpTraits(null, -1, null, joinOp.getOpTraits().getNumReduceSinks()));
+    // map join operator by default has no bucket cols
+    mapJoinOp.setOpTraits(new OpTraits(null, -1, null));
     mapJoinOp.setStatistics(joinOp.getStatistics());
     // propagate this change till the next RS
     for (Operator<? extends OperatorDesc> childOp : mapJoinOp.getChildOperators()) {
@@ -178,8 +176,7 @@ public class ConvertJoinMapJoin implemen
       TezBucketJoinProcCtx tezBucketJoinProcCtx) throws SemanticException {
     // we cannot convert to bucket map join, we cannot convert to
     // map join either based on the size. Check if we can convert to SMB join.
-    if ((context.conf.getBoolVar(HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN) == false)
-        || (joinOp.getOpTraits().getNumReduceSinks() >= 2)) {
+    if (context.conf.getBoolVar(HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN) == false) {
       convertJoinSMBJoin(joinOp, context, 0, 0, false, false);
       return null;
     }
@@ -224,7 +221,7 @@ public class ConvertJoinMapJoin implemen
       convertJoinSMBJoin(joinOp, context, pos, 0, false, false);
     }
     return null;
-  }
+}
 
   // replaces the join operator with a new CommonJoinOperator, removes the
   // parent reduce sinks
@@ -243,9 +240,9 @@ public class ConvertJoinMapJoin implemen
           new MapJoinDesc(
                   MapJoinProcessor.getKeys(joinOp.getConf().isLeftInputJoin(),
                   joinOp.getConf().getBaseSrc(), joinOp).getSecond(),
-                  null, joinDesc.getExprs(), null, null,
-                  joinDesc.getOutputColumnNames(), mapJoinConversionPos, joinDesc.getConds(),
-                  joinDesc.getFilters(), joinDesc.getNoOuterJoin(), null);
+              null, joinDesc.getExprs(), null, null,
+              joinDesc.getOutputColumnNames(), mapJoinConversionPos, joinDesc.getConds(),
+              joinDesc.getFilters(), joinDesc.getNoOuterJoin(), null);
       mapJoinDesc.setNullSafes(joinDesc.getNullSafes());
       mapJoinDesc.setFilterMap(joinDesc.getFilterMap());
       mapJoinDesc.resetOrder();
@@ -254,9 +251,9 @@ public class ConvertJoinMapJoin implemen
     CommonMergeJoinOperator mergeJoinOp =
         (CommonMergeJoinOperator) OperatorFactory.get(new CommonMergeJoinDesc(numBuckets,
             isSubQuery, mapJoinConversionPos, mapJoinDesc), joinOp.getSchema());
-    int numReduceSinks = joinOp.getOpTraits().getNumReduceSinks();
-    OpTraits opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), numBuckets, joinOp
-        .getOpTraits().getSortCols(), numReduceSinks);
+    OpTraits opTraits =
+        new OpTraits(joinOp.getOpTraits().getBucketColNames(), numBuckets, joinOp.getOpTraits()
+            .getSortCols());
     mergeJoinOp.setOpTraits(opTraits);
     mergeJoinOp.setStatistics(joinOp.getStatistics());
 
@@ -292,7 +289,8 @@ public class ConvertJoinMapJoin implemen
 
     if (adjustParentsChildren) {
       mergeJoinOp.getConf().setGenJoinKeys(true);
-      List<Operator<? extends OperatorDesc>> newParentOpList = new ArrayList<Operator<? extends OperatorDesc>>();
+      List<Operator<? extends OperatorDesc>> newParentOpList =
+          new ArrayList<Operator<? extends OperatorDesc>>();
     for (Operator<? extends OperatorDesc> parentOp : mergeJoinOp.getParentOperators()) {
       for (Operator<? extends OperatorDesc> grandParentOp : parentOp.getParentOperators()) {
           grandParentOp.getChildOperators().remove(parentOp);
@@ -330,8 +328,7 @@ public class ConvertJoinMapJoin implemen
     if (currentOp instanceof ReduceSinkOperator) {
       return;
     }
-    currentOp.setOpTraits(new OpTraits(null, -1, null,
-        currentOp.getOpTraits().getNumReduceSinks()));
+    currentOp.setOpTraits(new OpTraits(null, -1, null));
     for (Operator<? extends OperatorDesc> childOp : currentOp.getChildOperators()) {
       if ((childOp instanceof ReduceSinkOperator) || (childOp instanceof GroupByOperator)) {
         break;
@@ -354,7 +351,7 @@ public class ConvertJoinMapJoin implemen
 
     // we can set the traits for this join operator
     OpTraits opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(),
-        tezBucketJoinProcCtx.getNumBuckets(), null, joinOp.getOpTraits().getNumReduceSinks());
+        tezBucketJoinProcCtx.getNumBuckets(), null);
     mapJoinOp.setOpTraits(opTraits);
     mapJoinOp.setStatistics(joinOp.getStatistics());
     setNumberOfBucketsOnChildren(mapJoinOp);
@@ -380,7 +377,8 @@ public class ConvertJoinMapJoin implemen
 
     ReduceSinkOperator bigTableRS =
         (ReduceSinkOperator) joinOp.getParentOperators().get(bigTablePosition);
-    int numBuckets = bigTableRS.getParentOperators().get(0).getOpTraits().getNumBuckets();
+    int numBuckets = bigTableRS.getParentOperators().get(0).getOpTraits()
+            .getNumBuckets();
 
     // the sort and bucket cols have to match on both sides for this
     // transformation of the join operation
@@ -427,12 +425,13 @@ public class ConvertJoinMapJoin implemen
   }
 
   /*
-   * If the parent reduce sink of the big table side has the same emit key cols as its parent, we
-   * can create a bucket map join eliminating the reduce sink.
+   * If the parent reduce sink of the big table side has the same emit key cols
+   * as its parent, we can create a bucket map join eliminating the reduce sink.
    */
   private boolean checkConvertJoinBucketMapJoin(JoinOperator joinOp,
       OptimizeTezProcContext context, int bigTablePosition,
-      TezBucketJoinProcCtx tezBucketJoinProcCtx) throws SemanticException {
+      TezBucketJoinProcCtx tezBucketJoinProcCtx)
+  throws SemanticException {
     // bail on mux-operator because mux operator masks the emit keys of the
     // constituent reduce sinks
     if (!(joinOp.getParentOperators().get(0) instanceof ReduceSinkOperator)) {
@@ -454,8 +453,8 @@ public class ConvertJoinMapJoin implemen
     }
 
     /*
-     * this is the case when the big table is a sub-query and is probably already bucketed by the
-     * join column in say a group by operation
+     * this is the case when the big table is a sub-query and is probably
+     * already bucketed by the join column in say a group by operation
      */
     boolean isSubQuery = false;
     if (numBuckets < 0) {
@@ -493,8 +492,7 @@ public class ConvertJoinMapJoin implemen
           // all columns need to be at least a subset of the parentOfParent's bucket cols
           ExprNodeDesc exprNodeDesc = colExprMap.get(colName);
           if (exprNodeDesc instanceof ExprNodeColumnDesc) {
-            if (((ExprNodeColumnDesc) exprNodeDesc).getColumn()
-                .equals(listBucketCols.get(colCount))) {
+            if (((ExprNodeColumnDesc)exprNodeDesc).getColumn().equals(listBucketCols.get(colCount))) {
               colCount++;
             } else {
               break;
@@ -564,13 +562,14 @@ public class ConvertJoinMapJoin implemen
 
       Statistics currInputStat = parentOp.getStatistics();
       if (currInputStat == null) {
-        LOG.warn("Couldn't get statistics from: " + parentOp);
+        LOG.warn("Couldn't get statistics from: "+parentOp);
         return -1;
       }
 
       long inputSize = currInputStat.getDataSize();
-      if ((bigInputStat == null)
-          || ((bigInputStat != null) && (inputSize > bigInputStat.getDataSize()))) {
+      if ((bigInputStat == null) ||
+          ((bigInputStat != null) &&
+          (inputSize > bigInputStat.getDataSize()))) {
 
         if (bigTableFound) {
           // cannot convert to map join; we've already chosen a big table
@@ -640,11 +639,11 @@ public class ConvertJoinMapJoin implemen
       }
     }
 
-    // can safely convert the join to a map join.
+    //can safely convert the join to a map join.
     MapJoinOperator mapJoinOp =
         MapJoinProcessor.convertJoinOpMapJoinOp(context.conf, joinOp,
-            joinOp.getConf().isLeftInputJoin(), joinOp.getConf().getBaseSrc(),
-            joinOp.getConf().getMapAliases(), bigTablePosition, true);
+                joinOp.getConf().isLeftInputJoin(), joinOp.getConf().getBaseSrc(),
+                joinOp.getConf().getMapAliases(), bigTablePosition, true);
 
     Operator<? extends OperatorDesc> parentBigTableOp =
         mapJoinOp.getParentOperators().get(bigTablePosition);
@@ -668,7 +667,7 @@ public class ConvertJoinMapJoin implemen
             parentBigTableOp.getParentOperators().get(0));
       }
       parentBigTableOp.getParentOperators().get(0).removeChild(parentBigTableOp);
-      for (Operator<? extends OperatorDesc>op : mapJoinOp.getParentOperators()) {
+      for (Operator<? extends OperatorDesc> op : mapJoinOp.getParentOperators()) {
         if (!(op.getChildOperators().contains(mapJoinOp))) {
           op.getChildOperators().add(mapJoinOp);
         }
@@ -682,7 +681,7 @@ public class ConvertJoinMapJoin implemen
   private boolean hasDynamicPartitionBroadcast(Operator<?> parent) {
     boolean hasDynamicPartitionPruning = false;
 
-    for (Operator<?> op : parent.getChildOperators()) {
+    for (Operator<?> op: parent.getChildOperators()) {
       while (op != null) {
         if (op instanceof AppMasterEventOperator && op.getConf() instanceof DynamicPruningEventDesc) {
           // found dynamic partition pruning operator
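
Every OpTraits call site in this file changes the same way: the fourth
numReduceSinks argument introduced by HIVE-9886 is dropped. A minimal
before/after sketch of the pattern, using the names that appear in the hunks
above:

    // before this revert (HIVE-9886): traits also carried a reduce-sink count
    mapJoinOp.setOpTraits(new OpTraits(null, -1, null,
        joinOp.getOpTraits().getNumReduceSinks()));

    // after this revert: traits carry only bucket cols, bucket count, and sort cols
    mapJoinOp.setOpTraits(new OpTraits(null, -1, null));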

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java?rev=1665379&r1=1665378&r2=1665379&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java Mon Mar  9 23:00:47 2015
@@ -82,7 +82,7 @@ public class OpTraitsRulesProcFactory {
   }
 
   /*
-   * Reduce sink operator is the de-facto operator
+   * Reduce sink operator is the de-facto operator 
    * for determining keyCols (emit keys of a map phase)
    */
   public static class ReduceSinkRule implements NodeProcessor {
@@ -106,25 +106,24 @@ public class OpTraitsRulesProcFactory {
       List<List<String>> listBucketCols = new ArrayList<List<String>>();
       listBucketCols.add(bucketCols);
       int numBuckets = -1;
-      int numReduceSinks = 1;
       OpTraits parentOpTraits = rs.getParentOperators().get(0).getConf().getOpTraits();
       if (parentOpTraits != null) {
         numBuckets = parentOpTraits.getNumBuckets();
-        numReduceSinks += parentOpTraits.getNumReduceSinks();
       }
-      OpTraits opTraits = new OpTraits(listBucketCols, numBuckets, listBucketCols, numReduceSinks);
+      OpTraits opTraits = new OpTraits(listBucketCols, numBuckets, listBucketCols);
       rs.setOpTraits(opTraits);
       return null;
     }
   }
 
   /*
-   * Table scan has the table object and pruned partitions that has information
-   * such as bucketing, sorting, etc. that is used later for optimization.
+   * Table scan has the table object and pruned partitions that has information such as
+   * bucketing, sorting, etc. that is used later for optimization.
    */
   public static class TableScanRule implements NodeProcessor {
 
-    public boolean checkBucketedTable(Table tbl, ParseContext pGraphContext,
+    public boolean checkBucketedTable(Table tbl, 
+        ParseContext pGraphContext,
         PrunedPartitionList prunedParts) throws SemanticException {
 
       if (tbl.isPartitioned()) {
@@ -132,11 +131,9 @@ public class OpTraitsRulesProcFactory {
         // construct a mapping of (Partition->bucket file names) and (Partition -> bucket number)
         if (!partitions.isEmpty()) {
           for (Partition p : partitions) {
-            List<String> fileNames = 
-                AbstractBucketJoinProc.getBucketFilePathsOfPartition(p.getDataLocation(),
-                    pGraphContext);
-            // The number of files for the table should be same as number of
-            // buckets.
+            List<String> fileNames =
+                AbstractBucketJoinProc.getBucketFilePathsOfPartition(p.getDataLocation(), pGraphContext);
+            // The number of files for the table should be same as number of buckets.
             int bucketCount = p.getBucketCount();
 
             if (fileNames.size() != 0 && fileNames.size() != bucketCount) {
@@ -146,9 +143,8 @@ public class OpTraitsRulesProcFactory {
         }
       } else {
 
-        List<String> fileNames = 
-            AbstractBucketJoinProc.getBucketFilePathsOfPartition(tbl.getDataLocation(), 
-                pGraphContext);
+        List<String> fileNames =
+            AbstractBucketJoinProc.getBucketFilePathsOfPartition(tbl.getDataLocation(), pGraphContext);
         Integer num = new Integer(tbl.getNumBuckets());
 
         // The number of files for the table should be same as number of buckets.
@@ -187,8 +183,7 @@ public class OpTraitsRulesProcFactory {
         }
         sortedColsList.add(sortCols);
       }
-      // num reduce sinks hardcoded to 0 because TS has no parents
-      OpTraits opTraits = new OpTraits(bucketColsList, numBuckets, sortedColsList, 0);
+      OpTraits opTraits = new OpTraits(bucketColsList, numBuckets, sortedColsList);
       ts.setOpTraits(opTraits);
       return null;
     }
@@ -213,13 +208,8 @@ public class OpTraitsRulesProcFactory {
       }
 
       List<List<String>> listBucketCols = new ArrayList<List<String>>();
-      int numReduceSinks = 0;
-      OpTraits parentOpTraits = gbyOp.getParentOperators().get(0).getOpTraits();
-      if (parentOpTraits != null) {
-        numReduceSinks = parentOpTraits.getNumReduceSinks();
-      }
       listBucketCols.add(gbyKeys);
-      OpTraits opTraits = new OpTraits(listBucketCols, -1, listBucketCols, numReduceSinks);
+      OpTraits opTraits = new OpTraits(listBucketCols, -1, listBucketCols);
       gbyOp.setOpTraits(opTraits);
       return null;
     }
@@ -227,8 +217,8 @@ public class OpTraitsRulesProcFactory {
 
   public static class SelectRule implements NodeProcessor {
 
-    public List<List<String>> getConvertedColNames(
-        List<List<String>> parentColNames, SelectOperator selOp) {
+    public List<List<String>> getConvertedColNames(List<List<String>> parentColNames,
+        SelectOperator selOp) {
       List<List<String>> listBucketCols = new ArrayList<List<String>>();
       if (selOp.getColumnExprMap() != null) {
         if (parentColNames != null) {
@@ -254,8 +244,8 @@ public class OpTraitsRulesProcFactory {
     @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
-      SelectOperator selOp = (SelectOperator) nd;
-      List<List<String>> parentBucketColNames = 
+      SelectOperator selOp = (SelectOperator)nd;
+      List<List<String>> parentBucketColNames =
           selOp.getParentOperators().get(0).getOpTraits().getBucketColNames();
 
       List<List<String>> listBucketCols = null;
@@ -264,21 +254,18 @@ public class OpTraitsRulesProcFactory {
         if (parentBucketColNames != null) {
           listBucketCols = getConvertedColNames(parentBucketColNames, selOp);
         }
-        List<List<String>> parentSortColNames = 
-            selOp.getParentOperators().get(0).getOpTraits().getSortCols();
+        List<List<String>> parentSortColNames = selOp.getParentOperators().get(0).getOpTraits()
+            .getSortCols();
         if (parentSortColNames != null) {
           listSortCols = getConvertedColNames(parentSortColNames, selOp);
         }
       }
 
       int numBuckets = -1;
-      int numReduceSinks = 0;
-      OpTraits parentOpTraits = selOp.getParentOperators().get(0).getOpTraits();
-      if (parentOpTraits != null) {
-        numBuckets = parentOpTraits.getNumBuckets();
-        numReduceSinks = parentOpTraits.getNumReduceSinks();
+      if (selOp.getParentOperators().get(0).getOpTraits() != null) {
+        numBuckets = selOp.getParentOperators().get(0).getOpTraits().getNumBuckets();
       }
-      OpTraits opTraits = new OpTraits(listBucketCols, numBuckets, listSortCols, numReduceSinks);
+      OpTraits opTraits = new OpTraits(listBucketCols, numBuckets, listSortCols);
       selOp.setOpTraits(opTraits);
       return null;
     }
@@ -289,31 +276,26 @@ public class OpTraitsRulesProcFactory {
     @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
-      JoinOperator joinOp = (JoinOperator) nd;
+      JoinOperator joinOp = (JoinOperator)nd;
       List<List<String>> bucketColsList = new ArrayList<List<String>>();
       List<List<String>> sortColsList = new ArrayList<List<String>>();
       byte pos = 0;
-      int numReduceSinks = 0; // will be set to the larger of the parents
       for (Operator<? extends OperatorDesc> parentOp : joinOp.getParentOperators()) {
         if (!(parentOp instanceof ReduceSinkOperator)) {
           // can be mux operator
           break;
         }
-        ReduceSinkOperator rsOp = (ReduceSinkOperator) parentOp;
+        ReduceSinkOperator rsOp = (ReduceSinkOperator)parentOp;
         if (rsOp.getOpTraits() == null) {
           ReduceSinkRule rsRule = new ReduceSinkRule();
           rsRule.process(rsOp, stack, procCtx, nodeOutputs);
         }
-        OpTraits parentOpTraits = rsOp.getOpTraits();
-        bucketColsList.add(getOutputColNames(joinOp, parentOpTraits.getBucketColNames(), pos));
-        sortColsList.add(getOutputColNames(joinOp, parentOpTraits.getSortCols(), pos));
-        if (parentOpTraits.getNumReduceSinks() > numReduceSinks) {
-          numReduceSinks = parentOpTraits.getNumReduceSinks();
-        }
+        bucketColsList.add(getOutputColNames(joinOp, rsOp.getOpTraits().getBucketColNames(), pos));
+        sortColsList.add(getOutputColNames(joinOp, rsOp.getOpTraits().getSortCols(), pos));
         pos++;
       }
 
-      joinOp.setOpTraits(new OpTraits(bucketColsList, -1, bucketColsList, numReduceSinks));
+      joinOp.setOpTraits(new OpTraits(bucketColsList, -1, bucketColsList));
       return null;
     }
 
@@ -329,7 +311,7 @@ public class OpTraitsRulesProcFactory {
         for (String colName : colNames) {
           for (ExprNodeDesc exprNode : joinOp.getConf().getExprs().get(pos)) {
             if (exprNode instanceof ExprNodeColumnDesc) {
-              if (((ExprNodeColumnDesc) (exprNode)).getColumn().equals(colName)) {
+              if(((ExprNodeColumnDesc)(exprNode)).getColumn().equals(colName)) {
                 for (Entry<String, ExprNodeDesc> entry : joinOp.getColumnExprMap().entrySet()) {
                   if (entry.getValue().isSame(exprNode)) {
                     bucketColNames.add(entry.getKey());
@@ -356,30 +338,20 @@ public class OpTraitsRulesProcFactory {
   }
 
   /*
-   * When we have operators that have multiple parents, it is not clear which
-   * parent's traits we need to propagate forward.
+   *  When we have operators that have multiple parents, it is not
+   *  clear which parent's traits we need to propagate forward.
    */
   public static class MultiParentRule implements NodeProcessor {
 
     @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
+      OpTraits opTraits = new OpTraits(null, -1, null);
       @SuppressWarnings("unchecked")
-      Operator<? extends OperatorDesc> operator = (Operator<? extends OperatorDesc>) nd;
-
-      int numReduceSinks = 0;
-      for (Operator<?> parentOp : operator.getParentOperators()) {
-        if (parentOp.getOpTraits() == null) {
-          continue;
-        }
-        if (parentOp.getOpTraits().getNumReduceSinks() > numReduceSinks) {
-          numReduceSinks = parentOp.getOpTraits().getNumReduceSinks();
-        }
-      }
-      OpTraits opTraits = new OpTraits(null, -1, null, numReduceSinks);
+      Operator<? extends OperatorDesc> operator = (Operator<? extends OperatorDesc>)nd;
       operator.setOpTraits(opTraits);
       return null;
-    }
+    } 
   }
 
   public static NodeProcessor getTableScanRule() {
@@ -389,7 +361,7 @@ public class OpTraitsRulesProcFactory {
   public static NodeProcessor getReduceSinkRule() {
     return new ReduceSinkRule();
   }
-
+  
   public static NodeProcessor getSelectRule() {
     return new SelectRule();
   }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java?rev=1665379&r1=1665378&r2=1665379&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java Mon Mar  9 23:00:47 2015
@@ -103,7 +103,7 @@ public class SparkMapJoinOptimizer imple
     }
 
     // we can set the traits for this join operator
-    OpTraits opTraits = new OpTraits(bucketColNames, numBuckets, null, joinOp.getOpTraits().getNumReduceSinks());
+    OpTraits opTraits = new OpTraits(bucketColNames, numBuckets, null);
     mapJoinOp.setOpTraits(opTraits);
     mapJoinOp.setStatistics(joinOp.getStatistics());
     setNumberOfBucketsOnChildren(mapJoinOp);

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java?rev=1665379&r1=1665378&r2=1665379&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java Mon Mar  9 23:00:47 2015
@@ -25,14 +25,11 @@ public class OpTraits {
   List<List<String>> bucketColNames;
   List<List<String>> sortColNames;
   int numBuckets;
-  int numReduceSinks;
 
-  public OpTraits(List<List<String>> bucketColNames, int numBuckets,
-      List<List<String>> sortColNames, int numReduceSinks) {
+  public OpTraits(List<List<String>> bucketColNames, int numBuckets, List<List<String>> sortColNames) {
     this.bucketColNames = bucketColNames;
     this.numBuckets = numBuckets;
     this.sortColNames = sortColNames;
-    this.numReduceSinks = numReduceSinks;
   }
 
   public List<List<String>> getBucketColNames() {
@@ -58,12 +55,4 @@ public class OpTraits {
   public List<List<String>> getSortCols() {
     return sortColNames;
   }
-
-  public void setNumReduceSinks(int numReduceSinks) {
-    this.numReduceSinks = numReduceSinks;
-  }
-
-  public int getNumReduceSinks() {
-    return this.numReduceSinks;
-  }
 }
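
After this revert, OpTraits reduces to a three-field trait holder. A minimal
sketch of the resulting class, reconstructed from the hunks above (the package
declaration, license header, and the members between the shown hunks are
unchanged by this revert and elided here):

    import java.util.List;

    public class OpTraits {
      List<List<String>> bucketColNames;
      List<List<String>> sortColNames;
      int numBuckets;

      public OpTraits(List<List<String>> bucketColNames, int numBuckets,
          List<List<String>> sortColNames) {
        this.bucketColNames = bucketColNames;
        this.numBuckets = numBuckets;
        this.sortColNames = sortColNames;
      }

      public List<List<String>> getBucketColNames() {
        return bucketColNames;
      }

      public List<List<String>> getSortCols() {
        return sortColNames;
      }
    }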


