hive-commits mailing list archives

From: gunt...@apache.org
Subject: svn commit: r1667456 [9/16] - in /hive/branches/llap: ./ beeline/src/java/org/apache/hive/beeline/ common/src/java/org/apache/hadoop/hive/conf/ contrib/src/test/queries/clientnegative/ contrib/src/test/queries/clientpositive/ contrib/src/test/results/c...
Date: Wed, 18 Mar 2015 05:40:11 GMT
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java Wed Mar 18 05:40:07 2015
@@ -325,7 +325,7 @@ public class ReduceRecordSource implemen
       row.add(deserializeValue(valueWritable, tag));
 
       try {
-        reducer.processOp(row, tag);
+        reducer.process(row, tag);
       } catch (Exception e) {
         String rowString = null;
         try {
@@ -364,7 +364,7 @@ public class ReduceRecordSource implemen
         rowIdx++;
         if (rowIdx >= BATCH_SIZE) {
           VectorizedBatchUtil.setBatchSize(batch, rowIdx);
-          reducer.processOp(batch, tag);
+          reducer.process(batch, tag);
 
           // Reset just the value columns and value buffer.
           for (int i = keysColumnOffset; i < batch.numCols; i++) {
@@ -377,7 +377,7 @@ public class ReduceRecordSource implemen
       if (rowIdx > 0) {
         // Flush final partial batch.
         VectorizedBatchUtil.setBatchSize(batch, rowIdx);
-        reducer.processOp(batch, tag);
+        reducer.process(batch, tag);
       }
       batch.reset();
       keyBuffer.reset();
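
The recurring change in this revision is visible in the three hunks above: every call site that invoked the per-row operator hook processOp(row, tag) now invokes process(row, tag). A minimal sketch of the renamed contract, using a hypothetical SketchOperator rather than any class from the patch:

    // Hedged sketch, not from the patch: the shape of the renamed hook.
    // Subclasses that used to override processOp(Object, int) now override
    // process(Object, int); callers change the same way.
    public abstract class SketchOperator {
      public abstract void process(Object row, int tag) throws Exception;

      // Drives rows through the renamed hook, mirroring the loops in this diff.
      public void drive(Object[] rows, int tag) throws Exception {
        for (Object row : rows) {
          process(row, tag);
        }
      }
    }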

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java Wed Mar 18 05:40:07 2015
@@ -785,7 +785,7 @@ public class TezJobMonitor {
       final int running = progress.getRunningTaskCount();
       final int failed = progress.getFailedTaskAttemptCount();
       if (total <= 0) {
-        reportBuffer.append(String.format("%s: -/-\t", s, complete, total));
+        reportBuffer.append(String.format("%s: -/-\t", s));
       } else {
         if (complete == total && !completed.contains(s)) {
           completed.add(s);

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java Wed Mar 18 05:40:07 2015
@@ -19,9 +19,9 @@ package org.apache.hadoop.hive.ql.exec.t
 
 import java.io.IOException;
 import java.text.NumberFormat;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -33,7 +33,6 @@ import org.apache.tez.common.TezUtils;
 import org.apache.tez.mapreduce.processor.MRTaskReporter;
 import org.apache.tez.runtime.api.AbstractLogicalIOProcessor;
 import org.apache.tez.runtime.api.Event;
-import org.apache.tez.runtime.api.Input;
 import org.apache.tez.runtime.api.LogicalInput;
 import org.apache.tez.runtime.api.LogicalOutput;
 import org.apache.tez.runtime.api.ProcessorContext;
@@ -143,20 +142,6 @@ public class TezProcessor extends Abstra
       throws Exception {
     Throwable originalThrowable = null;
     try {
-      // Outputs will be started later by the individual Processors.
-      TezCacheAccess cacheAccess = TezCacheAccess.createInstance(jobConf);
-      // Start the actual Inputs. After MRInput initialization.
-      for (Map.Entry<String, LogicalInput> inputEntry : inputs.entrySet()) {
-        if (!cacheAccess.isInputCached(inputEntry.getKey())) {
-          LOG.info("Starting input " + inputEntry.getKey());
-          inputEntry.getValue().start();
-          processorContext.waitForAnyInputReady(Collections.singletonList((Input) (inputEntry
-              .getValue())));
-        } else {
-          LOG.info("Input: " + inputEntry.getKey()
-              + " is already cached. Skipping start and wait for ready");
-        }
-      }
 
       MRTaskReporter mrReporter = new MRTaskReporter(getContext());
       rproc.init(jobConf, getContext(), mrReporter, inputs, outputs);

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java Wed Mar 18 05:40:07 2015
@@ -18,7 +18,8 @@
 
 package org.apache.hadoop.hive.ql.exec.vector;
 
-import java.io.IOException;
+import java.util.Collection;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator;
@@ -27,15 +28,9 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.AppMasterEventDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.serde2.SerDeStats;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.io.ObjectWritable;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 
 /**
@@ -59,18 +54,19 @@ public class VectorAppMasterEventOperato
   }
 
   @Override
-  public void initializeOp(Configuration hconf) throws HiveException {
-    super.initializeOp(hconf);
+  public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
     valueWriters = VectorExpressionWriterFactory.getExpressionWriters(
         (StructObjectInspector) inputObjInspectors[0]);
     singleRow = new Object[valueWriters.length];
+    return result;
   }
 
   @Override
-  public void processOp(Object data, int tag) throws HiveException {
-    
+  public void process(Object data, int tag) throws HiveException {
+
     VectorizedRowBatch vrg = (VectorizedRowBatch) data;
-    
+
     Writable [] records = null;
     Writable recordValue = null;
     boolean vectorizedSerde = false;
@@ -85,7 +81,7 @@ public class VectorAppMasterEventOperato
     } catch (SerDeException e1) {
       throw new HiveException(e1);
     }
-    
+
     for (int i = 0; i < vrg.size; i++) {
       Writable row = null;
       if (vectorizedSerde) {
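
The companion change visible here: initializeOp now returns Collection<Future<?>> instead of void. Each override captures the superclass result and returns it, so any asynchronous initialization started higher in the operator hierarchy is propagated rather than dropped; the explicit initializeChildren(hconf) calls removed in later hunks are presumably subsumed by the same superclass initialization. A minimal sketch of the pattern, with illustrative BaseOp/ChildOp names that are not from the patch:

    // Hedged sketch, assuming illustrative names: the initializeOp pattern
    // used by the vectorized operators in this patch.
    import java.util.Collection;
    import java.util.concurrent.Future;

    abstract class BaseOp {
      protected Collection<Future<?>> initializeOp(Object conf) throws Exception {
        return null; // the base may hand back pending initialization futures
      }
    }

    class ChildOp extends BaseOp {
      @Override
      protected Collection<Future<?>> initializeOp(Object conf) throws Exception {
        Collection<Future<?>> result = super.initializeOp(conf); // keep super's futures
        // ... operator-specific setup (writers, buffers) would go here ...
        return result; // propagate instead of swallowing
      }
    }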

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java Wed Mar 18 05:40:07 2015
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hive.ql.exec.vector;
 
+import java.util.Collection;
+import java.util.concurrent.Future;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
@@ -50,7 +53,7 @@ public class VectorFileSinkOperator exte
   }
 
   @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
     // We need a input object inspector that is for the row we will extract out of the
     // vectorized row batch, not for example, an original inspector for an ORC table, etc.
     VectorExpressionWriterFactory.processVectorInspector(
@@ -66,15 +69,15 @@ public class VectorFileSinkOperator exte
     singleRow = new Object[valueWriters.length];
 
     // Call FileSinkOperator with new input inspector.
-    super.initializeOp(hconf);
+    return super.initializeOp(hconf);
   }
 
   @Override
-  public void processOp(Object data, int tag) throws HiveException {
+  public void process(Object data, int tag) throws HiveException {
     VectorizedRowBatch vrg = (VectorizedRowBatch)data;
     for (int i = 0; i < vrg.size; i++) {
       Object[] row = getRowObject(vrg, i);
-      super.processOp(row, tag);
+      super.process(row, tag);
     }
   }
 

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java Wed Mar 18 05:40:07 2015
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hive.ql.exec.vector;
 
+import java.util.Collection;
+import java.util.concurrent.Future;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
@@ -27,7 +30,6 @@ import org.apache.hadoop.hive.ql.metadat
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.FilterDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 
 /**
  * Filter operator implementation.
@@ -39,7 +41,7 @@ public class VectorFilterOperator extend
   private VectorExpression conditionEvaluator = null;
 
   // Temporary selected vector
-  private int[] temporarySelected = new int [VectorizedRowBatch.DEFAULT_SIZE];
+  private final int[] temporarySelected = new int [VectorizedRowBatch.DEFAULT_SIZE];
 
   // filterMode is 1 if condition is always true, -1 if always false
   // and 0 if condition needs to be computed.
@@ -59,7 +61,8 @@ public class VectorFilterOperator extend
 
 
   @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
     try {
       heartbeatInterval = HiveConf.getIntVar(hconf,
           HiveConf.ConfVars.HIVESENDHEARTBEAT);
@@ -74,7 +77,7 @@ public class VectorFilterOperator extend
         filterMode = -1;
       }
     }
-    initializeChildren(hconf);
+    return result;
   }
 
   public void setFilterCondition(VectorExpression expr) {
@@ -82,7 +85,7 @@ public class VectorFilterOperator extend
   }
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
 
     VectorizedRowBatch vrg = (VectorizedRowBatch) row;
 

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java Wed Mar 18 05:40:07 2015
@@ -22,18 +22,19 @@ import java.lang.management.ManagementFa
 import java.lang.management.MemoryMXBean;
 import java.lang.ref.SoftReference;
 import java.util.ArrayList;
-import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.Future;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.KeyWrapper;
+import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hive.ql.plan.Ag
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.GroupByDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
@@ -54,7 +56,8 @@ import org.apache.hadoop.io.DataOutputBu
  * stores the aggregate operators' intermediate states. Emits row mode output.
  *
  */
-public class VectorGroupByOperator extends GroupByOperator implements VectorizationContextRegion {
+public class VectorGroupByOperator extends Operator<GroupByDesc> implements
+    VectorizationContextRegion {
 
   private static final Log LOG = LogFactory.getLog(
       VectorGroupByOperator.class.getName());
@@ -100,7 +103,15 @@ public class VectorGroupByOperator exten
   private transient VectorizedRowBatchCtx vrbCtx;
 
   private transient VectorColumnAssign[] vectorColumnAssign;
-  
+
+  private transient int numEntriesHashTable;
+
+  private transient long maxHashTblMemory;
+
+  private transient long maxMemory;
+
+  private float memoryThreshold;
+
   /**
    * Interface for processing mode: global, hash, unsorted streaming, or group batch
    */
@@ -118,9 +129,11 @@ public class VectorGroupByOperator exten
   private abstract class ProcessingModeBase implements IProcessingMode {
 
     // Overridden and used in sorted reduce group batch processing mode.
+    @Override
     public void startGroup() throws HiveException {
       // Do nothing.
     }
+    @Override
     public void endGroup() throws HiveException {
       // Do nothing.
     }
@@ -177,7 +190,7 @@ public class VectorGroupByOperator exten
   private class ProcessingModeGlobalAggregate extends ProcessingModeBase {
 
     /**
-     * In global processing mode there is only one set of aggregation buffers 
+     * In global processing mode there is only one set of aggregation buffers
      */
     private VectorAggregationBufferRow aggregationBuffers;
 
@@ -233,7 +246,7 @@ public class VectorGroupByOperator exten
     private long sumBatchSize;
 
     /**
-     * Max number of entries in the vector group by aggregation hashtables. 
+     * Max number of entries in the vector group by aggregation hashtables.
      * Exceeding this will trigger a flush irrelevant of memory pressure condition.
      */
     private int maxHtEntries = 1000000;
@@ -247,12 +260,12 @@ public class VectorGroupByOperator exten
      * Percent of entries to flush when memory threshold exceeded.
      */
     private float percentEntriesToFlush = 0.1f;
-  
+
     /**
      * A soft reference used to detect memory pressure
      */
     private SoftReference<Object> gcCanary = new SoftReference<Object>(new Object());
-    
+
     /**
      * Counts the number of time the gcCanary died and was resurrected
      */
@@ -289,7 +302,7 @@ public class VectorGroupByOperator exten
             HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION);
           this.numRowsCompareHashAggr = HiveConf.getIntVar(hconf,
             HiveConf.ConfVars.HIVEGROUPBYMAPINTERVAL);
-      } 
+      }
       else {
         this.percentEntriesToFlush =
             HiveConf.ConfVars.HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT.defaultFloatVal;
@@ -322,14 +335,14 @@ public class VectorGroupByOperator exten
       processAggregators(batch);
 
       //Flush if memory limits were reached
-      // We keep flushing until the memory is under threshold 
+      // We keep flushing until the memory is under threshold
       int preFlushEntriesCount = numEntriesHashTable;
       while (shouldFlush(batch)) {
         flush(false);
 
         if(gcCanary.get() == null) {
           gcCanaryFlushes++;
-          gcCanary = new SoftReference<Object>(new Object()); 
+          gcCanary = new SoftReference<Object>(new Object());
         }
 
         //Validate that some progress is being made
@@ -468,7 +481,7 @@ public class VectorGroupByOperator exten
         mapKeysAggregationBuffers.clear();
         numEntriesHashTable = 0;
       }
-      
+
       if (all && LOG.isDebugEnabled()) {
         LOG.debug(String.format("GC canary caused %d flushes", gcCanaryFlushes));
       }
@@ -495,7 +508,7 @@ public class VectorGroupByOperator exten
       if (gcCanary.get() == null) {
         return true;
       }
-      
+
       return false;
     }
 
@@ -515,14 +528,14 @@ public class VectorGroupByOperator exten
     }
 
     /**
-     * Checks if the HT reduces the number of entries by at least minReductionHashAggr factor 
+     * Checks if the HT reduces the number of entries by at least minReductionHashAggr factor
      * @throws HiveException
      */
     private void checkHashModeEfficiency() throws HiveException {
       if (lastModeCheckRowCount > numRowsCompareHashAggr) {
         lastModeCheckRowCount = 0;
         if (LOG.isDebugEnabled()) {
-          LOG.debug(String.format("checkHashModeEfficiency: HT:%d RC:%d MIN:%d", 
+          LOG.debug(String.format("checkHashModeEfficiency: HT:%d RC:%d MIN:%d",
               numEntriesHashTable, sumBatchSize, (long)(sumBatchSize * minReductionHashAggr)));
         }
         if (numEntriesHashTable > sumBatchSize * minReductionHashAggr) {
@@ -541,7 +554,7 @@ public class VectorGroupByOperator exten
    */
   private class ProcessingModeUnsortedStreaming extends ProcessingModeBase {
 
-    /** 
+    /**
      * The aggregation buffers used in streaming mode
      */
     private VectorAggregationBufferRow currentStreamingAggregators;
@@ -554,19 +567,19 @@ public class VectorGroupByOperator exten
     /**
      * The keys that needs to be flushed at the end of the current batch
      */
-    private final VectorHashKeyWrapper[] keysToFlush = 
+    private final VectorHashKeyWrapper[] keysToFlush =
         new VectorHashKeyWrapper[VectorizedRowBatch.DEFAULT_SIZE];
 
     /**
      * The aggregates that needs to be flushed at the end of the current batch
      */
-    private final VectorAggregationBufferRow[] rowsToFlush = 
+    private final VectorAggregationBufferRow[] rowsToFlush =
         new VectorAggregationBufferRow[VectorizedRowBatch.DEFAULT_SIZE];
 
     /**
      * A pool of VectorAggregationBufferRow to avoid repeated allocations
      */
-    private VectorUtilBatchObjectPool<VectorAggregationBufferRow> 
+    private VectorUtilBatchObjectPool<VectorAggregationBufferRow>
       streamAggregationBufferRowPool;
 
     @Override
@@ -658,7 +671,7 @@ public class VectorGroupByOperator exten
    *      vectorized reduce-shuffle feeds the batches to us.
    *
    *   2) Later at endGroup after reduce-shuffle has fed us all the input batches for the group,
-   *      we fill in the aggregation columns in outputBatch at outputBatch.size.  Our method 
+   *      we fill in the aggregation columns in outputBatch at outputBatch.size.  Our method
    *      writeGroupRow does this and finally increments outputBatch.size.
    *
    */
@@ -672,7 +685,7 @@ public class VectorGroupByOperator exten
      */
     VectorGroupKeyHelper groupKeyHelper;
 
-    /** 
+    /**
      * The group vector aggregation buffers.
      */
     private VectorAggregationBufferRow groupAggregators;
@@ -750,7 +763,7 @@ public class VectorGroupByOperator exten
       AggregationDesc aggDesc = aggrDesc.get(i);
       aggregators[i] = vContext.getAggregatorExpression(aggDesc, desc.getVectorDesc().isReduce());
     }
-    
+
     isVectorOutput = desc.getVectorDesc().isVectorOutput();
 
     vOutContext = new VectorizationContext(desc.getOutputColumnNames());
@@ -762,7 +775,8 @@ public class VectorGroupByOperator exten
   }
 
   @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
 
     List<ObjectInspector> objectInspectors = new ArrayList<ObjectInspector>();
 
@@ -773,9 +787,9 @@ public class VectorGroupByOperator exten
 
       // grouping id should be pruned, which is the last of key columns
       // see ColumnPrunerGroupByProc
-      outputKeyLength = 
+      outputKeyLength =
           conf.pruneGroupingSetId() ? keyExpressions.length - 1 : keyExpressions.length;
-      
+
       keyOutputWriters = new VectorExpressionWriter[outputKeyLength];
 
       for(int i = 0; i < outputKeyLength; ++i) {
@@ -812,8 +826,6 @@ public class VectorGroupByOperator exten
       throw new HiveException(e);
     }
 
-    initializeChildren(hconf);
-
     forwardCache = new Object[outputKeyLength + aggregators.length];
 
     if (outputKeyLength == 0) {
@@ -826,13 +838,14 @@ public class VectorGroupByOperator exten
       processingMode = this.new ProcessingModeHashAggregate();
     }
     processingMode.initialize(hconf);
+    return result;
   }
 
   /**
    * changes the processing mode to unsorted streaming
-   * This is done at the request of the hash agg mode, if the number of keys 
+   * This is done at the request of the hash agg mode, if the number of keys
    * exceeds the minReductionHashAggr factor
-   * @throws HiveException 
+   * @throws HiveException
    */
   private void changeToUnsortedStreamingMode() throws HiveException {
     processingMode = this.new ProcessingModeUnsortedStreaming();
@@ -859,7 +872,7 @@ public class VectorGroupByOperator exten
   }
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     VectorizedRowBatch batch = (VectorizedRowBatch) row;
 
     if (batch.size > 0) {
@@ -962,4 +975,9 @@ public class VectorGroupByOperator exten
   public VectorizationContext getOuputVectorizationContext() {
     return vOutContext;
   }
+
+  @Override
+  public OperatorType getType() {
+    return OperatorType.GROUPBY;
+  }
 }
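
This file also changes its base class: VectorGroupByOperator now extends Operator<GroupByDesc> directly instead of GroupByOperator, so it reports its logical operator type itself through the new getType() override; the same pattern appears below in VectorSelectOperator. A minimal sketch of why the override is needed, with illustrative names rather than the Hive classes:

    // Hedged sketch with illustrative names: once a vectorized operator no
    // longer extends its row-mode sibling, planner code that dispatches on
    // getType() still needs the correct answer, so the subclass supplies it.
    enum SketchOpType { GROUPBY, SELECT }

    abstract class SketchGenericOperator<DESC> {
      public abstract SketchOpType getType();
    }

    class SketchVectorGroupBy extends SketchGenericOperator<Object> {
      @Override
      public SketchOpType getType() {
        return SketchOpType.GROUPBY; // mirrors VectorGroupByOperator.getType() above
      }
    }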

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorLimitOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorLimitOperator.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorLimitOperator.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorLimitOperator.java Wed Mar 18 05:40:07 2015
@@ -39,7 +39,7 @@ public class VectorLimitOperator extends
   }
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     VectorizedRowBatch batch = (VectorizedRowBatch) row;
 
     if (currCount < limit) {

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java Wed Mar 18 05:40:07 2015
@@ -19,9 +19,11 @@
 package org.apache.hadoop.hive.ql.exec.vector;
 
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.Future;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -56,7 +58,7 @@ public class VectorMapJoinOperator exten
 
   private VectorExpression[] bigTableFilterExpressions;
   private VectorExpression[] bigTableValueExpressions;
-  
+
   private VectorizationContext vOutContext;
 
   // The above members are initialized by the constructor and must not be
@@ -76,7 +78,7 @@ public class VectorMapJoinOperator exten
   private transient VectorExpressionWriter[] keyOutputWriters;
 
   private transient VectorizedRowBatchCtx vrbCtx = null;
-  
+
   public VectorMapJoinOperator() {
     super();
   }
@@ -112,9 +114,9 @@ public class VectorMapJoinOperator exten
   }
 
   @Override
-  public void initializeOp(Configuration hconf) throws HiveException {
-    super.initializeOp(hconf);
-    
+  public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
+
     List<ExprNodeDesc> keyDesc = conf.getKeys().get(posBigTable);
     keyOutputWriters = VectorExpressionWriterFactory.getExpressionWriters(keyDesc);
 
@@ -178,6 +180,7 @@ public class VectorMapJoinOperator exten
     filterMaps[posBigTable] = null;
 
     outputVectorAssigners = new HashMap<ObjectInspector, VectorColumnAssign[]>();
+    return result;
   }
 
   /**
@@ -220,7 +223,7 @@ public class VectorMapJoinOperator exten
   }
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     byte alias = (byte) tag;
     VectorizedRowBatch inBatch = (VectorizedRowBatch) row;
 
@@ -246,7 +249,7 @@ public class VectorMapJoinOperator exten
     // of row-mode small-tables) this is a reasonable trade-off.
     //
     for(batchIndex=0; batchIndex < inBatch.size; ++batchIndex) {
-      super.processOp(row, tag);
+      super.process(row, tag);
     }
 
     // Set these two to invalid values so any attempt to use them

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java Wed Mar 18 05:40:07 2015
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hive.ql.exec.vector;
 
+import java.util.Collection;
+import java.util.concurrent.Future;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
@@ -34,7 +37,7 @@ public class VectorReduceSinkOperator ex
 
   // Writer for producing row from input batch.
   private VectorExpressionWriter[] rowWriters;
-  
+
   protected transient Object[] singleRow;
 
   public VectorReduceSinkOperator(VectorizationContext vContext, OperatorDesc conf)
@@ -49,7 +52,7 @@ public class VectorReduceSinkOperator ex
   }
 
   @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
     // We need a input object inspector that is for the row we will extract out of the
     // vectorized row batch, not for example, an original inspector for an ORC table, etc.
     VectorExpressionWriterFactory.processVectorInspector(
@@ -64,17 +67,16 @@ public class VectorReduceSinkOperator ex
             });
     singleRow = new Object[rowWriters.length];
 
-    // Call ReduceSinkOperator with new input inspector.
-    super.initializeOp(hconf);
+    return super.initializeOp(hconf);
   }
 
   @Override
-  public void processOp(Object data, int tag) throws HiveException {
+  public void process(Object data, int tag) throws HiveException {
     VectorizedRowBatch vrg = (VectorizedRowBatch) data;
 
     for (int batchIndex = 0 ; batchIndex < vrg.size; ++batchIndex) {
       Object row = getRowObject(vrg, batchIndex);
-      super.processOp(row, tag);
+      super.process(row, tag);
     }
   }
 

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java Wed Mar 18 05:40:07 2015
@@ -19,9 +19,11 @@
 package org.apache.hadoop.hive.ql.exec.vector;
 
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.Future;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -47,8 +49,8 @@ import org.apache.hadoop.hive.serde2.obj
 public class VectorSMBMapJoinOperator extends SMBMapJoinOperator implements VectorizationContextRegion {
 
   private static final Log LOG = LogFactory.getLog(
-      VectorSMBMapJoinOperator.class.getName());  
-  
+      VectorSMBMapJoinOperator.class.getName());
+
   private static final long serialVersionUID = 1L;
 
   private VectorExpression[] bigTableValueExpressions;
@@ -65,7 +67,7 @@ public class VectorSMBMapJoinOperator ex
   // transient.
   //---------------------------------------------------------------------------
 
-  private transient VectorizedRowBatch outputBatch;  
+  private transient VectorizedRowBatch outputBatch;
 
   private transient VectorizedRowBatchCtx vrbCtx = null;
 
@@ -78,23 +80,23 @@ public class VectorSMBMapJoinOperator ex
   private transient VectorHashKeyWrapper[] keyValues;
 
   private transient SMBJoinKeyEvaluator keyEvaluator;
-  
+
   private transient VectorExpressionWriter[] valueWriters;
-  
+
   private interface SMBJoinKeyEvaluator {
     List<Object> evaluate(VectorHashKeyWrapper kw) throws HiveException;
-}  
+}
 
   public VectorSMBMapJoinOperator() {
     super();
   }
-  
+
   public VectorSMBMapJoinOperator(VectorizationContext vContext, OperatorDesc conf)
       throws HiveException {
     this();
     SMBJoinDesc desc = (SMBJoinDesc) conf;
     this.conf = desc;
-    
+
     order = desc.getTagOrder();
     numAliases = desc.getExprs().size();
     posBigTable = (byte) desc.getPosBigTable();
@@ -118,7 +120,7 @@ public class VectorSMBMapJoinOperator ex
     vOutContext = new VectorizationContext(desc.getOutputColumnNames());
     vOutContext.setFileKey(vContext.getFileKey() + "/SMB_JOIN_" + desc.getBigTableAlias());
   }
-  
+
   @Override
   protected List<Object> smbJoinComputeKeys(Object row, byte alias) throws HiveException {
     if (alias == this.posBigTable) {
@@ -127,21 +129,21 @@ public class VectorSMBMapJoinOperator ex
     } else {
       return super.smbJoinComputeKeys(row, alias);
     }
-  }  
-  
+  }
+
   @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
-    super.initializeOp(hconf);
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
 
     vrbCtx = new VectorizedRowBatchCtx();
     vrbCtx.init(vOutContext.getScratchColumnTypeMap(), (StructObjectInspector) this.outputObjInspector);
-    
+
     outputBatch = vrbCtx.createVectorizedRowBatch();
-    
+
     keyWrapperBatch = VectorHashKeyWrapperBatch.compileKeyWrapperBatch(keyExpressions);
-    
+
     outputVectorAssigners = new HashMap<ObjectInspector, VectorColumnAssign[]>();
-    
+
     // This key evaluator translates from the vectorized VectorHashKeyWrapper format
     // into the row-mode MapJoinKey
     keyEvaluator = new SMBJoinKeyEvaluator() {
@@ -163,14 +165,14 @@ public class VectorSMBMapJoinOperator ex
         return key;
       };
     }.init();
-    
+
     Map<Byte, List<ExprNodeDesc>> valueExpressions = conf.getExprs();
-    List<ExprNodeDesc> bigTableExpressions = valueExpressions.get(posBigTable);    
-    
+    List<ExprNodeDesc> bigTableExpressions = valueExpressions.get(posBigTable);
+
     // We're hijacking the big table evaluators and replacing them with our own custom ones
     // which are going to return values from the input batch vector expressions
     List<ExprNodeEvaluator> vectorNodeEvaluators = new ArrayList<ExprNodeEvaluator>(bigTableExpressions.size());
-    
+
     VectorExpressionWriterFactory.processVectorExpressions(
         bigTableExpressions,
         new VectorExpressionWriterFactory.ListOIDClosure() {
@@ -180,7 +182,7 @@ public class VectorSMBMapJoinOperator ex
             valueWriters = writers;
             joinValuesObjectInspectors[posBigTable] = oids;
           }
-        });    
+        });
 
     for(int i=0; i<bigTableExpressions.size(); ++i) {
       ExprNodeDesc desc = bigTableExpressions.get(i);
@@ -213,51 +215,51 @@ public class VectorSMBMapJoinOperator ex
     }
     // Now replace the old evaluators with our own
     joinValues[posBigTable] = vectorNodeEvaluators;
-    
+    return result;
   }
-  
+
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     byte alias = (byte) tag;
-    
+
     if (alias != this.posBigTable) {
-      super.processOp(row, tag);
+      super.process(row, tag);
     } else {
-  
+
       VectorizedRowBatch inBatch = (VectorizedRowBatch) row;
-  
+
       if (null != bigTableFilterExpressions) {
         for(VectorExpression ve : bigTableFilterExpressions) {
           ve.evaluate(inBatch);
         }
       }
-  
+
       if (null != bigTableValueExpressions) {
         for(VectorExpression ve : bigTableValueExpressions) {
           ve.evaluate(inBatch);
         }
       }
-  
+
       keyWrapperBatch.evaluateBatch(inBatch);
       keyValues = keyWrapperBatch.getVectorHashKeyWrappers();
-  
+
       // This implementation of vectorized JOIN is delegating all the work
       // to the row-mode implementation by hijacking the big table node evaluators
       // and calling the row-mode join processOp for each row in the input batch.
-      // Since the JOIN operator is not fully vectorized anyway at the moment 
+      // Since the JOIN operator is not fully vectorized anyway at the moment
       // (due to the use of row-mode small-tables) this is a reasonable trade-off.
       //
       for(batchIndex=0; batchIndex < inBatch.size; ++batchIndex ) {
-        super.processOp(row, tag);
+        super.process(row, tag);
       }
-  
+
       // Set these two to invalid values so any attempt to use them
       // outside the inner loop results in NPE/OutOfBounds errors
       batchIndex = -1;
       keyValues = null;
     }
   }
-  
+
   @Override
   public void closeOp(boolean aborted) throws HiveException {
     super.closeOp(aborted);
@@ -265,7 +267,7 @@ public class VectorSMBMapJoinOperator ex
       flushOutput();
     }
   }
-  
+
   @Override
   protected void internalForward(Object row, ObjectInspector outputOI) throws HiveException {
     Object[] values = (Object[]) row;
@@ -283,11 +285,11 @@ public class VectorSMBMapJoinOperator ex
       flushOutput();
     }
   }
-  
+
   private void flushOutput() throws HiveException {
     forward(outputBatch, null);
     outputBatch.reset();
-  }  
+  }
 
   @Override
   public VectorizationContext getOuputVectorizationContext() {

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSelectOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSelectOperator.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSelectOperator.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSelectOperator.java Wed Mar 18 05:40:07 2015
@@ -19,11 +19,12 @@
 package org.apache.hadoop.hive.ql.exec.vector;
 
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
-import java.util.Map;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.ql.exec.SelectOperator;
+import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory;
@@ -31,13 +32,15 @@ import org.apache.hadoop.hive.ql.metadat
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.SelectDesc;
+import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
 
 /**
  * Select operator implementation.
  */
-public class VectorSelectOperator extends SelectOperator implements VectorizationContextRegion {
+public class VectorSelectOperator extends Operator<SelectDesc> implements
+    VectorizationContextRegion {
 
   private static final long serialVersionUID = 1L;
 
@@ -62,7 +65,7 @@ public class VectorSelectOperator extend
     }
 
     /**
-     * Create a new vectorization context to create a new projection, but keep 
+     * Create a new vectorization context to create a new projection, but keep
      * same output column manager must be inherited to track the scratch the columns.
      */
     vOutContext = new VectorizationContext(vContext);
@@ -74,7 +77,7 @@ public class VectorSelectOperator extend
     for (int i=0; i < colList.size(); ++i) {
       String columnName = this.conf.getOutputColumnNames().get(i);
       VectorExpression ve = vExpressions[i];
-      vOutContext.addProjectionColumn(columnName, 
+      vOutContext.addProjectionColumn(columnName,
               ve.getOutputColumn());
     }
   }
@@ -83,11 +86,11 @@ public class VectorSelectOperator extend
   }
 
   @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
     // Just forward the row as is
     if (conf.isSelStarNoCompute()) {
-      initializeChildren(hconf);
-      return;
+      return null;
     }
 
     List<ObjectInspector> objectInspectors = new ArrayList<ObjectInspector>();
@@ -102,15 +105,15 @@ public class VectorSelectOperator extend
     outputObjInspector = ObjectInspectorFactory.getStandardStructObjectInspector(
         outputFieldNames, objectInspectors);
 
-    initializeChildren(hconf);
     projectedColumns = new int [vExpressions.length];
     for (int i = 0; i < projectedColumns.length; i++) {
       projectedColumns[i] = vExpressions[i].getOutputColumn();
     }
+    return result;
   }
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
 
     // Just forward the row as is
     if (conf.isSelStarNoCompute()) {
@@ -167,4 +170,9 @@ public class VectorSelectOperator extend
   public VectorizationContext getOuputVectorizationContext() {
     return vOutContext;
   }
+
+  @Override
+  public OperatorType getType() {
+    return OperatorType.SELECT;
+  }
 }

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileMapper.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileMapper.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileMapper.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileMapper.java Wed Mar 18 05:40:07 2015
@@ -97,7 +97,7 @@ public class MergeFileMapper extends Map
     row[0] = key;
     row[1] = value;
     try {
-      mergeOp.processOp(row, 0);
+      mergeOp.process(row, 0);
     } catch (HiveException e) {
       abort = true;
       throw new IOException(e);

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java Wed Mar 18 05:40:07 2015
@@ -98,7 +98,8 @@ public class HiveSchemaConverter {
         return Types.optional(PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY).length(bytes).as(OriginalType.DECIMAL).
             scale(scale).precision(prec).named(name);
       } else if (typeInfo.equals(TypeInfoFactory.dateTypeInfo)) {
-        return new PrimitiveType(repetition, PrimitiveTypeName.INT32, name, OriginalType.DATE);
+        return Types.primitive(PrimitiveTypeName.INT32, repetition).as(OriginalType.DATE).named
+            (name);
       } else if (typeInfo.equals(TypeInfoFactory.unknownTypeInfo)) {
         throw new UnsupportedOperationException("Unknown type not implemented");
       } else {

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/lib/ForwardWalker.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/lib/ForwardWalker.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/lib/ForwardWalker.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/lib/ForwardWalker.java Wed Mar 18 05:40:07 2015
@@ -54,9 +54,6 @@ public class ForwardWalker extends Defau
   @SuppressWarnings("unchecked")
   protected void addAllParents(Node nd) {
     Operator<? extends OperatorDesc> op = (Operator<? extends OperatorDesc>) nd;
-    if (op.getParentOperators() == null) {
-      return;
-    }
     getToWalk().removeAll(op.getParentOperators());
     getToWalk().addAll(0, op.getParentOperators());
   }
@@ -68,6 +65,7 @@ public class ForwardWalker extends Defau
 * current operator in the graph
 * @throws SemanticException
 */
+  @Override
   public void walk(Node nd) throws SemanticException {
     if (opStack.empty() || nd != opStack.peek()) {
       opStack.push(nd);

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java Wed Mar 18 05:40:07 2015
@@ -93,7 +93,7 @@ public class ConvertJoinMapJoin implemen
         return retval;
       } else {
         int pos = 0; // it doesn't matter which position we use in this case.
-        convertJoinSMBJoin(joinOp, context, pos, 0, false, false);
+        convertJoinSMBJoin(joinOp, context, pos, 0, false);
         return null;
       }
     }
@@ -135,7 +135,7 @@ public class ConvertJoinMapJoin implemen
       } else {
         // only case is full outer join with SMB enabled which is not possible. Convert to regular
         // join.
-        convertJoinSMBJoin(joinOp, context, 0, 0, false, false);
+        convertJoinSMBJoin(joinOp, context, 0, 0, false);
         return null;
       }
     }
@@ -155,7 +155,7 @@ public class ConvertJoinMapJoin implemen
       // we are just converting to a common merge join operator. The shuffle
       // join in map-reduce case.
       int pos = 0; // it doesn't matter which position we use in this case.
-      convertJoinSMBJoin(joinOp, context, pos, 0, false, false);
+      convertJoinSMBJoin(joinOp, context, pos, 0, false);
       return null;
     }
 
@@ -180,7 +180,7 @@ public class ConvertJoinMapJoin implemen
     // map join either based on the size. Check if we can convert to SMB join.
     if ((context.conf.getBoolVar(HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN) == false)
         || (joinOp.getOpTraits().getNumReduceSinks() >= 2)) {
-      convertJoinSMBJoin(joinOp, context, 0, 0, false, false);
+      convertJoinSMBJoin(joinOp, context, 0, 0, false);
       return null;
     }
     Class<? extends BigTableSelectorForAutoSMJ> bigTableMatcherClass = null;
@@ -188,7 +188,7 @@ public class ConvertJoinMapJoin implemen
       String selector = HiveConf.getVar(context.parseContext.getConf(),
           HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR);
       bigTableMatcherClass =
-          (Class<? extends BigTableSelectorForAutoSMJ>) JavaUtils.loadClass(selector);
+          JavaUtils.loadClass(selector);
     } catch (ClassNotFoundException e) {
       throw new SemanticException(e.getMessage());
     }
@@ -210,18 +210,18 @@ public class ConvertJoinMapJoin implemen
       // we are just converting to a common merge join operator. The shuffle
       // join in map-reduce case.
       int pos = 0; // it doesn't matter which position we use in this case.
-      convertJoinSMBJoin(joinOp, context, pos, 0, false, false);
+      convertJoinSMBJoin(joinOp, context, pos, 0, false);
       return null;
     }
 
     if (checkConvertJoinSMBJoin(joinOp, context, mapJoinConversionPos, tezBucketJoinProcCtx)) {
       convertJoinSMBJoin(joinOp, context, mapJoinConversionPos,
-          tezBucketJoinProcCtx.getNumBuckets(), tezBucketJoinProcCtx.isSubQuery(), true);
+          tezBucketJoinProcCtx.getNumBuckets(), true);
     } else {
       // we are just converting to a common merge join operator. The shuffle
       // join in map-reduce case.
       int pos = 0; // it doesn't matter which position we use in this case.
-      convertJoinSMBJoin(joinOp, context, pos, 0, false, false);
+      convertJoinSMBJoin(joinOp, context, pos, 0, false);
     }
     return null;
   }
@@ -229,7 +229,7 @@ public class ConvertJoinMapJoin implemen
   // replaces the join operator with a new CommonJoinOperator, removes the
   // parent reduce sinks
   private void convertJoinSMBJoin(JoinOperator joinOp, OptimizeTezProcContext context,
-      int mapJoinConversionPos, int numBuckets, boolean isSubQuery, boolean adjustParentsChildren)
+      int mapJoinConversionPos, int numBuckets, boolean adjustParentsChildren)
       throws SemanticException {
     MapJoinDesc mapJoinDesc = null;
     if (adjustParentsChildren) {
@@ -253,7 +253,7 @@ public class ConvertJoinMapJoin implemen
 
     CommonMergeJoinOperator mergeJoinOp =
         (CommonMergeJoinOperator) OperatorFactory.get(new CommonMergeJoinDesc(numBuckets,
-            isSubQuery, mapJoinConversionPos, mapJoinDesc), joinOp.getSchema());
+            mapJoinConversionPos, mapJoinDesc), joinOp.getSchema());
     int numReduceSinks = joinOp.getOpTraits().getNumReduceSinks();
     OpTraits opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), numBuckets, joinOp
         .getOpTraits().getSortCols(), numReduceSinks);
@@ -273,16 +273,7 @@ public class ConvertJoinMapJoin implemen
     }
 
     List<Operator<? extends OperatorDesc>> childOperators = mergeJoinOp.getChildOperators();
-    if (childOperators == null) {
-      childOperators = new ArrayList<Operator<? extends OperatorDesc>>();
-      mergeJoinOp.setChildOperators(childOperators);
-    }
-
     List<Operator<? extends OperatorDesc>> parentOperators = mergeJoinOp.getParentOperators();
-    if (parentOperators == null) {
-      parentOperators = new ArrayList<Operator<? extends OperatorDesc>>();
-      mergeJoinOp.setParentOperators(parentOperators);
-    }
 
     childOperators.clear();
     parentOperators.clear();
@@ -363,8 +354,6 @@ public class ConvertJoinMapJoin implemen
     Map<String, Integer> bigTableBucketNumMapping = new HashMap<String, Integer>();
     bigTableBucketNumMapping.put(joinDesc.getBigTableAlias(), tezBucketJoinProcCtx.getNumBuckets());
     joinDesc.setBigTableBucketNumMapping(bigTableBucketNumMapping);
-    LOG.info("Setting legacy map join to " + (!tezBucketJoinProcCtx.isSubQuery()));
-    joinDesc.setCustomBucketMapJoin(!tezBucketJoinProcCtx.isSubQuery());
 
     return true;
   }
@@ -405,13 +394,10 @@ public class ConvertJoinMapJoin implemen
       }
     }
 
-    boolean isSubQuery = false;
     if (numBuckets < 0) {
-      isSubQuery = true;
       numBuckets = bigTableRS.getConf().getNumReducers();
     }
     tezBucketJoinProcCtx.setNumBuckets(numBuckets);
-    tezBucketJoinProcCtx.setIsSubQuery(isSubQuery);
     LOG.info("We can convert the join to an SMB join.");
     return true;
   }
@@ -457,13 +443,10 @@ public class ConvertJoinMapJoin implemen
      * this is the case when the big table is a sub-query and is probably already bucketed by the
      * join column in say a group by operation
      */
-    boolean isSubQuery = false;
     if (numBuckets < 0) {
-      isSubQuery = true;
       numBuckets = rs.getConf().getNumReducers();
     }
     tezBucketJoinProcCtx.setNumBuckets(numBuckets);
-    tezBucketJoinProcCtx.setIsSubQuery(isSubQuery);
     return true;
   }
 

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java Wed Mar 18 05:40:07 2015
@@ -251,10 +251,8 @@ public class MapJoinProcessor implements
     if (!op.opAllowedAfterMapJoin()) {
       throw new SemanticException(ErrorMsg.OPERATOR_NOT_ALLOWED_WITH_MAPJOIN.getMsg());
     }
-    if (op.getChildOperators() != null) {
-      for (Operator<? extends OperatorDesc> childOp : op.getChildOperators()) {
-        checkChildOperatorType(childOp);
-      }
+    for (Operator<? extends OperatorDesc> childOp : op.getChildOperators()) {
+      checkChildOperatorType(childOp);
     }
   }
 

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java Wed Mar 18 05:40:07 2015
@@ -31,8 +31,10 @@ import org.apache.hadoop.hive.ql.exec.Ha
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorFactory;
+import org.apache.hadoop.hive.ql.exec.OperatorUtils;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
 import org.apache.hadoop.hive.ql.exec.RowSchema;
+import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lib.NodeProcessor;
@@ -64,7 +66,7 @@ public class ReduceSinkMapJoinProc imple
 
   /* (non-Javadoc)
    * This processor addresses the RS-MJ case that occurs in tez on the small/hash
-   * table side of things. The work that RS will be a part of must be connected 
+   * table side of things. The work that RS will be a part of must be connected
    * to the MJ work via be a broadcast edge.
    * We should not walk down the tree when we encounter this pattern because:
    * the type of work (map work or reduce work) needs to be determined
@@ -91,7 +93,7 @@ public class ReduceSinkMapJoinProc imple
     parentRS.setSkipTag(true);
     // remember the original parent list before we start modifying it.
     if (!context.mapJoinParentMap.containsKey(mapJoinOp)) {
-      List<Operator<?>> parents = new ArrayList(mapJoinOp.getParentOperators());
+      List<Operator<?>> parents = new ArrayList<Operator<?>>(mapJoinOp.getParentOperators());
       context.mapJoinParentMap.put(mapJoinOp, parents);
     }
 
@@ -173,9 +175,12 @@ public class ReduceSinkMapJoinProc imple
       parentRS.getConf().setReducerTraits(EnumSet.of(FIXED));
 
       numBuckets = (Integer) joinConf.getBigTableBucketNumMapping().values().toArray()[0];
-      if (joinConf.getCustomBucketMapJoin()) {
+      Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstream(mapJoinOp.getParentOperators()
+          .get(joinConf.getPosBigTable()), TableScanOperator.class);
+
+      if (rootOp instanceof TableScanOperator) { // we will run in mapper
         edgeType = EdgeType.CUSTOM_EDGE;
-      } else {
+      } else { // we will run in reducer
         edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
       }
     }
@@ -218,8 +223,8 @@ public class ReduceSinkMapJoinProc imple
     }
     linkWorkMap.put(parentWork, edgeProp);
     context.linkOpWithWorkMap.put(mapJoinOp, linkWorkMap);
-    
-    List<ReduceSinkOperator> reduceSinks 
+
+    List<ReduceSinkOperator> reduceSinks
       = context.linkWorkWithReduceSinkMap.get(parentWork);
     if (reduceSinks == null) {
       reduceSinks = new ArrayList<ReduceSinkOperator>();

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TezBucketJoinProcCtx.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TezBucketJoinProcCtx.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TezBucketJoinProcCtx.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TezBucketJoinProcCtx.java Wed Mar 18 05:40:07 2015
@@ -18,28 +18,15 @@
 
 package org.apache.hadoop.hive.ql.optimizer;
 
-import java.util.List;
-import java.util.Map;
-
 import org.apache.hadoop.hive.conf.HiveConf;
 
 public class TezBucketJoinProcCtx extends BucketJoinProcCtx {
-  // determines if we need to use custom edge or one-to-one edge
-  boolean isSubQuery = false;
   int numBuckets = -1;
 
   public TezBucketJoinProcCtx(HiveConf conf) {
     super(conf);
   }
 
-  public void setIsSubQuery (boolean isSubQuery) {
-    this.isSubQuery = isSubQuery;
-  }
-
-  public boolean isSubQuery () {
-    return isSubQuery;
-  }
-
   public void setNumBuckets(int numBuckets) {
     this.numBuckets = numBuckets;
   }

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java Wed Mar 18 05:40:07 2015
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.optimizer.listbucketingpruner;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -190,7 +191,7 @@ public class ListBucketingPruner impleme
       finalPaths = execute(ctx, part, pruner);
     } catch (SemanticException e) {
       // Use full partition path for error case.
-      LOG.warn("Using full partition scan :" + part.getPath() + ".", e);
+      LOG.warn("Using full partition scan :" + Arrays.toString(part.getPath()) + ".", e);
       finalPaths = part.getPath();
     }
 
@@ -215,7 +216,7 @@ public class ListBucketingPruner impleme
     if (ListBucketingPrunerUtils.isUnknownState(pruner)) {
       // Use full partition path for error case.
       LOG.warn("List bucketing pruner is either null or in unknown state "
-          + " so that it uses full partition scan :" + part.getPath());
+          + " so that it uses full partition scan :" + Arrays.toString(part.getPath()));
       finalPaths = part.getPath();
     } else {
       // Retrieve skewed columns.
@@ -357,7 +358,7 @@ public class ListBucketingPruner impleme
   private static Path[] generateFinalPath(Partition part, List<Path> selectedPaths) {
     Path[] finalPaths;
     if (selectedPaths.size() == 0) {
-      LOG.warn("Using full partition scan :" + part.getPath() + ".");
+      LOG.warn("Using full partition scan :" + Arrays.toString(part.getPath()) + ".");
       finalPaths = part.getPath();
     } else {
       finalPaths = selectedPaths.toArray(new Path[0]);
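
On the ListBucketingPruner hunks above: Partition.getPath() returns a Path[], and
concatenating an array into a string logs only its class-and-hash identity, which is
why the warnings now go through Arrays.toString. A quick self-contained illustration
(the paths are made up; Path comes from hadoop-common):

    import java.util.Arrays;

    import org.apache.hadoop.fs.Path;

    public class ArrayLogDemo {
      public static void main(String[] args) {
        Path[] paths = { new Path("/warehouse/t/p=1"), new Path("/warehouse/t/p=2") };
        // Before the fix: prints something like [Lorg.apache.hadoop.fs.Path;@1b6d3586
        System.out.println("Using full partition scan :" + paths + ".");
        // After the fix: prints [/warehouse/t/p=1, /warehouse/t/p=2]
        System.out.println("Using full partition scan :" + Arrays.toString(paths) + ".");
      }
    }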

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java Wed Mar 18 05:40:07 2015
@@ -107,7 +107,7 @@ public class OpTraitsRulesProcFactory {
       listBucketCols.add(bucketCols);
       int numBuckets = -1;
       int numReduceSinks = 1;
-      OpTraits parentOpTraits = rs.getParentOperators().get(0).getConf().getOpTraits();
+      OpTraits parentOpTraits = rs.getParentOperators().get(0).getConf().getTraits();
       if (parentOpTraits != null) {
         numBuckets = parentOpTraits.getNumBuckets();
         numReduceSinks += parentOpTraits.getNumReduceSinks();

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java Wed Mar 18 05:40:07 2015
@@ -554,10 +554,6 @@ public class CommonJoinTaskDispatcher ex
       return false;
     }
 
-    if (op.getChildOperators() == null) {
-      return true;
-    }
-
     for (Operator<? extends OperatorDesc> childOp : op.getChildOperators()) {
       if (!checkOperatorOKMapJoinConversion(childOp)) {
         return false;

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SplitSparkWorkResolver.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SplitSparkWorkResolver.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SplitSparkWorkResolver.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SplitSparkWorkResolver.java Wed Mar 18 05:40:07 2015
@@ -183,7 +183,7 @@ public class SplitSparkWorkResolver impl
   private void setStatistics(Operator<? extends OperatorDesc> origin,
       Operator<? extends OperatorDesc> clone) {
     clone.getConf().setStatistics(origin.getConf().getStatistics());
-    clone.getConf().setOpTraits(origin.getConf().getOpTraits());
+    clone.getConf().setTraits(origin.getConf().getTraits());
     if (origin.getChildOperators().size() == clone.getChildOperators().size()) {
       for (int i = 0; i < clone.getChildOperators().size(); i++) {
         setStatistics(origin.getChildOperators().get(i), clone.getChildOperators().get(i));

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Wed Mar 18 05:40:07 2015
@@ -1093,7 +1093,7 @@ public class DDLSemanticAnalyzer extends
       }
     }
 
-    storageFormat.fillDefaultStorageFormat();
+    storageFormat.fillDefaultStorageFormat(false);
     if (indexTableName == null) {
       indexTableName = MetaStoreUtils.getIndexTableName(qTabName[0], qTabName[1], indexName);
       indexTableName = qTabName[0] + "." + indexTableName; // on same database with base table

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g Wed Mar 18 05:40:07 2015
@@ -35,6 +35,9 @@ k=3;
       RecognitionException e) {
     gParent.errors.add(new ParseError(gParent, e, tokenNames));
   }
+  protected boolean useSQL11ReservedKeywordsForIdentifier() {
+    return gParent.useSQL11ReservedKeywordsForIdentifier();
+  }
 }
 
 @rulecatch {
@@ -126,7 +129,7 @@ lateralView
 @init {gParent.pushMsg("lateral view", state); }
 @after {gParent.popMsg(state); }
 	:
-	KW_LATERAL KW_VIEW KW_OUTER function tableAlias (KW_AS identifier ((COMMA)=> COMMA identifier)*)?
+	(KW_LATERAL KW_VIEW KW_OUTER) => KW_LATERAL KW_VIEW KW_OUTER function tableAlias (KW_AS identifier ((COMMA)=> COMMA identifier)*)?
 	-> ^(TOK_LATERAL_VIEW_OUTER ^(TOK_SELECT ^(TOK_SELEXPR function identifier* tableAlias)))
 	|
 	KW_LATERAL KW_VIEW function tableAlias (KW_AS identifier ((COMMA)=> COMMA identifier)*)?
@@ -177,7 +180,12 @@ tableSample
 tableSource
 @init { gParent.pushMsg("table source", state); }
 @after { gParent.popMsg(state); }
-    : tabname=tableName (props=tableProperties)? (ts=tableSample)? (KW_AS? alias=Identifier)?
+    : tabname=tableName 
+    ((tableProperties) => props=tableProperties)?
+    ((tableSample) => ts=tableSample)? 
+    ((KW_AS) => (KW_AS alias=Identifier) 
+    |
+    (Identifier) => (alias=Identifier))?
     -> ^(TOK_TABREF $tabname $props? $ts? $alias?)
     ;
 
@@ -232,11 +240,11 @@ partitionedTableFunction
 @init { gParent.pushMsg("ptf clause", state); }
 @after { gParent.popMsg(state); } 
    :
-   name=Identifier
-   LPAREN KW_ON ptfsrc=partitionTableFunctionSource partitioningSpec?
-     ((Identifier LPAREN expression RPAREN ) => Identifier LPAREN expression RPAREN ( COMMA Identifier LPAREN expression RPAREN)*)? 
-   RPAREN alias=Identifier?
-   ->   ^(TOK_PTBLFUNCTION $name $alias? partitionTableFunctionSource partitioningSpec? expression*)
+   name=Identifier LPAREN KW_ON 
+   ((partitionTableFunctionSource) => (ptfsrc=partitionTableFunctionSource spec=partitioningSpec?))
+   ((Identifier LPAREN expression RPAREN ) => Identifier LPAREN expression RPAREN ( COMMA Identifier LPAREN expression RPAREN)*)?
+   ((RPAREN) => (RPAREN)) ((Identifier) => alias=Identifier)?
+   ->   ^(TOK_PTBLFUNCTION $name $alias? $ptfsrc $spec? expression*)
    ; 
 
 //----------------------- Rules for parsing whereClause -----------------------------

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g Wed Mar 18 05:40:07 2015
@@ -42,7 +42,6 @@ KW_TRUE : 'TRUE';
 KW_FALSE : 'FALSE';
 KW_ALL : 'ALL';
 KW_NONE: 'NONE';
-KW_DEFAULT : 'DEFAULT';
 KW_AND : 'AND';
 KW_OR : 'OR';
 KW_NOT : 'NOT' | '!';

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g?rev=1667456&r1=1667455&r2=1667456&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g Wed Mar 18 05:40:07 2015
@@ -354,6 +354,8 @@ package org.apache.hadoop.hive.ql.parse;
 
 import java.util.Collection;
 import java.util.HashMap;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
 }
 
 
@@ -370,7 +372,6 @@ import java.util.HashMap;
     xlateMap.put("KW_FALSE", "FALSE");
     xlateMap.put("KW_ALL", "ALL");
     xlateMap.put("KW_NONE", "NONE");
-    xlateMap.put("KW_DEFAULT", "DEFAULT");
     xlateMap.put("KW_AND", "AND");
     xlateMap.put("KW_OR", "OR");
     xlateMap.put("KW_NOT", "NOT");
@@ -620,6 +621,13 @@ import java.util.HashMap;
   private CommonTree throwSetOpException() throws RecognitionException {
     throw new FailedPredicateException(input, "orderByClause clusterByClause distributeByClause sortByClause limitClause can only be applied to the whole union.", "");
   }
+  private Configuration hiveConf;
+  public void setHiveConf(Configuration hiveConf) {
+    this.hiveConf = hiveConf;
+  }
+  protected boolean useSQL11ReservedKeywordsForIdentifier() {
+    return !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_SUPPORT_SQL11_RESERVED_KEYWORDS);
+  }
 }
 
 @rulecatch {
@@ -712,8 +720,8 @@ ddlStatement
     | unlockDatabase
     | createRoleStatement
     | dropRoleStatement
-    | grantPrivileges
-    | revokePrivileges
+    | (grantPrivileges) => grantPrivileges
+    | (revokePrivileges) => revokePrivileges
     | showGrants
     | showRoleGrants
     | showRolePrincipals
@@ -955,8 +963,7 @@ alterStatement
 alterTableStatementSuffix
 @init { pushMsg("alter table statement", state); }
 @after { popMsg(state); }
-    : alterStatementSuffixRename[true]
-    | alterStatementSuffixUpdateStatsCol
+    : (alterStatementSuffixRename[true]) => alterStatementSuffixRename[true]
     | alterStatementSuffixDropPartitions[true]
     | alterStatementSuffixAddPartitions[true]
     | alterStatementSuffixTouch
@@ -1297,15 +1304,15 @@ fileFormat
 tabTypeExpr
 @init { pushMsg("specifying table types", state); }
 @after { popMsg(state); }
-
-   : identifier (DOT^ (KW_ELEM_TYPE | KW_KEY_TYPE | KW_VALUE_TYPE | identifier))*
-   ;
-
-descTabTypeExpr
-@init { pushMsg("specifying describe table types", state); }
-@after { popMsg(state); }
-
-   : identifier (DOT^ (KW_ELEM_TYPE | KW_KEY_TYPE | KW_VALUE_TYPE | identifier))* identifier?
+   : identifier (DOT^ 
+   (
+   (KW_ELEM_TYPE) => KW_ELEM_TYPE
+   | 
+   (KW_KEY_TYPE) => KW_KEY_TYPE
+   | 
+   (KW_VALUE_TYPE) => KW_VALUE_TYPE 
+   | identifier
+   ))* identifier?
    ;
 
 partTypeExpr
@@ -1314,21 +1321,22 @@ partTypeExpr
     :  tabTypeExpr partitionSpec? -> ^(TOK_TABTYPE tabTypeExpr partitionSpec?)
     ;
 
-descPartTypeExpr
-@init { pushMsg("specifying describe table partitions", state); }
-@after { popMsg(state); }
-    :  descTabTypeExpr partitionSpec? -> ^(TOK_TABTYPE descTabTypeExpr partitionSpec?)
-    ;
-
 descStatement
 @init { pushMsg("describe statement", state); }
 @after { popMsg(state); }
-    : (KW_DESCRIBE|KW_DESC) (KW_DATABASE|KW_SCHEMA) KW_EXTENDED? (dbName=identifier) -> ^(TOK_DESCDATABASE $dbName KW_EXTENDED?)
-    | (KW_DESCRIBE|KW_DESC) (descOptions=KW_FORMATTED|descOptions=KW_EXTENDED|descOptions=KW_PRETTY)? (parttype=descPartTypeExpr) -> ^(TOK_DESCTABLE $parttype $descOptions?)
-    | (KW_DESCRIBE|KW_DESC) KW_FUNCTION KW_EXTENDED? (name=descFuncNames) -> ^(TOK_DESCFUNCTION $name KW_EXTENDED?)
+    :
+    (KW_DESCRIBE|KW_DESC)
+    (
+    (KW_DATABASE|KW_SCHEMA) => (KW_DATABASE|KW_SCHEMA) KW_EXTENDED? (dbName=identifier) -> ^(TOK_DESCDATABASE $dbName KW_EXTENDED?)
+    |
+    (KW_FUNCTION) => KW_FUNCTION KW_EXTENDED? (name=descFuncNames) -> ^(TOK_DESCFUNCTION $name KW_EXTENDED?)
+    |
+    (KW_FORMATTED|KW_EXTENDED|KW_PRETTY) => ((descOptions=KW_FORMATTED|descOptions=KW_EXTENDED|descOptions=KW_PRETTY) parttype=partTypeExpr) -> ^(TOK_DESCTABLE $parttype $descOptions)
+    |
+    parttype=partTypeExpr -> ^(TOK_DESCTABLE $parttype)
+    )
     ;
 
-
 analyzeStatement
 @init { pushMsg("analyze statement", state); }
 @after { popMsg(state); }
@@ -1350,8 +1358,12 @@ showStatement
     | KW_SHOW KW_TABLE KW_EXTENDED ((KW_FROM|KW_IN) db_name=identifier)? KW_LIKE showStmtIdentifier partitionSpec?
     -> ^(TOK_SHOW_TABLESTATUS showStmtIdentifier $db_name? partitionSpec?)
     | KW_SHOW KW_TBLPROPERTIES tableName (LPAREN prptyName=StringLiteral RPAREN)? -> ^(TOK_SHOW_TBLPROPERTIES tableName $prptyName?)
-    | KW_SHOW KW_LOCKS (parttype=partTypeExpr)? (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWLOCKS $parttype? $isExtended?)
-    | KW_SHOW KW_LOCKS (KW_DATABASE|KW_SCHEMA) (dbName=Identifier) (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWDBLOCKS $dbName $isExtended?)
+    | KW_SHOW KW_LOCKS 
+      (
+      (KW_DATABASE|KW_SCHEMA) => (KW_DATABASE|KW_SCHEMA) (dbName=Identifier) (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWDBLOCKS $dbName $isExtended?)
+      |
+      (parttype=partTypeExpr)? (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWLOCKS $parttype? $isExtended?)
+      )
     | KW_SHOW (showOptions=KW_FORMATTED)? (KW_INDEX|KW_INDEXES) KW_ON showStmtIdentifier ((KW_FROM|KW_IN) db_name=identifier)?
     -> ^(TOK_SHOWINDEXES showStmtIdentifier $showOptions? $db_name?)
     | KW_SHOW KW_COMPACTIONS -> ^(TOK_SHOW_COMPACTIONS)
@@ -1459,8 +1471,12 @@ showCurrentRole
 setRole
 @init {pushMsg("set role", state);}
 @after {popMsg(state);}
-    : KW_SET KW_ROLE roleName=identifier
-    -> ^(TOK_SHOW_SET_ROLE $roleName)
+    : KW_SET KW_ROLE 
+    (
+    (KW_ALL) => (all=KW_ALL) -> ^(TOK_SHOW_SET_ROLE Identifier[$all.text])
+    |
+    identifier -> ^(TOK_SHOW_SET_ROLE identifier)
+    )
     ;
 
 showGrants
@@ -1481,7 +1497,7 @@ showRolePrincipals
 privilegeIncludeColObject
 @init {pushMsg("privilege object including columns", state);}
 @after {popMsg(state);}
-    : KW_ALL -> ^(TOK_RESOURCE_ALL)
+    : (KW_ALL) => KW_ALL -> ^(TOK_RESOURCE_ALL)
     | privObjectCols -> ^(TOK_PRIV_OBJECT_COL privObjectCols)
     ;
 
@@ -1720,7 +1736,7 @@ tableSkewed
 @init { pushMsg("table skewed specification", state); }
 @after { popMsg(state); }
     :
-     KW_SKEWED KW_BY LPAREN skewedCols=columnNameList RPAREN KW_ON LPAREN (skewedValues=skewedValueElement) RPAREN (storedAsDirs)?
+     KW_SKEWED KW_BY LPAREN skewedCols=columnNameList RPAREN KW_ON LPAREN (skewedValues=skewedValueElement) RPAREN ((storedAsDirs) => storedAsDirs)?
     -> ^(TOK_TABLESKEWED $skewedCols $skewedValues storedAsDirs?)
     ;
 
@@ -1851,7 +1867,7 @@ tableFileFormat
 @init { pushMsg("table file format specification", state); }
 @after { popMsg(state); }
     :
-      KW_STORED KW_AS KW_INPUTFORMAT inFmt=StringLiteral KW_OUTPUTFORMAT outFmt=StringLiteral (KW_INPUTDRIVER inDriver=StringLiteral KW_OUTPUTDRIVER outDriver=StringLiteral)?
+      (KW_STORED KW_AS KW_INPUTFORMAT) => KW_STORED KW_AS KW_INPUTFORMAT inFmt=StringLiteral KW_OUTPUTFORMAT outFmt=StringLiteral (KW_INPUTDRIVER inDriver=StringLiteral KW_OUTPUTDRIVER outDriver=StringLiteral)?
       -> ^(TOK_TABLEFILEFORMAT $inFmt $outFmt $inDriver? $outDriver?)
       | KW_STORED KW_BY storageHandler=StringLiteral
          (KW_WITH KW_SERDEPROPERTIES serdeprops=tableProperties)?
@@ -2231,7 +2247,7 @@ simpleSelectStatement
    whereClause?
    groupByClause?
    havingClause?
-   window_clause?
+   ((window_clause) => window_clause)?
    -> ^(TOK_QUERY fromClause? ^(TOK_INSERT ^(TOK_DESTINATION ^(TOK_DIR TOK_TMP_FILE))
                      selectClause whereClause? groupByClause? havingClause? window_clause?))
    ;
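
On the grammar changes above: dropping KW_DEFAULT unreserves DEFAULT outright, while
the remaining SQL11 reserved words are gated by the new
useSQL11ReservedKeywordsForIdentifier() predicate, which reads the hiveConf handed in
through setHiveConf(...). A minimal sketch of how the flag drives the predicate; the
HiveConf calls mirror the hunks above, and the flag is assumed to default to true
(keywords stay reserved):

    import org.apache.hadoop.hive.conf.HiveConf;

    public class Sql11KeywordSketch {
      // Same logic as the predicate added to HiveParser.g: identifiers may
      // match SQL11 reserved words only when the support flag is OFF.
      static boolean useSQL11ReservedKeywordsForIdentifier(HiveConf conf) {
        return !HiveConf.getBoolVar(conf,
            HiveConf.ConfVars.HIVE_SUPPORT_SQL11_RESERVED_KEYWORDS);
      }

      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // false under the assumed default of true
        System.out.println(useSQL11ReservedKeywordsForIdentifier(conf));
        conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_SQL11_RESERVED_KEYWORDS, false);
        // true: the grammar may now accept reserved words as identifiers
        System.out.println(useSQL11ReservedKeywordsForIdentifier(conf));
      }
    }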


