hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mmccl...@apache.org
Subject [47/51] [partial] hive git commit: HIVE-17433: Vectorization: Support Decimal64 in Hive Query Engine (Matt McCline, reviewed by Teddy Choi)
Date Sun, 29 Oct 2017 20:40:26 GMT
http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
index 6c024f7..ae58031 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
@@ -25,14 +25,16 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggreg
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory;
 import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationBufferRow;
+import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationDesc;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 
 /**
 * <ClassName>. Vectorized implementation for MIN/MAX aggregates.
@@ -82,23 +84,19 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-    private transient VectorExpressionWriter resultWriter;
+  // This constructor is used to momentarily create the object so match can be called.
+  public <ClassName>() {
+    super();
+  }
 
-    public <ClassName>(VectorExpression inputExpression, GenericUDAFEvaluator.Mode mode) {
-      super(inputExpression, mode);
-    }
+  public <ClassName>(VectorAggregationDesc vecAggrDesc) {
+    super(vecAggrDesc);
+    init();
+  }
 
     private void init() {
     }
 
-    @Override
-    public void init(AggregationDesc desc) throws HiveException {
-      init();
-
-      resultWriter = VectorExpressionWriterFactory.genVectorExpressionWritable(
-          desc.getParameters().get(0));
-    }
-
     private Aggregation getCurrentAggregationBuffer(
         VectorAggregationBufferRow[] aggregationBufferSets,
         int aggregrateIndex,
@@ -122,8 +120,10 @@ public class <ClassName> extends VectorAggregateExpression {
 
       inputExpression.evaluate(batch);
 
-      DecimalColumnVector inputVector = (DecimalColumnVector)batch.
-        cols[this.inputExpression.getOutputColumn()];
+      DecimalColumnVector inputVector =
+          (DecimalColumnVector) batch.cols[
+              this.inputExpression.getOutputColumnNum()];
+
       HiveDecimalWritable[] vector = inputVector.vector;
 
       if (inputVector.noNulls) {
@@ -303,8 +303,9 @@ public class <ClassName> extends VectorAggregateExpression {
 
         inputExpression.evaluate(batch);
 
-        DecimalColumnVector inputVector = (DecimalColumnVector)batch.
-            cols[this.inputExpression.getOutputColumn()];
+        DecimalColumnVector inputVector =
+            (DecimalColumnVector)batch.cols[
+                this.inputExpression.getOutputColumnNum()];
 
         int batchSize = batch.size;
 
@@ -435,23 +436,6 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     @Override
-    public Object evaluateOutput(
-        AggregationBuffer agg) throws HiveException {
-    Aggregation myagg = (Aggregation) agg;
-      if (myagg.isNull) {
-        return null;
-      }
-      else {
-        return resultWriter.writeValue(myagg.value);
-      }
-    }
-
-    @Override
-    public ObjectInspector getOutputObjectInspector() {
-      return resultWriter.getObjectInspector();
-    }
-
-    @Override
     public long getAggregationBufferFixedSize() {
     JavaDataModel model = JavaDataModel.get();
     return JavaDataModel.alignUp(
@@ -459,4 +443,34 @@ public class <ClassName> extends VectorAggregateExpression {
       model.primitive2(),
       model.memoryAlign());
   }
+
+  @Override
+  public boolean matches(String name, ColumnVector.Type inputColVectorType,
+      ColumnVector.Type outputColVectorType, Mode mode) {
+
+    /*
+     * Min/max input and output is DECIMAL.
+     *
+     * Any mode (PARTIAL1, PARTIAL2, FINAL, COMPLETE).
+     */
+    return
+        name.equals("<DescriptionName>") &&
+        inputColVectorType == ColumnVector.Type.DECIMAL &&
+        outputColVectorType == ColumnVector.Type.DECIMAL;
+  }
+
+  @Override
+  public void assignRowColumn(VectorizedRowBatch batch, int batchIndex, int columnNum,
+      AggregationBuffer agg) throws HiveException {
+
+    DecimalColumnVector outputColVector = (DecimalColumnVector) batch.cols[columnNum];
+    Aggregation myagg = (Aggregation) agg;
+    if (myagg.isNull) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+    outputColVector.vector[batchIndex].set(myagg.value);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal64.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal64.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal64.txt
new file mode 100644
index 0000000..9d1f12d
--- /dev/null
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal64.txt
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.gen;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationDesc;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
+
+/**
+* <ClassName>. Vectorized implementation for MIN/MAX aggregates for Decimal64.
+*/
+@Description(name = "<DescriptionName>",
+    value = "<DescriptionValue>")
+public class <ClassName> extends <BaseClassName> {
+
+  private static final long serialVersionUID = 1L;
+
+  // This constructor is used to momentarily create the object so match can be called.
+  public <ClassName>() {
+    super();
+  }
+
+  public <ClassName>(VectorAggregationDesc vecAggrDesc) {
+    super(vecAggrDesc);
+  }
+
+  @Override
+  public boolean matches(String name, ColumnVector.Type inputColVectorType,
+      ColumnVector.Type outputColVectorType, Mode mode) {
+
+    /*
+     * Min/max input and output is DECIMAL_64.
+     *
+     * Any mode (PARTIAL1, PARTIAL2, FINAL, COMPLETE).
+     */
+    return
+        name.equals("<DescriptionName>") &&
+        inputColVectorType == ColumnVector.Type.DECIMAL_64 &&
+        outputColVectorType == ColumnVector.Type.DECIMAL_64;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
index d12f231..000b606 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
@@ -24,13 +24,15 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggreg
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory;
 import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationBufferRow;
+import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationDesc;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
 
 /**
@@ -81,23 +83,19 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-    private transient VectorExpressionWriter resultWriter;
+  // This constructor is used to momentarily create the object so match can be called.
+  public <ClassName>() {
+    super();
+  }
 
-    public <ClassName>(VectorExpression inputExpression, GenericUDAFEvaluator.Mode mode) {
-      super(inputExpression, mode);
-    }
+  public <ClassName>(VectorAggregationDesc vecAggrDesc) {
+    super(vecAggrDesc);
+    init();
+  }
 
     private void init() {
     }
 
-    @Override
-    public void init(AggregationDesc desc) throws HiveException {
-      init();
-
-      resultWriter = VectorExpressionWriterFactory.genVectorExpressionWritable(
-          desc.getParameters().get(0));
-    }
-
     private Aggregation getCurrentAggregationBuffer(
         VectorAggregationBufferRow[] aggregationBufferSets,
         int aggregrateIndex,
@@ -121,8 +119,9 @@ public class <ClassName> extends VectorAggregateExpression {
 
       inputExpression.evaluate(batch);
 
-      IntervalDayTimeColumnVector inputColVector = (IntervalDayTimeColumnVector)batch.
-        cols[this.inputExpression.getOutputColumn()];
+      IntervalDayTimeColumnVector inputColVector =
+          (IntervalDayTimeColumnVector) batch.cols[
+              this.inputExpression.getOutputColumnNum()];
 
       if (inputColVector.noNulls) {
         if (inputColVector.isRepeating) {
@@ -295,8 +294,9 @@ public class <ClassName> extends VectorAggregateExpression {
 
         inputExpression.evaluate(batch);
 
-        IntervalDayTimeColumnVector inputColVector = (IntervalDayTimeColumnVector)batch.
-            cols[this.inputExpression.getOutputColumn()];
+        IntervalDayTimeColumnVector inputColVector =
+            (IntervalDayTimeColumnVector) batch.cols[
+                this.inputExpression.getOutputColumnNum()];
 
         int batchSize = batch.size;
 
@@ -418,23 +418,6 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     @Override
-    public Object evaluateOutput(
-        AggregationBuffer agg) throws HiveException {
-    Aggregation myagg = (Aggregation) agg;
-      if (myagg.isNull) {
-        return null;
-      }
-      else {
-        return resultWriter.writeValue(myagg.value);
-      }
-    }
-
-    @Override
-    public ObjectInspector getOutputObjectInspector() {
-      return resultWriter.getObjectInspector();
-    }
-
-    @Override
     public long getAggregationBufferFixedSize() {
     JavaDataModel model = JavaDataModel.get();
     return JavaDataModel.alignUp(
@@ -442,5 +425,35 @@ public class <ClassName> extends VectorAggregateExpression {
       model.primitive2(),
       model.memoryAlign());
   }
+
+  @Override
+  public boolean matches(String name, ColumnVector.Type inputColVectorType,
+      ColumnVector.Type outputColVectorType, Mode mode) {
+
+    /*
+     * Min/max input and output is INTERVAL_DAY_TIME.
+     *
+     * Any mode (PARTIAL1, PARTIAL2, FINAL, COMPLETE).
+     */
+    return
+        name.equals("<DescriptionName>") &&
+        inputColVectorType == ColumnVector.Type.INTERVAL_DAY_TIME &&
+        outputColVectorType == ColumnVector.Type.INTERVAL_DAY_TIME;
+  }
+
+  @Override
+  public void assignRowColumn(VectorizedRowBatch batch, int batchIndex, int columnNum,
+      AggregationBuffer agg) throws HiveException {
+
+    IntervalDayTimeColumnVector outputColVector = (IntervalDayTimeColumnVector) batch.cols[columnNum];
+    Aggregation myagg = (Aggregation) agg;
+    if (myagg.isNull) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+    outputColVector.set(batchIndex, myagg.value);
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
index d5eb712..8e0bca1 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
@@ -25,15 +25,15 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
 import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationBufferRow;
+import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationDesc;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
-import org.apache.hadoop.io.Text;
 
 /**
 * <ClassName>. Vectorized implementation for MIN/MAX aggregates.
@@ -93,14 +93,17 @@ public class <ClassName> extends VectorAggregateExpression {
 
     }
 
-    transient private Text result;
+  // This constructor is used to momentarily create the object so match can be called.
+  public <ClassName>() {
+    super();
+  }
 
-    public <ClassName>(VectorExpression inputExpression, GenericUDAFEvaluator.Mode mode) {
-      super(inputExpression, mode);
-    }
+  public <ClassName>(VectorAggregationDesc vecAggrDesc) {
+    super(vecAggrDesc);
+    init();
+  }
 
     private void init() {
-      result = new Text();
     }
 
     private Aggregation getCurrentAggregationBuffer(
@@ -126,8 +129,9 @@ public class <ClassName> extends VectorAggregateExpression {
 
       inputExpression.evaluate(batch);
 
-      BytesColumnVector inputColumn = (BytesColumnVector)batch.
-        cols[this.inputExpression.getOutputColumn()];
+      BytesColumnVector inputColumn =
+          (BytesColumnVector) batch.cols[
+              this.inputExpression.getOutputColumnNum()];
 
       if (inputColumn.noNulls) {
         if (inputColumn.isRepeating) {
@@ -261,8 +265,9 @@ public class <ClassName> extends VectorAggregateExpression {
 
         inputExpression.evaluate(batch);
 
-        BytesColumnVector inputColumn = (BytesColumnVector)batch.
-            cols[this.inputExpression.getOutputColumn()];
+        BytesColumnVector inputColumn =
+            (BytesColumnVector) batch.cols[
+                this.inputExpression.getOutputColumnNum()];
 
         int batchSize = batch.size;
 
@@ -362,24 +367,6 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     @Override
-    public Object evaluateOutput(
-        AggregationBuffer agg) throws HiveException {
-    Aggregation myagg = (Aggregation) agg;
-      if (myagg.isNull) {
-        return null;
-      }
-      else {
-        result.set(myagg.bytes, 0, myagg.length);
-        return result;
-      }
-    }
-
-    @Override
-    public ObjectInspector getOutputObjectInspector() {
-      return PrimitiveObjectInspectorFactory.writableStringObjectInspector;
-    }
-
-    @Override
     public long getAggregationBufferFixedSize() {
       JavaDataModel model = JavaDataModel.get();
       return JavaDataModel.alignUp(
@@ -394,8 +381,33 @@ public class <ClassName> extends VectorAggregateExpression {
       return true;
     }
 
-    @Override
-    public void init(AggregationDesc desc) throws HiveException {
-      init();
+  @Override
+  public boolean matches(String name, ColumnVector.Type inputColVectorType,
+      ColumnVector.Type outputColVectorType, Mode mode) {
+
+    /*
+     * Min/max input and output is BYTES.
+     *
+     * Any mode (PARTIAL1, PARTIAL2, FINAL, COMPLETE).
+     */
+    return
+        name.equals("<DescriptionName>") &&
+        inputColVectorType == ColumnVector.Type.BYTES &&
+        outputColVectorType == ColumnVector.Type.BYTES;
+  }
+
+  @Override
+  public void assignRowColumn(VectorizedRowBatch batch, int batchIndex, int columnNum,
+      AggregationBuffer agg) throws HiveException {
+
+    BytesColumnVector outputColVector = (BytesColumnVector) batch.cols[columnNum];
+    Aggregation myagg = (Aggregation) agg;
+    if (myagg.isNull) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
     }
+    outputColVector.isNull[batchIndex] = false;
+    outputColVector.setVal(batchIndex, myagg.bytes, 0, myagg.length);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
index f78de56..27da3d0 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
@@ -26,14 +26,16 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggreg
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory;
 import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationBufferRow;
+import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationDesc;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
 import org.apache.hadoop.hive.serde2.io.TimestampWritable;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 
 /**
 * <ClassName>. Vectorized implementation for MIN/MAX aggregates.
@@ -83,23 +85,19 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-    private transient VectorExpressionWriter resultWriter;
+  // This constructor is used to momentarily create the object so match can be called.
+  public <ClassName>() {
+    super();
+  }
 
-    public <ClassName>(VectorExpression inputExpression, GenericUDAFEvaluator.Mode mode) {
-      super(inputExpression, mode);
-    }
+  public <ClassName>(VectorAggregationDesc vecAggrDesc) {
+    super(vecAggrDesc);
+    init();
+  }
 
     private void init() {
     }
 
-    @Override
-    public void init(AggregationDesc desc) throws HiveException {
-      init();
-
-      resultWriter = VectorExpressionWriterFactory.genVectorExpressionWritable(
-          desc.getParameters().get(0));
-    }
-
     private Aggregation getCurrentAggregationBuffer(
         VectorAggregationBufferRow[] aggregationBufferSets,
         int aggregrateIndex,
@@ -123,8 +121,9 @@ public class <ClassName> extends VectorAggregateExpression {
 
       inputExpression.evaluate(batch);
 
-      TimestampColumnVector inputColVector = (TimestampColumnVector)batch.
-        cols[this.inputExpression.getOutputColumn()];
+      TimestampColumnVector inputColVector =
+          (TimestampColumnVector) batch.cols[
+              this.inputExpression.getOutputColumnNum()];
 
       if (inputColVector.noNulls) {
         if (inputColVector.isRepeating) {
@@ -297,8 +296,9 @@ public class <ClassName> extends VectorAggregateExpression {
 
         inputExpression.evaluate(batch);
 
-        TimestampColumnVector inputColVector = (TimestampColumnVector)batch.
-            cols[this.inputExpression.getOutputColumn()];
+        TimestampColumnVector inputColVector =
+            (TimestampColumnVector) batch.cols[
+                this.inputExpression.getOutputColumnNum()];
 
         int batchSize = batch.size;
 
@@ -420,23 +420,6 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     @Override
-    public Object evaluateOutput(
-        AggregationBuffer agg) throws HiveException {
-    Aggregation myagg = (Aggregation) agg;
-      if (myagg.isNull) {
-        return null;
-      }
-      else {
-        return resultWriter.writeValue(myagg.value);
-      }
-    }
-
-    @Override
-    public ObjectInspector getOutputObjectInspector() {
-      return resultWriter.getObjectInspector();
-    }
-
-    @Override
     public long getAggregationBufferFixedSize() {
     JavaDataModel model = JavaDataModel.get();
     return JavaDataModel.alignUp(
@@ -444,5 +427,35 @@ public class <ClassName> extends VectorAggregateExpression {
       model.primitive2(),
       model.memoryAlign());
   }
+
+  @Override
+  public boolean matches(String name, ColumnVector.Type inputColVectorType,
+      ColumnVector.Type outputColVectorType, Mode mode) {
+
+    /*
+     * Min/max input and output is TIMESTAMP.
+     *
+     * Any mode (PARTIAL1, PARTIAL2, FINAL, COMPLETE).
+     */
+    return
+        name.equals("<DescriptionName>") &&
+        inputColVectorType == ColumnVector.Type.TIMESTAMP &&
+        outputColVectorType == ColumnVector.Type.TIMESTAMP;
+  }
+
+  @Override
+  public void assignRowColumn(VectorizedRowBatch batch, int batchIndex, int columnNum,
+      AggregationBuffer agg) throws HiveException {
+
+    TimestampColumnVector outputColVector = (TimestampColumnVector) batch.cols[columnNum];
+    Aggregation myagg = (Aggregation) agg;
+    if (myagg.isNull) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+    outputColVector.set(batchIndex, myagg.value);
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
index 475d578..a251f13 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
@@ -22,22 +22,21 @@ import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
 import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationBufferRow;
+import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationDesc;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.hive.serde2.io.DoubleWritable;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 
 /**
 * <ClassName>. Vectorized implementation for SUM aggregates.
 */
-@Description(name = "sum", 
+@Description(name = "sum",
     value = "_FUNC_(expr) - Returns the sum value of expr (vectorized, type: <ValueType>)")
 public class <ClassName> extends VectorAggregateExpression {
 
@@ -83,14 +82,17 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-    transient private <OutputType> result;
+  // This constructor is used to momentarily create the object so match can be called.
+  public <ClassName>() {
+    super();
+  }
 
-    public <ClassName>(VectorExpression inputExpression, GenericUDAFEvaluator.Mode mode) {
-      super(inputExpression, mode);
-    }
+  public <ClassName>(VectorAggregationDesc vecAggrDesc) {
+    super(vecAggrDesc);
+    init();
+  }
 
     private void init() {
-      result = new <OutputType>();
     }
 
     private Aggregation getCurrentAggregationBuffer(
@@ -116,8 +118,10 @@ public class <ClassName> extends VectorAggregateExpression {
 
       inputExpression.evaluate(batch);
 
-      <InputColumnVectorType> inputVector = (<InputColumnVectorType>)batch.
-        cols[this.inputExpression.getOutputColumn()];
+      <InputColumnVectorType> inputVector =
+          (<InputColumnVectorType>) batch.cols[
+              this.inputExpression.getOutputColumnNum()];
+
       <ValueType>[] vector = inputVector.vector;
 
       if (inputVector.noNulls) {
@@ -292,8 +296,9 @@ public class <ClassName> extends VectorAggregateExpression {
 
       inputExpression.evaluate(batch);
 
-      <InputColumnVectorType> inputVector = (<InputColumnVectorType>)batch.
-          cols[this.inputExpression.getOutputColumn()];
+      <InputColumnVectorType> inputVector =
+          (<InputColumnVectorType>) batch.cols[
+              this.inputExpression.getOutputColumnNum()];
 
       int batchSize = batch.size;
 
@@ -411,23 +416,6 @@ public class <ClassName> extends VectorAggregateExpression {
       myAgg.reset();
     }
 
-    @Override
-    public Object evaluateOutput(AggregationBuffer agg) throws HiveException {
-      Aggregation myagg = (Aggregation) agg;
-      if (myagg.isNull) {
-        return null;
-      }
-      else {
-        result.set(myagg.sum);
-        return result;
-      }
-    }
-
-    @Override
-    public ObjectInspector getOutputObjectInspector() {
-      return <OutputTypeInspector>;
-    }
-
   @Override
   public long getAggregationBufferFixedSize() {
       JavaDataModel model = JavaDataModel.get();
@@ -437,7 +425,34 @@ public class <ClassName> extends VectorAggregateExpression {
   }
 
   @Override
-  public void init(AggregationDesc desc) throws HiveException {
-    init();
+  public boolean matches(String name, ColumnVector.Type inputColVectorType,
+      ColumnVector.Type outputColVectorType, Mode mode) {
+
+    /*
+     * Sum input and output are <UpperCaseColumnVectorType>.
+     *
+     * Any mode (PARTIAL1, PARTIAL2, FINAL, COMPLETE).
+     */
+    return
+        name.equals("sum") &&
+        inputColVectorType == ColumnVector.Type.<UpperCaseColumnVectorType> &&
+        outputColVectorType == ColumnVector.Type.<UpperCaseColumnVectorType>;
+  }
+
+  @Override
+  public void assignRowColumn(VectorizedRowBatch batch, int batchIndex, int columnNum,
+      AggregationBuffer agg) throws HiveException {
+
+    <InputColumnVectorType> outputColVector = (<InputColumnVectorType>) batch.cols[columnNum];
+
+    Aggregation myagg = (Aggregation) agg;
+    if (myagg.isNull) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+
+    outputColVector.vector[batchIndex] = myagg.sum;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
index 390bd02..901cb4b 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
@@ -25,19 +25,20 @@ import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
 import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationBufferRow;
+import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationDesc;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFVariance;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFVariance.VarianceKind;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.hive.serde2.io.DoubleWritable;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 
 import com.google.common.base.Preconditions;
 
@@ -83,8 +84,9 @@ public class <ClassName> extends VectorAggregateExpression {
           sum += value;
           count++;
           if (count > 1) {
-            double t = count * value - sum;
-            variance += (t * t) / ((double) count * (count - 1));
+            variance =
+                GenericUDAFVariance.calculateIntermediate(
+                    count, sum, value, variance);
           }
         }
       }
@@ -103,68 +105,33 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-#IF PARTIAL1
-    transient private LongWritable resultCount;
-    transient private DoubleWritable resultSum;
-    transient private DoubleWritable resultVariance;
-    transient private Object[] partialResult;
-
-    transient private ObjectInspector soi;
-#ENDIF PARTIAL1
 #IF COMPLETE
-    transient private DoubleWritable fullResult;
-
-    transient private ObjectInspector oi;
+    transient private VarianceKind varianceKind = VarianceKind.NONE;
 #ENDIF COMPLETE
 
-    public <ClassName>(VectorExpression inputExpression, GenericUDAFEvaluator.Mode mode) {
-      super(inputExpression, mode);
+  // This constructor is used to momentarily create the object so match can be called.
+  public <ClassName>() {
+    super();
+  }
+
+  public <ClassName>(VectorAggregationDesc vecAggrDesc) {
+    super(vecAggrDesc);
 #IF PARTIAL1
-      Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.PARTIAL1);
+    Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.PARTIAL1);
 #ENDIF PARTIAL1
 #IF COMPLETE
-      Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.COMPLETE);
+    Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.COMPLETE);
 #ENDIF COMPLETE
-    }
+    init();
+  }
 
     private void init() {
-#IF PARTIAL1
-      partialResult = new Object[3];
-      resultCount = new LongWritable();
-      resultSum = new DoubleWritable();
-      resultVariance = new DoubleWritable();
-      partialResult[0] = resultCount;
-      partialResult[1] = resultSum;
-      partialResult[2] = resultVariance;
-      initPartialResultInspector();
-#ENDIF PARTIAL1
 #IF COMPLETE
-      fullResult = new DoubleWritable();
-      initFullResultInspector();
+      String aggregateName = vecAggrDesc.getAggrDesc().getGenericUDAFName();
+      varianceKind = VarianceKind.nameMap.get(aggregateName);
 #ENDIF COMPLETE
     }
 
-#IF PARTIAL1
-  private void initPartialResultInspector() {
-        List<ObjectInspector> foi = new ArrayList<ObjectInspector>();
-        foi.add(PrimitiveObjectInspectorFactory.writableLongObjectInspector);
-        foi.add(PrimitiveObjectInspectorFactory.writableDoubleObjectInspector);
-        foi.add(PrimitiveObjectInspectorFactory.writableDoubleObjectInspector);
-
-        List<String> fname = new ArrayList<String>();
-        fname.add("count");
-        fname.add("sum");
-        fname.add("variance");
-
-        soi = ObjectInspectorFactory.getStandardStructObjectInspector(fname, foi);
-    }
-#ENDIF PARTIAL1
-#IF COMPLETE
-    private void initFullResultInspector() {
-      oi = PrimitiveObjectInspectorFactory.writableDoubleObjectInspector;
-    }
-#ENDIF COMPLETE
-
     private Aggregation getCurrentAggregationBuffer(
         VectorAggregationBufferRow[] aggregationBufferSets,
         int aggregateIndex,
@@ -183,8 +150,9 @@ public class <ClassName> extends VectorAggregateExpression {
 
       inputExpression.evaluate(batch);
 
-      <InputColumnVectorType> inputVector = (<InputColumnVectorType>)batch.
-        cols[this.inputExpression.getOutputColumn()];
+      <InputColumnVectorType> inputVector =
+          (<InputColumnVectorType>) batch.cols[
+              this.inputExpression.getOutputColumnNum()];
 
       int batchSize = batch.size;
 
@@ -328,8 +296,9 @@ public class <ClassName> extends VectorAggregateExpression {
 
       inputExpression.evaluate(batch);
 
-      <InputColumnVectorType> inputVector = (<InputColumnVectorType>)batch.
-        cols[this.inputExpression.getOutputColumn()];
+      <InputColumnVectorType> inputVector =
+          (<InputColumnVectorType>) batch.cols[
+              this.inputExpression.getOutputColumnNum()];
 
       int batchSize = batch.size;
 
@@ -473,68 +442,92 @@ public class <ClassName> extends VectorAggregateExpression {
       myAgg.reset();
     }
 
-    @Override
-    public Object evaluateOutput(
-        AggregationBuffer agg) throws HiveException {
-      Aggregation myagg = (Aggregation) agg;
-      if (myagg.isNull) {
-        return null;
-       } else {
+  @Override
+  public long getAggregationBufferFixedSize() {
+      JavaDataModel model = JavaDataModel.get();
+      return JavaDataModel.alignUp(
+        model.object() +
+        model.primitive2()*3+
+        model.primitive1(),
+        model.memoryAlign());
+  }
+
+  @Override
+  public boolean matches(String name, ColumnVector.Type inputColVectorType,
+      ColumnVector.Type outputColVectorType, Mode mode) {
+
+    /*
+     * Variance input is <UpperCaseColumnVectorType>.
 #IF PARTIAL1
-        resultCount.set (myagg.count);
-        resultSum.set (myagg.sum);
-        resultVariance.set (myagg.variance);
-        return partialResult;
+     * Output is STRUCT.
+     *
+     * Mode PARTIAL1.
 #ENDIF PARTIAL1
 #IF COMPLETE
-        if (myagg.count == 0) {
-          return null;   // SQL standard - return null for zero elements
-        } else if (myagg.count > 1) {
-#IF VARIANCE
-          fullResult.set(myagg.variance / (myagg.count));
-#ENDIF VARIANCE
-#IF VARIANCE_SAMPLE
-          fullResult.set(myagg.variance / (myagg.count - 1));
-#ENDIF VARIANCE_SAMPLE
-#IF STD
-          fullResult.set(Math.sqrt(myagg.variance / (myagg.count)));
-#ENDIF STD
-#IF STD_SAMPLE
-          fullResult.set(Math.sqrt(myagg.variance / (myagg.count - 1)));
-#ENDIF STD_SAMPLE
-        } else {
-
-          // For one element the variance is always 0.
-          fullResult.set(0);
-        }
-        return fullResult;
+     * Output is DOUBLE.
+     *
+     * Mode COMPLETE.
 #ENDIF COMPLETE
-      }
-    }
-
-  @Override
-  public ObjectInspector getOutputObjectInspector() {
+     */
+    return
+        GenericUDAFVariance.isVarianceFamilyName(name) &&
+        inputColVectorType == ColumnVector.Type.<UpperCaseColumnVectorType> &&
 #IF PARTIAL1
-    return soi;
+        outputColVectorType == ColumnVector.Type.STRUCT &&
+        mode == Mode.PARTIAL1;
 #ENDIF PARTIAL1
 #IF COMPLETE
-    return oi;
+        outputColVectorType == ColumnVector.Type.DOUBLE &&
+        mode == Mode.COMPLETE;
 #ENDIF COMPLETE
   }
 
   @Override
-  public long getAggregationBufferFixedSize() {
-      JavaDataModel model = JavaDataModel.get();
-      return JavaDataModel.alignUp(
-        model.object() +
-        model.primitive2()*3+
-        model.primitive1(),
-        model.memoryAlign());
-  }
+  public void assignRowColumn(VectorizedRowBatch batch, int batchIndex, int columnNum,
+      AggregationBuffer agg) throws HiveException {
 
-  @Override
-  public void init(AggregationDesc desc) throws HiveException {
-    init();
+#IF PARTIAL1
+    StructColumnVector outputColVector = (StructColumnVector) batch.cols[columnNum];
+
+    Aggregation myagg = (Aggregation) agg;
+    if (myagg.isNull) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+
+    ColumnVector[] fields = outputColVector.fields;
+    ((LongColumnVector) fields[VARIANCE_COUNT_FIELD_INDEX]).vector[batchIndex] = myagg.count;
+    ((DoubleColumnVector) fields[VARIANCE_SUM_FIELD_INDEX]).vector[batchIndex] = myagg.sum;
+    ((DoubleColumnVector) fields[VARIANCE_VARIANCE_FIELD_INDEX]).vector[batchIndex] = myagg.variance;
+#ENDIF PARTIAL1
+#IF COMPLETE
+    DoubleColumnVector outputColVector = (DoubleColumnVector) batch.cols[columnNum];
+
+    Aggregation myagg = (Aggregation) agg;
+    if (GenericUDAFVariance.isVarianceNull(myagg.count, varianceKind)) {
+
+      // SQL standard - return null for zero (or 1 for sample) elements
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+
+    final double result;
+    if (myagg.count > 1) {
+
+      // Use the common variance family result calculation method.
+      result = GenericUDAFVariance.calculateVarianceFamilyResult(
+          myagg.variance, myagg.count, varianceKind);
+    } else {
+
+      // For one element the variance is always 0.
+      result = 0.0;
+    }
+    outputColVector.vector[batchIndex] = result;
+#ENDIF COMPLETE
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
index ba246e2..2fadaa7 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
@@ -27,18 +27,21 @@ import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
 import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationBufferRow;
+import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationDesc;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFVariance;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFVariance.VarianceKind;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.hive.serde2.io.DoubleWritable;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 
 import com.google.common.base.Preconditions;
 
@@ -87,8 +90,9 @@ public class <ClassName> extends VectorAggregateExpression {
           sum += value;
           count++;
           if (count > 1) {
-            double t = count * value - sum;
-            variance += (t * t) / ((double) count * (count - 1));
+            variance =
+                GenericUDAFVariance.calculateIntermediate(
+                    count, sum, value, variance);
           }
         }
       }
@@ -112,68 +116,33 @@ public class <ClassName> extends VectorAggregateExpression {
 
     }
 
-#IF PARTIAL1
-    transient private LongWritable resultCount;
-    transient private DoubleWritable resultSum;
-    transient private DoubleWritable resultVariance;
-    transient private Object[] partialResult;
-
-    transient private ObjectInspector soi;
-#ENDIF PARTIAL1
 #IF COMPLETE
-      transient private DoubleWritable fullResult;
-
-      transient private ObjectInspector oi;
+    transient private VarianceKind varianceKind = VarianceKind.NONE;
 #ENDIF COMPLETE
 
-    public <ClassName>(VectorExpression inputExpression, GenericUDAFEvaluator.Mode mode) {
-      super(inputExpression, mode);
+  // This constructor is used to momentarily create the object so match can be called.
+  public <ClassName>() {
+    super();
+  }
+
+  public <ClassName>(VectorAggregationDesc vecAggrDesc) {
+    super(vecAggrDesc);
 #IF PARTIAL1
-      Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.PARTIAL1);
+    Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.PARTIAL1);
 #ENDIF PARTIAL1
 #IF COMPLETE
-      Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.COMPLETE);
+    Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.COMPLETE);
 #ENDIF COMPLETE
-    }
+    init();
+  }
 
     private void init() {
-#IF PARTIAL1
-      partialResult = new Object[3];
-      resultCount = new LongWritable();
-      resultSum = new DoubleWritable();
-      resultVariance = new DoubleWritable();
-      partialResult[0] = resultCount;
-      partialResult[1] = resultSum;
-      partialResult[2] = resultVariance;
-      initPartialResultInspector();
-#ENDIF PARTIAL1
 #IF COMPLETE
-      fullResult = new DoubleWritable();
-      initFullResultInspector();
+      String aggregateName = vecAggrDesc.getAggrDesc().getGenericUDAFName();
+      varianceKind = VarianceKind.nameMap.get(aggregateName);
 #ENDIF COMPLETE
     }
 
-#IF PARTIAL1
-  private void initPartialResultInspector() {
-        List<ObjectInspector> foi = new ArrayList<ObjectInspector>();
-        foi.add(PrimitiveObjectInspectorFactory.writableLongObjectInspector);
-        foi.add(PrimitiveObjectInspectorFactory.writableDoubleObjectInspector);
-        foi.add(PrimitiveObjectInspectorFactory.writableDoubleObjectInspector);
-
-        List<String> fname = new ArrayList<String>();
-        fname.add("count");
-        fname.add("sum");
-        fname.add("variance");
-
-        soi = ObjectInspectorFactory.getStandardStructObjectInspector(fname, foi);
-    }
-#ENDIF PARTIAL1
-#IF COMPLETE
-    private void initFullResultInspector() {
-      oi = PrimitiveObjectInspectorFactory.writableDoubleObjectInspector;
-    }
-#ENDIF COMPLETE
-
     private Aggregation getCurrentAggregationBuffer(
         VectorAggregationBufferRow[] aggregationBufferSets,
         int aggregateIndex,
@@ -192,8 +161,9 @@ public class <ClassName> extends VectorAggregateExpression {
 
       inputExpression.evaluate(batch);
 
-      DecimalColumnVector inputVector = (DecimalColumnVector)batch.
-        cols[this.inputExpression.getOutputColumn()];
+      DecimalColumnVector inputVector =
+          (DecimalColumnVector) batch.cols[
+              this.inputExpression.getOutputColumnNum()];
 
       int batchSize = batch.size;
 
@@ -326,8 +296,9 @@ public class <ClassName> extends VectorAggregateExpression {
 
       inputExpression.evaluate(batch);
 
-      DecimalColumnVector inputVector = (DecimalColumnVector)batch.
-        cols[this.inputExpression.getOutputColumn()];
+      DecimalColumnVector inputVector =
+          (DecimalColumnVector) batch.cols[
+              this.inputExpression.getOutputColumnNum()];
 
       int batchSize = batch.size;
 
@@ -435,68 +406,91 @@ public class <ClassName> extends VectorAggregateExpression {
       myAgg.reset();
     }
 
-    @Override
-    public Object evaluateOutput(
-        AggregationBuffer agg) throws HiveException {
-      Aggregation myagg = (Aggregation) agg;
-      if (myagg.isNull) {
-        return null;
-      } else {
+  @Override
+  public long getAggregationBufferFixedSize() {
+      JavaDataModel model = JavaDataModel.get();
+      return JavaDataModel.alignUp(
+        model.object() +
+        model.primitive2()*3+
+        model.primitive1(),
+        model.memoryAlign());
+  }
+
+  @Override
+  public boolean matches(String name, ColumnVector.Type inputColVectorType,
+      ColumnVector.Type outputColVectorType, Mode mode) {
+
+    /*
+     * Variance input is DECIMAL.
 #IF PARTIAL1
-        resultCount.set (myagg.count);
-        resultSum.set (myagg.sum);
-        resultVariance.set (myagg.variance);
-        return partialResult;
+     * Output is STRUCT.
+     *
+     * Mode PARTIAL1.
 #ENDIF PARTIAL1
 #IF COMPLETE
-        if (myagg.count == 0) {
-          return null;   // SQL standard - return null for zero elements
-        } else if (myagg.count > 1) {
-#IF VARIANCE
-          fullResult.set(myagg.variance / (myagg.count));
-#ENDIF VARIANCE
-#IF VARIANCE_SAMPLE
-          fullResult.set(myagg.variance / (myagg.count - 1));
-#ENDIF VARIANCE_SAMPLE
-#IF STD
-          fullResult.set(Math.sqrt(myagg.variance / (myagg.count)));
-#ENDIF STD
-#IF STD_SAMPLE
-          fullResult.set(Math.sqrt(myagg.variance / (myagg.count - 1)));
-#ENDIF STD_SAMPLE
-        } else {
-
-          // For one element the variance is always 0.
-          fullResult.set(0);
-        }
-
-        return fullResult;
+     * Output is DOUBLE.
+     *
+     * Mode COMPLETE.
 #ENDIF COMPLETE
-      }
-    }
-
-  @Override
-  public ObjectInspector getOutputObjectInspector() {
+     */
+    return
+        GenericUDAFVariance.isVarianceFamilyName(name) &&
+        inputColVectorType == ColumnVector.Type.DECIMAL &&
 #IF PARTIAL1
-    return soi;
+        outputColVectorType == ColumnVector.Type.STRUCT &&
+        mode == Mode.PARTIAL1;
 #ENDIF PARTIAL1
 #IF COMPLETE
-    return oi;
+        outputColVectorType == ColumnVector.Type.DOUBLE &&
+        mode == Mode.COMPLETE;
 #ENDIF COMPLETE
   }
 
   @Override
-  public long getAggregationBufferFixedSize() {
-      JavaDataModel model = JavaDataModel.get();
-      return JavaDataModel.alignUp(
-        model.object() +
-        model.primitive2()*3+
-        model.primitive1(),
-        model.memoryAlign());
-  }
+  public void assignRowColumn(VectorizedRowBatch batch, int batchIndex, int columnNum,
+      AggregationBuffer agg) throws HiveException {
 
-  @Override
-  public void init(AggregationDesc desc) throws HiveException {
-    init();
+#IF PARTIAL1
+    StructColumnVector outputColVector = (StructColumnVector) batch.cols[columnNum];
+
+    Aggregation myagg = (Aggregation) agg;
+    if (myagg.isNull) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+
+    ColumnVector[] fields = outputColVector.fields;
+    ((LongColumnVector) fields[VARIANCE_COUNT_FIELD_INDEX]).vector[batchIndex] = myagg.count;
+    ((DoubleColumnVector) fields[VARIANCE_SUM_FIELD_INDEX]).vector[batchIndex] = myagg.sum;
+    ((DoubleColumnVector) fields[VARIANCE_VARIANCE_FIELD_INDEX]).vector[batchIndex] = myagg.variance;
+#ENDIF PARTIAL1
+#IF COMPLETE
+    DoubleColumnVector outputColVector = (DoubleColumnVector) batch.cols[columnNum];
+
+    Aggregation myagg = (Aggregation) agg;
+    if (GenericUDAFVariance.isVarianceNull(myagg.count, varianceKind)) {
+
+      // SQL standard - return null for zero (or 1 for sample) elements
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+
+    final double result;
+    if (myagg.count > 1) {
+
+      // Use the common variance family result calculation method.
+      result = GenericUDAFVariance.calculateVarianceFamilyResult(
+          myagg.variance, myagg.count, varianceKind);
+    } else {
+
+      // For one element the variance is always 0.
+      result = 0.0;
+    }
+    outputColVector.vector[batchIndex] = result;
+#ENDIF COMPLETE
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarMerge.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarMerge.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarMerge.txt
index 447685b..3b311a8 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarMerge.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarMerge.txt
@@ -25,6 +25,7 @@ import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
 import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationBufferRow;
+import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationDesc;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
@@ -34,14 +35,10 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFVariance;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFVariance.VarianceKind;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.hive.serde2.io.DoubleWritable;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 
 import com.google.common.base.Preconditions;
 
@@ -69,32 +66,28 @@ public class <ClassName> extends VectorAggregateExpression {
       transient private boolean isNull = true;
 
       public void merge(long partialCount, double partialSum, double partialVariance) {
-        final long origMergeCount;
+
         if (isNull || mergeCount == 0) {
+
           // Just copy the information since there is nothing so far.
-          origMergeCount = 0;
           mergeCount = partialCount;
           mergeSum = partialSum;
           mergeVariance = partialVariance;
           isNull = false;
-        } else {
-          origMergeCount = mergeCount;
+          return;
         }
 
-        if (partialCount > 0 && origMergeCount > 0) {
+        if (partialCount > 0 && mergeCount > 0) {
 
-          // Merge the two partials
+          // Merge the two partials.
+          mergeVariance +=
+              GenericUDAFVariance.calculateMerge(
+                  partialCount, mergeCount, partialSum, mergeSum,
+                  partialVariance, mergeVariance);
 
+          // Update these after calculation.
           mergeCount += partialCount;
-          final double origMergeSum = mergeSum;
           mergeSum += partialSum;
-
-          final double doublePartialCount = (double) partialCount;
-          final double doubleOrigMergeCount = (double) origMergeCount;
-          double t = (doublePartialCount / doubleOrigMergeCount) * origMergeSum - partialSum;
-          mergeVariance +=
-              partialVariance + ((doubleOrigMergeCount / doublePartialCount) /
-                  (doubleOrigMergeCount + doublePartialCount)) * t * t;
         }
       }
 
@@ -112,68 +105,33 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-#IF PARTIAL2
-    transient private Object[] partialResult;
-    transient private LongWritable resultCount;
-    transient private DoubleWritable resultSum;
-    transient private DoubleWritable resultVariance;
-    transient private StructObjectInspector soi;
-#ENDIF PARTIAL2
 #IF FINAL
-    transient private DoubleWritable fullResult;
-    transient private ObjectInspector oi;
+    transient private VarianceKind varianceKind = VarianceKind.NONE;
 #ENDIF FINAL
 
-    private transient int countOffset;
-    private transient int sumOffset;
-    private transient int varianceOffset;
+  // This constructor is used to momentarily create the object so match can be called.
+  public <ClassName>() {
+    super();
+  }
 
-    public <ClassName>(VectorExpression inputExpression, GenericUDAFEvaluator.Mode mode) {
-      super(inputExpression, mode);
+  public <ClassName>(VectorAggregationDesc vecAggrDesc) {
+    super(vecAggrDesc);
 #IF PARTIAL2
-      Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.PARTIAL2);
+    Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.PARTIAL2);
 #ENDIF PARTIAL2
 #IF FINAL
-      Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.FINAL);
+    Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.FINAL);
 #ENDIF FINAL
-    }
+    init();
+  }
 
     private void init() {
-#IF PARTIAL2
-      partialResult = new Object[3];
-      resultCount = new LongWritable();
-      resultSum = new DoubleWritable();
-      resultVariance = new DoubleWritable();
-      partialResult[0] = resultCount;
-      partialResult[1] = resultSum;
-      partialResult[2] = resultVariance;
-      initPartialResultInspector();
-#ENDIF PARTIAL2
 #IF FINAL
-      fullResult = new DoubleWritable();
-      initFullResultInspector();
+      String aggregateName = vecAggrDesc.getAggrDesc().getGenericUDAFName();
+      varianceKind = VarianceKind.nameMap.get(aggregateName);
 #ENDIF FINAL
     }
 
-#IF PARTIAL2
-    private void initPartialResultInspector() {
-        List<ObjectInspector> foi = new ArrayList<ObjectInspector>();
-        foi.add(PrimitiveObjectInspectorFactory.writableLongObjectInspector);
-        foi.add(PrimitiveObjectInspectorFactory.writableDoubleObjectInspector);
-        foi.add(PrimitiveObjectInspectorFactory.writableDoubleObjectInspector);
-        List<String> fname = new ArrayList<String>();
-        fname.add("count");
-        fname.add("sum");
-        fname.add("variance");
-        soi = ObjectInspectorFactory.getStandardStructObjectInspector(fname, foi);
-    }
-#ENDIF PARTIAL2
-#IF FINAL
-    private void initFullResultInspector() {
-      oi = PrimitiveObjectInspectorFactory.writableDoubleObjectInspector;
-    }
-#ENDIF FINAL
-
     private Aggregation getCurrentAggregationBuffer(
         VectorAggregationBufferRow[] aggregationBufferSets,
         int bufferIndex,
@@ -198,12 +156,14 @@ public class <ClassName> extends VectorAggregateExpression {
       inputExpression.evaluate(batch);
 
       StructColumnVector inputStructColVector =
-          (StructColumnVector) batch.cols[this.inputExpression.getOutputColumn()];
+          (StructColumnVector) batch.cols[
+              this.inputExpression.getOutputColumnNum()];
+
       ColumnVector[] fields = inputStructColVector.fields;
 
-      long[] countVector = ((LongColumnVector) fields[countOffset]).vector;
-      double[] sumVector = ((DoubleColumnVector) fields[sumOffset]).vector;
-      double[] varianceVector = ((DoubleColumnVector) fields[varianceOffset]).vector;
+      long[] countVector = ((LongColumnVector) fields[VARIANCE_COUNT_FIELD_INDEX]).vector;
+      double[] sumVector = ((DoubleColumnVector) fields[VARIANCE_SUM_FIELD_INDEX]).vector;
+      double[] varianceVector = ((DoubleColumnVector) fields[VARIANCE_VARIANCE_FIELD_INDEX]).vector;
 
       if (inputStructColVector.noNulls) {
         if (inputStructColVector.isRepeating) {
@@ -393,12 +353,14 @@ public class <ClassName> extends VectorAggregateExpression {
       inputExpression.evaluate(batch);
 
       StructColumnVector inputStructColVector =
-          (StructColumnVector) batch.cols[this.inputExpression.getOutputColumn()];
+          (StructColumnVector) batch.cols[
+              this.inputExpression.getOutputColumnNum()];
+
       ColumnVector[] fields = inputStructColVector.fields;
 
-      long[] countVector = ((LongColumnVector) fields[countOffset]).vector;
-      double[] sumVector = ((DoubleColumnVector) fields[sumOffset]).vector;
-      double[] varianceVector = ((DoubleColumnVector) fields[varianceOffset]).vector;
+      long[] countVector = ((LongColumnVector) fields[VARIANCE_COUNT_FIELD_INDEX]).vector;
+      double[] sumVector = ((DoubleColumnVector) fields[VARIANCE_SUM_FIELD_INDEX]).vector;
+      double[] varianceVector = ((DoubleColumnVector) fields[VARIANCE_VARIANCE_FIELD_INDEX]).vector;
 
       int batchSize = batch.size;
 
@@ -499,75 +461,90 @@ public class <ClassName> extends VectorAggregateExpression {
       myAgg.reset();
     }
 
-    @Override
-    public Object evaluateOutput(
-        AggregationBuffer agg) throws HiveException {
-      Aggregation myagg = (Aggregation) agg;
-      if (myagg.isNull) {
-        return null;
-      } else {
+  @Override
+  public long getAggregationBufferFixedSize() {
+    JavaDataModel model = JavaDataModel.get();
+    return JavaDataModel.alignUp(
+      model.object() +
+      model.primitive2() * 2,
+      model.memoryAlign());
+  }
+
+  @Override
+  public boolean matches(String name, ColumnVector.Type inputColVectorType,
+      ColumnVector.Type outputColVectorType, Mode mode) {
+
+    /*
+     * Variance input is STRUCT.
 #IF PARTIAL2
-        resultCount.set (myagg.mergeCount);
-        resultSum.set (myagg.mergeSum);
-        resultVariance.set (myagg.mergeVariance);
-        return partialResult;
+     * Output is STRUCT.
+     *
+     * Mode PARTIAL2.
 #ENDIF PARTIAL2
 #IF FINAL
-       if (myagg.mergeCount == 0) {
-          return null;   // SQL standard - return null for zero elements
-        } else if (myagg.mergeCount > 1) {
-#IF VARIANCE
-          fullResult.set(myagg.mergeVariance / (myagg.mergeCount));
-#ENDIF VARIANCE
-#IF VARIANCE_SAMPLE
-          fullResult.set(myagg.mergeVariance / (myagg.mergeCount - 1));
-#ENDIF VARIANCE_SAMPLE
-#IF STD
-          fullResult.set(Math.sqrt(myagg.mergeVariance / (myagg.mergeCount)));
-#ENDIF STD
-#IF STD_SAMPLE
-          fullResult.set(Math.sqrt(myagg.mergeVariance / (myagg.mergeCount - 1)));
-#ENDIF STD_SAMPLE
-        } else {
-
-          // For one element the variance is always 0.
-          fullResult.set(0);
-        }
-
-        return fullResult;
+     * Output is DOUBLE.
+     *
+     * Mode FINAL.
 #ENDIF FINAL
-      }
-    }
-
-  @Override
-  public ObjectInspector getOutputObjectInspector() {
+     */
+    return
+        GenericUDAFVariance.isVarianceFamilyName(name) &&
+        inputColVectorType == ColumnVector.Type.STRUCT &&
 #IF PARTIAL2
-    return soi;
+        outputColVectorType == ColumnVector.Type.STRUCT &&
+        mode == Mode.PARTIAL2;
 #ENDIF PARTIAL2
 #IF FINAL
-    return oi;
+        outputColVectorType == ColumnVector.Type.DOUBLE &&
+        mode == Mode.FINAL;
 #ENDIF FINAL
   }
 
   @Override
-  public long getAggregationBufferFixedSize() {
-    JavaDataModel model = JavaDataModel.get();
-    return JavaDataModel.alignUp(
-      model.object() +
-      model.primitive2() * 2,
-      model.memoryAlign());
-  }
+  public void assignRowColumn(VectorizedRowBatch batch, int batchIndex, int columnNum,
+      AggregationBuffer agg) throws HiveException {
 
-  @Override
-  public void init(AggregationDesc desc) throws HiveException {
-    init();
+#IF PARTIAL2
+    StructColumnVector outputColVector = (StructColumnVector) batch.cols[columnNum];
+
+    Aggregation myagg = (Aggregation) agg;
+    if (myagg.isNull) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+
+    ColumnVector[] fields = outputColVector.fields;
+    ((LongColumnVector) fields[VARIANCE_COUNT_FIELD_INDEX]).vector[batchIndex] = myagg.mergeCount;
+    ((DoubleColumnVector) fields[VARIANCE_SUM_FIELD_INDEX]).vector[batchIndex] = myagg.mergeSum;
+    ((DoubleColumnVector) fields[VARIANCE_VARIANCE_FIELD_INDEX]).vector[batchIndex] = myagg.mergeVariance;
+#ENDIF PARTIAL2
+#IF FINAL
+    DoubleColumnVector outputColVector = (DoubleColumnVector) batch.cols[columnNum];
+
+    Aggregation myagg = (Aggregation) agg;
+    if (GenericUDAFVariance.isVarianceNull(myagg.mergeCount, varianceKind)) {
+
+      // SQL standard - return null for zero (or 1 for sample) elements
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+
+    final double result;
+    if (myagg.mergeCount > 1) {
 
-    ExprNodeDesc inputExpr = desc.getParameters().get(0);
-    StructTypeInfo partialStructTypeInfo = (StructTypeInfo) inputExpr.getTypeInfo();
+      // Use the common variance family result calculation method.
+      result = GenericUDAFVariance.calculateVarianceFamilyResult(
+          myagg.mergeVariance, myagg.mergeCount, varianceKind);
+    } else {
 
-    ArrayList<String> fieldNames =  partialStructTypeInfo.getAllStructFieldNames();
-    countOffset = fieldNames.indexOf("count");
-    sumOffset = fieldNames.indexOf("sum");
-    varianceOffset = fieldNames.indexOf("variance");
+      // For one element the variance is always 0.
+      result = 0.0;
+    }
+    outputColVector.vector[batchIndex] = result;
+#ENDIF FINAL
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarTimestamp.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarTimestamp.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarTimestamp.txt
index 8ef1a9f..881f631 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarTimestamp.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarTimestamp.txt
@@ -25,18 +25,21 @@ import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
 import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationBufferRow;
+import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationDesc;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFVariance;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFVariance.VarianceKind;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.hive.serde2.io.DoubleWritable;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 
 import com.google.common.base.Preconditions;
 
@@ -82,8 +85,9 @@ public class <ClassName> extends VectorAggregateExpression {
           sum += value;
           count++;
           if (count > 1) {
-            double t = count * value - sum;
-            variance += (t * t) / ((double) count * (count - 1));
+            variance =
+                GenericUDAFVariance.calculateIntermediate(
+                    count, sum, value, variance);
           }
         }
       }
@@ -102,70 +106,33 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-#IF PARTIAL1
-    transient private LongWritable resultCount;
-    transient private DoubleWritable resultSum;
-    transient private DoubleWritable resultVariance;
-    transient private Object[] partialResult;
-
-    transient private ObjectInspector soi;
-#ENDIF PARTIAL1
 #IF COMPLETE
-    transient private DoubleWritable fullResult;
-
-    transient private ObjectInspector oi;
+    transient private VarianceKind varianceKind = VarianceKind.NONE;
 #ENDIF COMPLETE
 
+  // This constructor is used to momentarily create the object so match can be called.
+  public <ClassName>() {
+    super();
+  }
 
-    public <ClassName>(VectorExpression inputExpression,
-        GenericUDAFEvaluator.Mode mode) {
-      super(inputExpression, mode);
+  public <ClassName>(VectorAggregationDesc vecAggrDesc) {
+    super(vecAggrDesc);
 #IF PARTIAL1
-      Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.PARTIAL1);
+    Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.PARTIAL1);
 #ENDIF PARTIAL1
 #IF COMPLETE
-      Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.COMPLETE);
+    Preconditions.checkState(this.mode == GenericUDAFEvaluator.Mode.COMPLETE);
 #ENDIF COMPLETE
-    }
+    init();
+  }
 
     private void init() {
-#IF PARTIAL1
-      partialResult = new Object[3];
-      resultCount = new LongWritable();
-      resultSum = new DoubleWritable();
-      resultVariance = new DoubleWritable();
-      partialResult[0] = resultCount;
-      partialResult[1] = resultSum;
-      partialResult[2] = resultVariance;
-      initPartialResultInspector();
-#ENDIF PARTIAL1
 #IF COMPLETE
-      fullResult = new DoubleWritable();
-      initFullResultInspector();
+      String aggregateName = vecAggrDesc.getAggrDesc().getGenericUDAFName();
+      varianceKind = VarianceKind.nameMap.get(aggregateName);
 #ENDIF COMPLETE
     }
 
-#IF PARTIAL1
-  private void initPartialResultInspector() {
-        List<ObjectInspector> foi = new ArrayList<ObjectInspector>();
-        foi.add(PrimitiveObjectInspectorFactory.writableLongObjectInspector);
-        foi.add(PrimitiveObjectInspectorFactory.writableDoubleObjectInspector);
-        foi.add(PrimitiveObjectInspectorFactory.writableDoubleObjectInspector);
-
-        List<String> fname = new ArrayList<String>();
-        fname.add("count");
-        fname.add("sum");
-        fname.add("variance");
-
-        soi = ObjectInspectorFactory.getStandardStructObjectInspector(fname, foi);
-    }
-#ENDIF PARTIAL1
-#IF COMPLETE
-    private void initFullResultInspector() {
-      oi = PrimitiveObjectInspectorFactory.writableDoubleObjectInspector;
-    }
-#ENDIF COMPLETE
-
     private Aggregation getCurrentAggregationBuffer(
         VectorAggregationBufferRow[] aggregationBufferSets,
         int aggregateIndex,
@@ -184,8 +151,9 @@ public class <ClassName> extends VectorAggregateExpression {
 
       inputExpression.evaluate(batch);
 
-      TimestampColumnVector inputColVector = (TimestampColumnVector)batch.
-        cols[this.inputExpression.getOutputColumn()];
+      TimestampColumnVector inputColVector =
+          (TimestampColumnVector) batch.cols[
+              this.inputExpression.getOutputColumnNum()];
 
       int batchSize = batch.size;
 
@@ -309,8 +277,9 @@ public class <ClassName> extends VectorAggregateExpression {
 
       inputExpression.evaluate(batch);
 
-      TimestampColumnVector inputColVector = (TimestampColumnVector)batch.
-        cols[this.inputExpression.getOutputColumn()];
+      TimestampColumnVector inputColVector =
+          (TimestampColumnVector) batch.cols[
+              this.inputExpression.getOutputColumnNum()];
 
       int batchSize = batch.size;
 
@@ -409,69 +378,91 @@ public class <ClassName> extends VectorAggregateExpression {
       myAgg.reset();
     }
 
-    @Override
-    public Object evaluateOutput(
-        AggregationBuffer agg) throws HiveException {
-      Aggregation myagg = (Aggregation) agg;
-      if (myagg.isNull) {
-        return null;
-      } else {
+  @Override
+  public long getAggregationBufferFixedSize() {
+      JavaDataModel model = JavaDataModel.get();
+      return JavaDataModel.alignUp(
+        model.object() +
+        model.primitive2()*3+
+        model.primitive1(),
+        model.memoryAlign());
+  }
+
+  @Override
+  public boolean matches(String name, ColumnVector.Type inputColVectorType,
+      ColumnVector.Type outputColVectorType, Mode mode) {
+
+    /*
+     * Variance input is TIMESTAMP.
 #IF PARTIAL1
-        resultCount.set (myagg.count);
-        resultSum.set (myagg.sum);
-        resultVariance.set (myagg.variance);
-        return partialResult;
+     * Output is STRUCT.
+     *
+     * Mode PARTIAL1.
 #ENDIF PARTIAL1
 #IF COMPLETE
-       if (myagg.count == 0) {
-          return null;   // SQL standard - return null for zero elements
-        } else if (myagg.count > 1) {
-#IF VARIANCE
-          fullResult.set(myagg.variance / (myagg.count));
-#ENDIF VARIANCE
-#IF VARIANCE_SAMPLE
-          fullResult.set(myagg.variance / (myagg.count - 1));
-#ENDIF VARIANCE_SAMPLE
-#IF STD
-          fullResult.set(Math.sqrt(myagg.variance / (myagg.count)));
-#ENDIF STD
-#IF STD_SAMPLE
-          fullResult.set(Math.sqrt(myagg.variance / (myagg.count - 1)));
-#ENDIF STD_SAMPLE
-        } else {
-
-          // For one element the variance is always 0.
-          fullResult.set(0);
-        }
-
-        return fullResult;
+     * Output is DOUBLE.
+     *
+     * Mode COMPLETE.
 #ENDIF COMPLETE
-      }
-    }
-
-  @Override
-  public ObjectInspector getOutputObjectInspector() {
+     */
+    return
+        GenericUDAFVariance.isVarianceFamilyName(name) &&
+        inputColVectorType == ColumnVector.Type.TIMESTAMP &&
 #IF PARTIAL1
-    return soi;
+        outputColVectorType == ColumnVector.Type.STRUCT &&
+        mode == Mode.PARTIAL1;
 #ENDIF PARTIAL1
 #IF COMPLETE
-    return oi;
+        outputColVectorType == ColumnVector.Type.DOUBLE &&
+        mode == Mode.COMPLETE;
 #ENDIF COMPLETE
   }
 
   @Override
-  public long getAggregationBufferFixedSize() {
-      JavaDataModel model = JavaDataModel.get();
-      return JavaDataModel.alignUp(
-        model.object() +
-        model.primitive2()*3+
-        model.primitive1(),
-        model.memoryAlign());
-  }
+  public void assignRowColumn(VectorizedRowBatch batch, int batchIndex, int columnNum,
+      AggregationBuffer agg) throws HiveException {
 
-  @Override
-  public void init(AggregationDesc desc) throws HiveException {
-    init();
+#IF PARTIAL1
+    StructColumnVector outputColVector = (StructColumnVector) batch.cols[columnNum];
+
+    Aggregation myagg = (Aggregation) agg;
+    if (myagg.isNull) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+
+    ColumnVector[] fields = outputColVector.fields;
+    ((LongColumnVector) fields[VARIANCE_COUNT_FIELD_INDEX]).vector[batchIndex] = myagg.count;
+    ((DoubleColumnVector) fields[VARIANCE_SUM_FIELD_INDEX]).vector[batchIndex] = myagg.sum;
+    ((DoubleColumnVector) fields[VARIANCE_VARIANCE_FIELD_INDEX]).vector[batchIndex] = myagg.variance;
+#ENDIF PARTIAL1
+#IF COMPLETE
+    DoubleColumnVector outputColVector = (DoubleColumnVector) batch.cols[columnNum];
+
+    Aggregation myagg = (Aggregation) agg;
+    if (GenericUDAFVariance.isVarianceNull(myagg.count, varianceKind)) {
+
+      // SQL standard - return null for zero (or 1 for sample) elements
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+
+    final double result;
+    if (myagg.count > 1) {
+
+      // Use the common variance family result calculation method.
+      result = GenericUDAFVariance.calculateVarianceFamilyResult(
+          myagg.variance, myagg.count, varianceKind);
+    } else {
+
+      // For one element the variance is always 0.
+      result = 0.0;
+    }
+    outputColVector.vector[batchIndex] = result;
+#ENDIF COMPLETE
   }
 }
-

http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
index 391666a..885c203 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
@@ -326,6 +326,8 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
   @SuppressWarnings("unchecked")
   public final void initialize(Configuration hconf, ObjectInspector[] inputOIs)
       throws HiveException {
+    // String className = this.getClass().getName();
+
     this.done = false;
     if (state == State.INIT) {
       return;
@@ -344,7 +346,6 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
       inputObjInspectors = inputOIs;
     }
 
-    // initialize structure to maintain child op info. operator tree changes
     // while initializing so this need to be done here instead of constructor
     childOperatorsArray = new Operator[childOperators.size()];
     for (int i = 0; i < childOperatorsArray.length; i++) {

http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
index 993da83..e665064 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.exec;
 
+import java.lang.reflect.Constructor;
 import java.util.ArrayList;
 import java.util.IdentityHashMap;
 import java.util.List;
@@ -146,26 +147,39 @@ public final class OperatorFactory {
 
   public static <T extends OperatorDesc> Operator<T> getVectorOperator(
     Class<? extends Operator<?>> opClass, CompilationOpContext cContext, T conf,
-        VectorizationContext vContext) throws HiveException {
+        VectorizationContext vContext, VectorDesc vectorDesc) throws HiveException {
+
+    Constructor<? extends Operator<?>> constructor;
+    try {
+      constructor = opClass.getDeclaredConstructor(
+          CompilationOpContext.class, OperatorDesc.class,
+          VectorizationContext.class, VectorDesc.class);
+    } catch (Exception e) {
+      e.printStackTrace();
+      throw new HiveException(
+          "Constructor " + opClass.getSimpleName() +
+          "(CompilationOpContext, OperatorDesc, VectorizationContext, VectorDesc) not found", e);
+    }
     try {
-      VectorDesc vectorDesc = ((AbstractOperatorDesc) conf).getVectorDesc();
       vectorDesc.setVectorOp(opClass);
-      Operator<T> op = (Operator<T>) opClass.getDeclaredConstructor(
-          CompilationOpContext.class, VectorizationContext.class, OperatorDesc.class)
-          .newInstance(cContext, vContext, conf);
+      Operator<T> op = (Operator<T>) constructor.newInstance(
+          cContext, conf, vContext, vectorDesc);
       return op;
     } catch (Exception e) {
       e.printStackTrace();
-      throw new HiveException(e);
+      throw new HiveException(
+          "Error encountered calling constructor " + opClass.getSimpleName() +
+          "(CompilationOpContext, OperatorDesc, VectorizationContext, VectorDesc)", e);
     }
   }
 
   public static <T extends OperatorDesc> Operator<T> getVectorOperator(
-      CompilationOpContext cContext, T conf, VectorizationContext vContext) throws HiveException {
+      CompilationOpContext cContext, T conf, VectorizationContext vContext, VectorDesc vectorDesc)
+          throws HiveException {
     Class<T> descClass = (Class<T>) conf.getClass();
     Class<?> opClass = vectorOpvec.get(descClass);
     if (opClass != null) {
-      return getVectorOperator(vectorOpvec.get(descClass), cContext, conf, vContext);
+      return getVectorOperator(vectorOpvec.get(descClass), cContext, conf, vContext, vectorDesc);
     }
     throw new HiveException("No vector operator for descriptor class " + descClass.getName());
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
index 8fe037e..42ac1de 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizationContextRegion;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
@@ -55,9 +57,11 @@ import org.apache.hadoop.mapred.JobConf;
  * read as part of map-reduce framework
  **/
 public class TableScanOperator extends Operator<TableScanDesc> implements
-    Serializable {
+    Serializable, VectorizationContextRegion {
   private static final long serialVersionUID = 1L;
 
+  private VectorizationContext taskVectorizationContext;
+
   protected transient JobConf jc;
   private transient boolean inputFileChanged = false;
   private TableDesc tableDesc;
@@ -403,4 +407,13 @@ public class TableScanOperator extends Operator<TableScanDesc> implements
     this.insideView = insiderView;
   }
 
+  public void setTaskVectorizationContext(VectorizationContext taskVectorizationContext) {
+    this.taskVectorizationContext = taskVectorizationContext;
+  }
+
+  @Override
+  public VectorizationContext getOutputVectorizationContext() {
+    return taskVectorizationContext;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
index 3519e1d..6c0bf2d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hive.ql.exec.persistence.MapJoinBytesTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinObjectSerDeContext;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizationOperator;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.BucketMapJoinContext;
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
@@ -75,9 +76,11 @@ public class HashTableLoader implements org.apache.hadoop.hive.ql.exec.HashTable
     this.desc = joinOp.getConf();
     if (desc.getVectorMode() && HiveConf.getBoolVar(
         hconf, HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED)) {
-      VectorMapJoinDesc vectorDesc = (VectorMapJoinDesc) desc.getVectorDesc();
-      useFastContainer = vectorDesc != null && vectorDesc.getHashTableImplementationType() ==
-          VectorMapJoinDesc.HashTableImplementationType.FAST;
+      if (joinOp instanceof VectorizationOperator) {
+        VectorMapJoinDesc vectorDesc = (VectorMapJoinDesc) ((VectorizationOperator) joinOp).getVectorDesc();
+        useFastContainer = vectorDesc != null && vectorDesc.getHashTableImplementationType() ==
+            VectorMapJoinDesc.HashTableImplementationType.FAST;
+      }
     }
   }
 


Mime
View raw message