hive-commits mailing list archives

From mmccl...@apache.org
Subject [14/51] [partial] hive git commit: HIVE-17433: Vectorization: Support Decimal64 in Hive Query Engine (Matt McCline, reviewed by Teddy Choi)
Date Sun, 29 Oct 2017 20:39:53 GMT
http://git-wip-us.apache.org/repos/asf/hive/blob/e63ebccc/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
index 631bd04..ea01380 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
@@ -48,10 +48,16 @@ POSTHOOK: Input: default@decimal_udf_txt
 POSTHOOK: Output: default@decimal_udf
 POSTHOOK: Lineage: decimal_udf.key SIMPLE [(decimal_udf_txt)decimal_udf_txt.FieldSchema(name:key, type:decimal(20,10), comment:null), ]
 POSTHOOK: Lineage: decimal_udf.value SIMPLE [(decimal_udf_txt)decimal_udf_txt.FieldSchema(name:value, type:int, comment:null), ]
-PREHOOK: query: EXPLAIN SELECT key + key FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key + key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key + key FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key + key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -66,12 +72,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (key + key) (type: decimal(21,10))
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: DecimalColAddDecimalColumn(col 0:decimal(20,10), col 0:decimal(20,10)) -> 3:decimal(21,10)
                     Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -79,6 +96,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [decimal(21,10)]
 
   Stage: Stage-0
     Fetch Operator
@@ -132,10 +164,16 @@ NULL
 2.0000000000
 -2469135780.2469135780
 2469135780.2469135600
-PREHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key + value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key + value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -150,12 +188,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (key + CAST( value AS decimal(10,0))) (type: decimal(21,10))
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [4]
+                        selectExpressions: DecimalColAddDecimalColumn(col 0:decimal(20,10), col 3:decimal(10,0))(children: CastLongToDecimal(col 1:int) -> 3:decimal(10,0)) -> 4:decimal(21,10)
                     Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -163,6 +212,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0, 1]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [decimal(10,0), decimal(21,10)]
 
   Stage: Stage-0
     Fetch Operator
@@ -216,10 +280,16 @@ NULL
 2.0000000000
 -2469135780.1234567890
 2469135780.1234567800
-PREHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key + (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key + (value/2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -234,12 +304,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (UDFToDouble(key) + (UDFToDouble(value) / 2.0)) (type: double)
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [4]
+                        selectExpressions: DoubleColAddDoubleColumn(col 3:double, col 5:double)(children: CastDecimalToDouble(col 0:decimal(20,10)) -> 3:double, DoubleColDivideDoubleScalar(col 4:double, val 2.0)(children: CastLongToDouble(col 1:int) -> 4:double) -> 5:double) -> 4:double
                     Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -247,6 +328,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0, 1]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [double, double, double]
 
   Stage: Stage-0
     Fetch Operator
@@ -300,10 +396,16 @@ NULL
 1.5
 -1.8518518351234567E9
 1.8518518351234567E9
-PREHOOK: query: EXPLAIN SELECT key + '1.0' FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key + '1.0' FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key + '1.0' FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key + '1.0' FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -318,12 +420,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (UDFToDouble(key) + 1.0) (type: double)
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [4]
+                        selectExpressions: DoubleColAddDoubleScalar(col 3:double, val 1.0)(children: CastDecimalToDouble(col 0:decimal(20,10)) -> 3:double) -> 4:double
                     Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -331,6 +444,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [double, double]
 
   Stage: Stage-0
     Fetch Operator
@@ -384,10 +512,16 @@ NULL
 2.0
 -1.2345678891234567E9
 1.2345678911234567E9
-PREHOOK: query: EXPLAIN SELECT key - key FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key - key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key - key FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key - key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -402,12 +536,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (key - key) (type: decimal(21,10))
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: DecimalColSubtractDecimalColumn(col 0:decimal(20,10), col 0:decimal(20,10)) -> 3:decimal(21,10)
                     Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -415,6 +560,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [decimal(21,10)]
 
   Stage: Stage-0
     Fetch Operator
@@ -468,10 +628,16 @@ NULL
 0.0000000000
 0.0000000000
 0.0000000000
-PREHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key - value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key - value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -486,12 +652,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (key - CAST( value AS decimal(10,0))) (type: decimal(21,10))
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [4]
+                        selectExpressions: DecimalColSubtractDecimalColumn(col 0:decimal(20,10), col 3:decimal(10,0))(children: CastLongToDecimal(col 1:int) -> 3:decimal(10,0)) -> 4:decimal(21,10)
                     Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -499,6 +676,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0, 1]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [decimal(10,0), decimal(21,10)]
 
   Stage: Stage-0
     Fetch Operator
@@ -552,10 +744,16 @@ NULL
 0.0000000000
 -0.1234567890
 0.1234567800
-PREHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key - (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key - (value/2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -570,12 +768,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (UDFToDouble(key) - (UDFToDouble(value) / 2.0)) (type: double)
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [4]
+                        selectExpressions: DoubleColSubtractDoubleColumn(col 3:double, col 5:double)(children: CastDecimalToDouble(col 0:decimal(20,10)) -> 3:double, DoubleColDivideDoubleScalar(col 4:double, val 2.0)(children: CastLongToDouble(col 1:int) -> 4:double) -> 5:double) -> 4:double
                     Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -583,6 +792,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0, 1]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [double, double, double]
 
   Stage: Stage-0
     Fetch Operator
@@ -636,10 +860,16 @@ NULL
 0.5
 -6.172839451234567E8
 6.172839451234567E8
-PREHOOK: query: EXPLAIN SELECT key - '1.0' FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key - '1.0' FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key - '1.0' FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key - '1.0' FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -654,12 +884,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (UDFToDouble(key) - 1.0) (type: double)
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [4]
+                        selectExpressions: DoubleColSubtractDoubleScalar(col 3:double, val 1.0)(children: CastDecimalToDouble(col 0:decimal(20,10)) -> 3:double) -> 4:double
                     Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -667,6 +908,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [double, double]
 
   Stage: Stage-0
     Fetch Operator
@@ -720,10 +976,16 @@ NULL
 0.0
 -1.2345678911234567E9
 1.2345678891234567E9
-PREHOOK: query: EXPLAIN SELECT key * key FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key * key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key * key FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key * key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -738,12 +1000,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (key * key) (type: decimal(38,17))
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: DecimalColMultiplyDecimalColumn(col 0:decimal(20,10), col 0:decimal(20,10)) -> 3:decimal(38,17)
                     Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -751,6 +1024,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [decimal(38,17)]
 
   Stage: Stage-0
     Fetch Operator
@@ -804,10 +1092,16 @@ NULL
 1.00000000000000000
 1524157875323883675.01905199875019052
 1524157875323883652.79682997652796840
-PREHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key, value FROM DECIMAL_UDF where key * value > 0
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key, value FROM DECIMAL_UDF where key * value > 0
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -822,15 +1116,29 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterDecimalColGreaterDecimalScalar(col 4:decimal(31,10), val 0)(children: DecimalColMultiplyDecimalColumn(col 0:decimal(20,10), col 3:decimal(10,0))(children: CastLongToDecimal(col 1:int) -> 3:decimal(10,0)) -> 4:decimal(31,10))
                     predicate: ((key * CAST( value AS decimal(10,0))) > 0) (type: boolean)
                     Statistics: Num rows: 12 Data size: 1392 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: decimal(20,10)), value (type: int)
                       outputColumnNames: _col0, _col1
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumnNums: [0, 1]
                       Statistics: Num rows: 12 Data size: 1392 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
                         Statistics: Num rows: 12 Data size: 1392 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -838,6 +1146,21 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0, 1]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [decimal(10,0), decimal(31,10)]
 
   Stage: Stage-0
     Fetch Operator
@@ -876,10 +1199,16 @@ POSTHOOK: Input: default@decimal_udf
 1.0000000000	1
 -1234567890.1234567890	-1234567890
 1234567890.1234567800	1234567890
-PREHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key * value FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key * value FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -894,12 +1223,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (key * CAST( value AS decimal(10,0))) (type: decimal(31,10))
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [4]
+                        selectExpressions: DecimalColMultiplyDecimalColumn(col 0:decimal(20,10), col 3:decimal(10,0))(children: CastLongToDecimal(col 1:int) -> 3:decimal(10,0)) -> 4:decimal(31,10)
                     Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -907,6 +1247,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0, 1]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [decimal(10,0), decimal(31,10)]
 
   Stage: Stage-0
     Fetch Operator
@@ -960,10 +1315,16 @@ NULL
 1.0000000000
 1524157875171467887.5019052100
 1524157875171467876.3907942000
-PREHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key * (value/2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key * (value/2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -978,12 +1339,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (UDFToDouble(key) * (UDFToDouble(value) / 2.0)) (type: double)
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [4]
+                        selectExpressions: DoubleColMultiplyDoubleColumn(col 3:double, col 5:double)(children: CastDecimalToDouble(col 0:decimal(20,10)) -> 3:double, DoubleColDivideDoubleScalar(col 4:double, val 2.0)(children: CastLongToDouble(col 1:int) -> 4:double) -> 5:double) -> 4:double
                     Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -991,6 +1363,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0, 1]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [double, double, double]
 
   Stage: Stage-0
     Fetch Operator
@@ -1044,10 +1431,16 @@ NULL
 0.5
 7.6207893758573389E17
 7.6207893758573389E17
-PREHOOK: query: EXPLAIN SELECT key * '2.0' FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key * '2.0' FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key * '2.0' FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key * '2.0' FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1062,12 +1455,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (UDFToDouble(key) * 2.0) (type: double)
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [4]
+                        selectExpressions: DoubleColMultiplyDoubleScalar(col 3:double, val 2.0)(children: CastDecimalToDouble(col 0:decimal(20,10)) -> 3:double) -> 4:double
                     Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1075,6 +1479,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [double, double]
 
   Stage: Stage-0
     Fetch Operator
@@ -1128,10 +1547,16 @@ NULL
 2.0
 -2.4691357802469134E9
 2.4691357802469134E9
-PREHOOK: query: EXPLAIN SELECT key / 0 FROM DECIMAL_UDF limit 1
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key / 0 FROM DECIMAL_UDF limit 1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key / 0 FROM DECIMAL_UDF limit 1
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key / 0 FROM DECIMAL_UDF limit 1
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1146,15 +1571,29 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (key / 0) (type: decimal(22,12))
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: DecimalColDivideDecimalScalar(col 0:decimal(20,10), val 0) -> 3:decimal(22,12)
                     Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
+                      Limit Vectorization:
+                          className: VectorLimitOperator
+                          native: true
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
                         Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1162,6 +1601,21 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [decimal(22,12)]
 
   Stage: Stage-0
     Fetch Operator
@@ -1178,60 +1632,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf
 #### A masked pattern was here ####
 NULL
-PREHOOK: query: EXPLAIN SELECT key / NULL FROM DECIMAL_UDF limit 1
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key / NULL FROM DECIMAL_UDF limit 1
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0
 POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_udf
-                  Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: (UDFToDouble(key) / null) (type: double)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
-                    Limit
-                      Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            LLAP IO: all inputs
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 1
-      Processor Tree:
-        ListSink
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
 
-PREHOOK: query: SELECT key / NULL FROM DECIMAL_UDF limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_udf
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key / NULL FROM DECIMAL_UDF limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_udf
-#### A masked pattern was here ####
-NULL
-PREHOOK: query: EXPLAIN SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0
-POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1246,15 +1656,30 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterDecimalColNotEqualDecimalScalar(col 0:decimal(20,10), val 0)
                     predicate: (key <> 0) (type: boolean)
                     Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: (key / key) (type: decimal(38,18))
                       outputColumnNames: _col0
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumnNums: [3]
+                          selectExpressions: DecimalColDivideDecimalColumn(col 0:decimal(20,10), col 0:decimal(20,10)) -> 3:decimal(38,18)
                       Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
                         Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1262,6 +1687,21 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [decimal(38,18)]
 
   Stage: Stage-0
     Fetch Operator
@@ -1311,10 +1751,16 @@ POSTHOOK: Input: default@decimal_udf
 1.000000000000000000
 1.000000000000000000
 1.000000000000000000
-PREHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1329,15 +1775,30 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterLongColNotEqualLongScalar(col 1:int, val 0)
                     predicate: (value <> 0) (type: boolean)
                     Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: (key / CAST( value AS decimal(10,0))) (type: decimal(31,21))
                       outputColumnNames: _col0
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumnNums: [4]
+                          selectExpressions: DecimalColDivideDecimalColumn(col 0:decimal(20,10), col 3:decimal(10,0))(children: CastLongToDecimal(col 1:int) -> 3:decimal(10,0)) -> 4:decimal(31,21)
                       Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
                         Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1345,6 +1806,21 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0, 1]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [decimal(10,0), decimal(31,21)]
 
   Stage: Stage-0
     Fetch Operator
@@ -1384,10 +1860,16 @@ POSTHOOK: Input: default@decimal_udf
 1.000000000000000000000
 1.000000000100000000000
 1.000000000099999992710
-PREHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT key / (value/2) FROM DECIMAL_UDF  WHERE value is not null and value <> 0
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1402,15 +1884,30 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterLongColNotEqualLongScalar(col 1:int, val 0)
                     predicate: (value <> 0) (type: boolean)
                     Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: (UDFToDouble(key) / (UDFToDouble(value) / 2.0)) (type: double)
                       outputColumnNames: _col0
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumnNums: [4]
+                          selectExpressions: DoubleColDivideDoubleColumn(col 3:double, col 5:double)(children: CastDecimalToDouble(col 0:decimal(20,10)) -> 3:double, DoubleColDivideDoubleScalar(col 4:double, val 2.0)(children: CastLongToDouble(col 1:int) -> 4:double) -> 5:double) -> 4:double
                       Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
                         Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1418,6 +1915,21 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0, 1]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [double, double, double]
 
   Stage: Stage-0
     Fetch Operator
@@ -1457,10 +1969,16 @@ POSTHOOK: Input: default@decimal_udf
 2.0
 2.0000000002
 2.0000000002
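
Note that key / (value/2) does not stay in decimal at all: value/2 is integer-over-integer division, which Hive evaluates in double, so the whole expression is planned with the double-typed vectorized expressions above (CastDecimalToDouble, DoubleColDivideDoubleScalar, DoubleColDivideDoubleColumn) and three double scratch columns. A simplified model of that kind of scalar-divide batch loop (illustrative names, not Hive's actual ColumnVector API):

    public final class DoubleDivideSketch {
        // Divide every row of a column array by a scalar in one tight loop
        // per batch -- the pattern behind DoubleColDivideDoubleScalar.
        static void divideByScalar(double[] in, double scalar, double[] out, int n) {
            for (int i = 0; i < n; i++) {
                out[i] = in[i] / scalar;  // no per-row virtual dispatch
            }
        }
        public static void main(String[] args) {
            double[] value = { 1.0, 2.0, -4400.0 };  // hypothetical rows
            double[] half = new double[3];
            divideByScalar(value, 2.0, half, 3);     // models (value / 2.0)
            for (double d : half) System.out.println(d);  // 0.5  1.0  -2200.0
        }
    }
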
-PREHOOK: query: EXPLAIN SELECT 1 + (key / '2.0') FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT 1 + (key / '2.0') FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT 1 + (key / '2.0') FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT 1 + (key / '2.0') FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1475,12 +1993,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (1.0 + (UDFToDouble(key) / 2.0)) (type: double)
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: DoubleScalarAddDoubleColumn(val 1.0, col 4:double)(children: DoubleColDivideDoubleScalar(col 3:double, val 2.0)(children: CastDecimalToDouble(col 0:decimal(20,10)) -> 3:double) -> 4:double) -> 3:double
                     Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1488,6 +2017,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [double, double]
 
   Stage: Stage-0
     Fetch Operator
@@ -1541,10 +2085,16 @@ NULL
 1.5
 -6.172839440617284E8
 6.172839460617284E8
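
The string literal '2.0' is coerced to double, so 1 + (key / '2.0') is also computed entirely in double; that is why the rows above show values like -6.172839440617284E8 rather than exact decimals. The plan's selectExpressions also show scratch-column reuse: column 3 holds the cast, column 4 the quotient, and column 3 is then recycled for the final sum. A small model of that dataflow (plain arrays and BigDecimal standing in for Hive's column vectors):

    import java.math.BigDecimal;

    public final class ScratchColumnSketch {
        public static void main(String[] args) {
            BigDecimal[] col0 = { new BigDecimal("1.0000000000"),
                                  new BigDecimal("-1234567890.1234567890") };
            double[] col3 = new double[2];  // scratch column 3: double
            double[] col4 = new double[2];  // scratch column 4: double
            for (int i = 0; i < 2; i++) col3[i] = col0[i].doubleValue(); // CastDecimalToDouble -> 3
            for (int i = 0; i < 2; i++) col4[i] = col3[i] / 2.0;         // DoubleColDivideDoubleScalar -> 4
            for (int i = 0; i < 2; i++) col3[i] = 1.0 + col4[i];         // DoubleScalarAddDoubleColumn -> 3 (reused)
            for (double d : col3) System.out.println(d);  // 1.5, -6.172839440617284E8
        }
    }
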
-PREHOOK: query: EXPLAIN SELECT abs(key) FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT abs(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT abs(key) FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT abs(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1559,12 +2109,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: abs(key) (type: decimal(20,10))
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: FuncAbsDecimalToDecimal(col 0:decimal(20,10)) -> 3:decimal(20,10)
                     Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1572,6 +2133,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [decimal(20,10)]
 
   Stage: Stage-0
     Fetch Operator
@@ -1625,10 +2201,16 @@ NULL
 1.0000000000
 1234567890.1234567890
 1234567890.1234567800
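
abs keeps the input type (decimal(20,10) in, decimal(20,10) out): the magnitude can never grow, so FuncAbsDecimalToDecimal needs only one scratch column of the same type. Vectorized unary expressions like this also carry the column's null mask along rather than branching per value; a simplified sketch of that pattern (plain arrays, not Hive's actual ColumnVector API):

    import java.math.BigDecimal;

    public final class AbsSketch {
        // Apply abs over a column, skipping rows flagged null; the same mask
        // describes the output column.
        static void abs(BigDecimal[] in, boolean[] isNull, BigDecimal[] out, int n) {
            for (int i = 0; i < n; i++) {
                if (!isNull[i]) out[i] = in[i].abs();
            }
        }
        public static void main(String[] args) {
            BigDecimal[] key = { new BigDecimal("-1234567890.1234567800"), null,
                                 new BigDecimal("1.0000000000") };
            boolean[] isNull = { false, true, false };
            BigDecimal[] out = new BigDecimal[3];
            abs(key, isNull, out, 3);
            for (int i = 0; i < 3; i++) System.out.println(isNull[i] ? "NULL" : out[i]);
        }
    }
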
-PREHOOK: query: EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1647,12 +2229,27 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: key (type: decimal(20,10)), value (type: int)
                     outputColumnNames: key, value
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1]
                     Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(key), count(key), avg(key)
+                      Group By Vectorization:
+                          aggregators: VectorUDAFSumDecimal(col 0:decimal(20,10)) -> decimal(30,10), VectorUDAFCount(col 0:decimal(20,10)) -> bigint, VectorUDAFAvgDecimal(col 0:decimal(20,10)) -> struct<count:bigint,sum:decimal(30,10),input:decimal(20,10)>
+                          className: VectorGroupByOperator
+                          groupByMode: HASH
+                          keyExpressions: col 1:int
+                          native: false
+                          vectorProcessingMode: HASH
+                          projectedOutputColumnNums: [0, 1, 2]
                       keys: value (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
@@ -1661,15 +2258,57 @@ STAGE PLANS:
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
+                        Reduce Sink Vectorization:
+                            className: VectorReduceSinkLongOperator
+                            keyColumnNums: [0]
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            valueColumnNums: [1, 2, 3]
                         Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: decimal(30,10)), _col2 (type: bigint), _col3 (type: struct<count:bigint,sum:decimal(30,10),input:decimal(20,10)>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0, 1]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
         Reducer 2 
             Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: a
+                reduceColumnSortOrder: +
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 4
+                    dataColumns: KEY._col0:int, VALUE._col0:decimal(30,10), VALUE._col1:bigint, VALUE._col2:struct<count:bigint,sum:decimal(30,10),input:decimal(20,10)>
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0), count(VALUE._col1), avg(VALUE._col2)
+                Group By Vectorization:
+                    aggregators: VectorUDAFSumDecimal(col 1:decimal(30,10)) -> decimal(30,10), VectorUDAFCountMerge(col 2:bigint) -> bigint, VectorUDAFAvgDecimalFinal(col 3:struct<count:bigint,sum:decimal(30,10),input:decimal(20,10)>) -> decimal(24,14)
+                    className: VectorGroupByOperator
+                    groupByMode: MERGEPARTIAL
+                    keyExpressions: col 0:int
+                    native: false
+                    vectorProcessingMode: MERGE_PARTIAL
+                    projectedOutputColumnNums: [0, 1, 2]
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
@@ -1677,21 +2316,52 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col0 (type: int), (_col1 / CAST( _col2 AS decimal(19,0))) (type: decimal(38,18)), _col3 (type: decimal(24,14)), _col1 (type: decimal(30,10))
                   outputColumnNames: _col0, _col1, _col2, _col3
+                  Select Vectorization:
+                      className: VectorSelectOperator
+                      native: true
+                      projectedOutputColumnNums: [0, 5, 3, 1]
+                      selectExpressions: DecimalColDivideDecimalColumn(col 1:decimal(30,10), col 4:decimal(19,0))(children: CastLongToDecimal(col 2:bigint) -> 4:decimal(19,0)) -> 5:decimal(38,18)
                   Statistics: Num rows: 19 Data size: 2204 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: int)
                     sort order: +
+                    Reduce Sink Vectorization:
+                        className: VectorReduceSinkObjectHashOperator
+                        keyColumnNums: [0]
+                        native: true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        valueColumnNums: [5, 3, 1]
                     Statistics: Num rows: 19 Data size: 2204 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: decimal(38,18)), _col2 (type: decimal(24,14)), _col3 (type: decimal(30,10))
         Reducer 3 
             Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: a
+                reduceColumnSortOrder: +
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 4
+                    dataColumns: KEY.reducesinkkey0:int, VALUE._col0:decimal(38,18), VALUE._col1:decimal(24,14), VALUE._col2:decimal(30,10)
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: decimal(38,18)), VALUE._col1 (type: decimal(24,14)), VALUE._col2 (type: decimal(30,10))
                 outputColumnNames: _col0, _col1, _col2, _col3
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [0, 1, 2, 3]
                 Statistics: Num rows: 19 Data size: 2204 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  File Sink Vectorization:
+                      className: VectorFileSinkOperator
+                      native: false
                   Statistics: Num rows: 19 Data size: 2204 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1729,10 +2399,16 @@ POSTHOOK: Input: default@decimal_udf
 200	200.000000000000000000	200.00000000000000	200.0000000000
 4400	-4400.000000000000000000	-4400.00000000000000	-4400.0000000000
 1234567890	1234567890.123456780000000000	1234567890.12345678000000	1234567890.1234567800
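
Two different typing paths are visible in this plan. avg(key) widens decimal(20,10) by four digits of both precision and scale to decimal(24,14), carried through the shuffle as the struct<count:bigint,sum:decimal(30,10),input:decimal(20,10)> partial aggregate. The hand-written sum(key) / count(key) instead goes through division typing, where the raw result decimal(50,30) exceeds the 38-digit maximum and must be trimmed back; a sketch of that adjustment (the cap-and-shrink rule as commonly documented for Hive decimals, applied by hand):

    public final class DivCapSketch {
        public static void main(String[] args) {
            int p1 = 30, s1 = 10;  // sum(key):   decimal(30,10)
            int p2 = 19, s2 = 0;   // count(key): cast to decimal(19,0)
            int scale = Math.max(6, s1 + p2 + 1);  // 30
            int precision = p1 - s1 + s2 + scale;  // 50 -- over the 38-digit cap
            if (precision > 38) {
                int intDigits = precision - scale;                     // keep all 20 integer digits
                scale = Math.max(38 - intDigits, Math.min(scale, 6));  // 18
                precision = 38;
            }
            System.out.println("decimal(" + precision + "," + scale + ")");  // decimal(38,18)
        }
    }
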
-PREHOOK: query: EXPLAIN SELECT -key FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT -key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT -key FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT -key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1747,12 +2423,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (- key) (type: decimal(20,10))
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: FuncNegateDecimalToDecimal(col 0:decimal(20,10)) -> 3:decimal(20,10)
                     Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1760,6 +2447,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [decimal(20,10)]
 
   Stage: Stage-0
     Fetch Operator
@@ -1813,10 +2515,16 @@ NULL
 -1.0000000000
 1234567890.1234567890
 -1234567890.1234567800
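
Negation, like abs, is type-preserving: FuncNegateDecimalToDecimal maps decimal(20,10) to decimal(20,10), and the stored scale survives the sign flip, which is why -1234567890.1234567800 above still carries its trailing zero. A one-line check with plain BigDecimal standing in for Hive's decimal:

    import java.math.BigDecimal;

    public final class NegateSketch {
        public static void main(String[] args) {
            BigDecimal key = new BigDecimal("1234567890.1234567800");
            BigDecimal negated = key.negate();
            System.out.println(negated);          // -1234567890.1234567800
            System.out.println(negated.scale());  // 10 -- scale, and hence the type, unchanged
        }
    }
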
-PREHOOK: query: EXPLAIN SELECT +key FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT +key FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT +key FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT +key FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -1962,10 +2670,16 @@ NULL
 1
 -1234567890
 1234567891
-PREHOOK: query: EXPLAIN SELECT FLOOR(key) FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT FLOOR(key) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT FLOOR(key) FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT FLOOR(key) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1980,12 +2694,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: floor(key) (type: decimal(11,0))
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: FuncFloorDecimalToDecimal(col 0:decimal(20,10)) -> 3:decimal(11,0)
                     Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1993,6 +2718,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [decimal(11,0)]
 
   Stage: Stage-0
     Fetch Operator
@@ -2046,10 +2786,16 @@ NULL
 1
 -1234567891
 1234567890
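
FLOOR drops the scale entirely but reserves one extra integer digit: floor(decimal(p,s)) comes out as decimal(p - s + 1, 0), here decimal(11,0). The +1 matters for negative inputs, where flooring a fraction can carry into a new digit. A short demonstration (BigDecimal used as a stand-in):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public final class FloorTypeSketch {
        public static void main(String[] args) {
            int p = 20, s = 10;
            System.out.println("decimal(" + (p - s + 1) + ",0)");    // decimal(11,0)
            // Why the extra digit: floor pushes negative fractions away from zero.
            BigDecimal x = new BigDecimal("-9999999999.5");          // 10 integer digits
            System.out.println(x.setScale(0, RoundingMode.FLOOR));   // -10000000000 -- 11 digits
        }
    }
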
-PREHOOK: query: EXPLAIN SELECT ROUND(key, 2) FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT ROUND(key, 2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT ROUND(key, 2) FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT ROUND(key, 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2064,12 +2810,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: round(key, 2) (type: decimal(13,2))
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 0:decimal(20,10), decimalPlaces 2) -> 3:decimal(13,2)
                     Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -2077,6 +2834,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [decimal(13,2)]
 
   Stage: Stage-0
     Fetch Operator
@@ -2130,10 +2902,16 @@ NULL
 1.00
 -1234567890.12
 1234567890.12
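
ROUND(key, 2) is typed decimal(13,2), which for this case works out to p - s + d + 1 = 20 - 10 + 2 + 1 digits: the surviving integer digits, the requested two fraction digits, and one carry digit. Hive's ROUND is half-up (half away from zero), matching BigDecimal's HALF_UP:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public final class RoundSketch {
        public static void main(String[] args) {
            BigDecimal key = new BigDecimal("1234567890.1234567890");
            System.out.println(key.setScale(2, RoundingMode.HALF_UP));  // 1234567890.12, as in the rows above
        }
    }
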
-PREHOOK: query: EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT POWER(key, 2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT POWER(key, 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2148,12 +2926,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: power(key, 2) (type: double)
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: VectorUDFAdaptor(power(key, 2)) -> 3:double
                     Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -2161,6 +2950,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: true
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0]
+                    dataColumns: key:decimal(20,10), value:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [double]
 
   Stage: Stage-0
     Fetch Operator
@@ -2214,10 +3018,16 @@ NULL
 1.0
 1.52415787532388352E18
 1.52415787532388352E18
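
POWER is the one expression in this file that is not natively vectorized: the plan falls back to VectorUDFAdaptor(power(key, 2)), and the Map Vectorization summary flips to usesVectorUDFAdaptor: true. The adaptor keeps the batch shape but invokes the row-mode UDF once per row; a simplified model of that trade-off (illustrative names, not Hive's actual adaptor code):

    import java.math.BigDecimal;
    import java.util.function.ToDoubleFunction;

    public final class AdaptorSketch {
        // Wrap a row-at-a-time function and call it per row inside the batch
        // loop: correct results, but none of the tight-loop benefit.
        static void applyRowUdf(BigDecimal[] in, double[] out, int n,
                                ToDoubleFunction<BigDecimal> udf) {
            for (int i = 0; i < n; i++) {
                out[i] = udf.applyAsDouble(in[i]);
            }
        }
        public static void main(String[] args) {
            BigDecimal[] key = { new BigDecimal("1234567890.1234567890") };
            double[] out = new double[1];
            applyRowUdf(key, out, 1, x -> Math.pow(x.doubleValue(), 2));
            System.out.println(out[0]);  // ~1.5241578753E18: double rounding, as in the rows above
        }
    }
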
-PREHOOK: query: EXPLAIN SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
+SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2232,12 +3042,23 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf
                   Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: ((key + 1) % (key / 2)) (type: decimal(22,12))
                     outputColumnNames: _col0
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [5]
+                        selectExpressions: DecimalColModuloDecimalColumn(col 3:decimal(21,10), col 4:decimal(22,12))(children: DecimalColAddDecimalScalar(col 0:decimal(20,10), val 1) -> 3:decimal(21,10), DecimalColDivideDecimalScalar(col 0:decimal(20

<TRUNCATED>
