hive-commits mailing list archives

From hashut...@apache.org
Subject svn commit: r1514554 [16/18] - in /hive/branches/vectorization: ./ beeline/src/java/org/apache/hive/beeline/ cli/src/java/org/apache/hadoop/hive/cli/ cli/src/test/org/apache/hadoop/hive/cli/ common/src/java/org/apache/hadoop/hive/conf/ conf/ contrib/sr...
Date Fri, 16 Aug 2013 01:22:02 GMT
Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/multi_join_union.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/multi_join_union.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/multi_join_union.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/multi_join_union.q.out Fri Aug 16 01:21:54 2013
@@ -41,7 +41,7 @@ ABSTRACT SYNTAX TREE:
 
 STAGE DEPENDENCIES:
   Stage-8 is a root stage
-  Stage-7 depends on stages: Stage-8
+  Stage-6 depends on stages: Stage-8
   Stage-0 is a root stage
 
 STAGE PLANS:
@@ -125,7 +125,7 @@ STAGE PLANS:
                       1 [Column[_col1]]
                     Position of Big Table: 0
 
-  Stage: Stage-7
+  Stage: Stage-6
     Map Reduce
       Alias -> Map Operator Tree:
         b 

Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out Fri Aug 16 01:21:54 2013
@@ -42,3 +42,412 @@ STAGE PLANS:
       limit: -1
 
 
+PREHOOK: query: -- This test query is introduced for HIVE-4968.
+-- First, we do not convert the join to MapJoin.
+EXPLAIN
+SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+      FROM (SELECT *
+            FROM (SELECT key, value
+                  FROM src1) tmp1 ) tmp2
+      JOIN (SELECT count(*) as count
+            FROM src1) tmp3
+      ) tmp4 order by key, value, count
+PREHOOK: type: QUERY
+POSTHOOK: query: -- This test query is introduced for HIVE-4968.
+-- First, we do not convert the join to MapJoin.
+EXPLAIN
+SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+      FROM (SELECT *
+            FROM (SELECT key, value
+                  FROM src1) tmp1 ) tmp2
+      JOIN (SELECT count(*) as count
+            FROM src1) tmp3
+      ) tmp4 order by key, value, count
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))))) tmp1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) tmp2) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONSTAR count) count)))) tmp3))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp3) count) count)))) tmp4)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) count) count)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL count)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-3 depends on stages: Stage-2
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        tmp4:tmp3:src1 
+          TableScan
+            alias: src1
+            Select Operator
+              Group By Operator
+                aggregations:
+                      expr: count()
+                bucketGroup: false
+                mode: hash
+                outputColumnNames: _col0
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: bigint
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations:
+                expr: count(VALUE._col0)
+          bucketGroup: false
+          mode: mergepartial
+          outputColumnNames: _col0
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: bigint
+            outputColumnNames: _col0
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+  Stage: Stage-2
+    Map Reduce
+      Alias -> Map Operator Tree:
+        $INTNAME 
+            Reduce Output Operator
+              sort order: 
+              tag: 1
+              value expressions:
+                    expr: _col0
+                    type: bigint
+        tmp4:tmp2:tmp1:src1 
+          TableScan
+            alias: src1
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Reduce Output Operator
+                sort order: 
+                tag: 0
+                value expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          condition expressions:
+            0 {VALUE._col0} {VALUE._col1}
+            1 {VALUE._col0}
+          handleSkewJoin: false
+          outputColumnNames: _col0, _col1, _col2
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: string
+                  expr: _col1
+                  type: string
+                  expr: _col2
+                  type: bigint
+            outputColumnNames: _col0, _col1, _col2
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+  Stage: Stage-3
+    Map Reduce
+      Alias -> Map Operator Tree:
+#### A masked pattern was here ####
+            Reduce Output Operator
+              key expressions:
+                    expr: _col0
+                    type: string
+                    expr: _col1
+                    type: string
+                    expr: _col2
+                    type: bigint
+              sort order: +++
+              tag: -1
+              value expressions:
+                    expr: _col0
+                    type: string
+                    expr: _col1
+                    type: string
+                    expr: _col2
+                    type: bigint
+      Reduce Operator Tree:
+        Extract
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+      FROM (SELECT *
+            FROM (SELECT key, value
+                  FROM src1) tmp1 ) tmp2
+      JOIN (SELECT count(*) as count
+            FROM src1) tmp3
+      ) tmp4 order by key, value, count
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+      FROM (SELECT *
+            FROM (SELECT key, value
+                  FROM src1) tmp1 ) tmp2
+      JOIN (SELECT count(*) as count
+            FROM src1) tmp3
+      ) tmp4 order by key, value, count
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+		25
+		25
+		25
+		25
+	val_165	25
+	val_193	25
+	val_265	25
+	val_27	25
+	val_409	25
+	val_484	25
+128		25
+146	val_146	25
+150	val_150	25
+213	val_213	25
+224		25
+238	val_238	25
+255	val_255	25
+273	val_273	25
+278	val_278	25
+311	val_311	25
+369		25
+401	val_401	25
+406	val_406	25
+66	val_66	25
+98	val_98	25
+PREHOOK: query: -- Then, we convert the join to MapJoin.
+EXPLAIN
+SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+      FROM (SELECT *
+            FROM (SELECT key, value
+                  FROM src1) tmp1 ) tmp2
+      JOIN (SELECT count(*) as count
+            FROM src1) tmp3
+      ) tmp4 order by key, value, count
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Then, we convert the join to MapJoin.
+EXPLAIN
+SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+      FROM (SELECT *
+            FROM (SELECT key, value
+                  FROM src1) tmp1 ) tmp2
+      JOIN (SELECT count(*) as count
+            FROM src1) tmp3
+      ) tmp4 order by key, value, count
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))))) tmp1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) tmp2) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONSTAR count) count)))) tmp3))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp2) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp3) count) count)))) tmp4)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) value) value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp4) count) count)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL count)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-6 depends on stages: Stage-1
+  Stage-3 depends on stages: Stage-6
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        tmp4:tmp3:src1 
+          TableScan
+            alias: src1
+            Select Operator
+              Group By Operator
+                aggregations:
+                      expr: count()
+                bucketGroup: false
+                mode: hash
+                outputColumnNames: _col0
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: bigint
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations:
+                expr: count(VALUE._col0)
+          bucketGroup: false
+          mode: mergepartial
+          outputColumnNames: _col0
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: bigint
+            outputColumnNames: _col0
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+  Stage: Stage-6
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        tmp4:tmp2:tmp1:src1 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        tmp4:tmp2:tmp1:src1 
+          TableScan
+            alias: src1
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              HashTable Sink Operator
+                condition expressions:
+                  0 {_col0} {_col1}
+                  1 {_col0}
+                handleSkewJoin: false
+                keys:
+                  0 []
+                  1 []
+                Position of Big Table: 1
+
+  Stage: Stage-3
+    Map Reduce
+      Alias -> Map Operator Tree:
+        $INTNAME 
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              condition expressions:
+                0 {_col0} {_col1}
+                1 {_col0}
+              handleSkewJoin: false
+              keys:
+                0 []
+                1 []
+              outputColumnNames: _col0, _col1, _col2
+              Position of Big Table: 1
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+                      expr: _col2
+                      type: bigint
+                outputColumnNames: _col0, _col1, _col2
+                Reduce Output Operator
+                  key expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                        expr: _col2
+                        type: bigint
+                  sort order: +++
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                        expr: _col2
+                        type: bigint
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Extract
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+      FROM (SELECT *
+            FROM (SELECT key, value
+                  FROM src1) tmp1 ) tmp2
+      JOIN (SELECT count(*) as count
+            FROM src1) tmp3
+      ) tmp4 order by key, value, count
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
+FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
+      FROM (SELECT *
+            FROM (SELECT key, value
+                  FROM src1) tmp1 ) tmp2
+      JOIN (SELECT count(*) as count
+            FROM src1) tmp3
+      ) tmp4 order by key, value, count
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+		25
+		25
+		25
+		25
+	val_165	25
+	val_193	25
+	val_265	25
+	val_27	25
+	val_409	25
+	val_484	25
+128		25
+146	val_146	25
+150	val_150	25
+213	val_213	25
+224		25
+238	val_238	25
+255	val_255	25
+273	val_273	25
+278	val_278	25
+311	val_311	25
+369		25
+401	val_401	25
+406	val_406	25
+66	val_66	25
+98	val_98	25
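The two EXPLAIN outputs added above for HIVE-4968 differ only in how the join against the single-row count(*) subquery is executed: the first plan shuffles both sides through a common Join Operator in Stage-2, while the second builds a hash table from tmp1 in the Map Reduce Local Work of Stage-6 and probes it with a Map Join Operator in Stage-3. A minimal HiveQL sketch of how the two plans can be reproduced, assuming the conversion is driven by the standard hive.auto.convert.join switch (the actual settings used by nonblock_op_deduplicate.q are not shown in this hunk):

    -- hedged sketch, not the literal test script
    set hive.auto.convert.join=false;   -- expect the common-join plan (Stage-1, Stage-2, Stage-3) above
    EXPLAIN
    SELECT tmp4.key, tmp4.value, tmp4.count
    FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
          FROM (SELECT * FROM (SELECT key, value FROM src1) tmp1) tmp2
          JOIN (SELECT count(*) as count FROM src1) tmp3) tmp4
    ORDER BY key, value, count;

    set hive.auto.convert.join=true;    -- expect the map-join plan (Stage-1, Stage-6, Stage-3) above
    -- same EXPLAIN as before; the join should now appear as a Map Join Operator
    -- fed by a HashTable Sink Operator over tmp4:tmp2:tmp1:src1

In the converted plan the extra shuffle of Stage-2 disappears because the small side is loaded into memory by the local work stage.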

Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/partition_date.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/partition_date.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/partition_date.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/partition_date.q.out Fri Aug 16 01:21:54 2013
@@ -93,12 +93,12 @@ POSTHOOK: Lineage: partition_date_1 PART
 POSTHOOK: Lineage: partition_date_1 PARTITION(dt=2013-08-08,region=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 2000-01-01
 2013-08-08
-PREHOOK: query: select *, cast(dt as timestamp) from partition_date_1 where dt = '2000-01-01' and region = 2
+PREHOOK: query: select *, cast(dt as timestamp) from partition_date_1 where dt = '2000-01-01' and region = 2 order by key,value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partition_date_1
 PREHOOK: Input: default@partition_date_1@dt=2000-01-01/region=2
 #### A masked pattern was here ####
-POSTHOOK: query: select *, cast(dt as timestamp) from partition_date_1 where dt = '2000-01-01' and region = 2
+POSTHOOK: query: select *, cast(dt as timestamp) from partition_date_1 where dt = '2000-01-01' and region = 2 order by key,value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_date_1
 POSTHOOK: Input: default@partition_date_1@dt=2000-01-01/region=2
@@ -111,11 +111,11 @@ POSTHOOK: Lineage: partition_date_1 PART
 POSTHOOK: Lineage: partition_date_1 PARTITION(dt=2013-08-08,region=10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_date_1 PARTITION(dt=2013-08-08,region=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_date_1 PARTITION(dt=2013-08-08,region=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+165	val_165	2000-01-01	2	1969-12-31 16:00:00
 238	val_238	2000-01-01	2	1969-12-31 16:00:00
-86	val_86	2000-01-01	2	1969-12-31 16:00:00
-311	val_311	2000-01-01	2	1969-12-31 16:00:00
 27	val_27	2000-01-01	2	1969-12-31 16:00:00
-165	val_165	2000-01-01	2	1969-12-31 16:00:00
+311	val_311	2000-01-01	2	1969-12-31 16:00:00
+86	val_86	2000-01-01	2	1969-12-31 16:00:00
 PREHOOK: query: -- 15
 select count(*) from partition_date_1 where dt = date '2000-01-01'
 PREHOOK: type: QUERY

Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/partition_date2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/partition_date2.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/partition_date2.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/partition_date2.q.out Fri Aug 16 01:21:54 2013
@@ -8,43 +8,25 @@ POSTHOOK: query: create table partition_
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@partition_date2_1
 PREHOOK: query: -- test date literal syntax
-insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=1)
-  select * from src limit 1
+from (select * from src limit 1) x
+insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=1) select *
+insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2) select *
+insert overwrite table partition_date2_1 partition(dt=date '1999-01-01', region=2) select *
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
+PREHOOK: Output: default@partition_date2_1@dt=1999-01-01/region=2
 PREHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=1
+PREHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=2
 POSTHOOK: query: -- test date literal syntax
-insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=1)
-  select * from src limit 1
+from (select * from src limit 1) x
+insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=1) select *
+insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2) select *
+insert overwrite table partition_date2_1 partition(dt=date '1999-01-01', region=2) select *
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partition_date2_1@dt=1999-01-01/region=2
 POSTHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=1
-POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2)
-  select * from src limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=2
-POSTHOOK: query: insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2)
-  select * from src limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
 POSTHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=2
-POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_date2_1 partition(dt=date '1999-01-01', region=2)
-  select * from src limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@partition_date2_1@dt=1999-01-01/region=2
-POSTHOOK: query: insert overwrite table partition_date2_1 partition(dt=date '1999-01-01', region=2)
-  select * from src limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@partition_date2_1@dt=1999-01-01/region=2
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=1999-01-01,region=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=1999-01-01,region=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
@@ -427,14 +409,14 @@ POSTHOOK: Lineage: partition_date2_1 PAR
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=2).value SIMPLE []
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from partition_date2_1
+PREHOOK: query: select * from partition_date2_1 order by key,value,dt,region
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partition_date2_1
 PREHOOK: Input: default@partition_date2_1@dt=1980-01-02/region=3
 PREHOOK: Input: default@partition_date2_1@dt=2000-01-01/region=1
 PREHOOK: Input: default@partition_date2_1@dt=2000-01-01/region=2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from partition_date2_1
+POSTHOOK: query: select * from partition_date2_1 order by key,value,dt,region
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_date2_1
 POSTHOOK: Input: default@partition_date2_1@dt=1980-01-02/region=3
@@ -452,8 +434,8 @@ POSTHOOK: Lineage: partition_date2_1 PAR
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_date2_1 PARTITION(dt=2000-01-01,region=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 238	val_238	1980-01-02	3
-86	val_86	1980-01-02	3
 238	val_238	2000-01-01	1
+86	val_86	1980-01-02	3
 PREHOOK: query: -- alter table set location
 alter table partition_date2_1 partition(dt=date '1980-01-02', region=3)
 #### A masked pattern was here ####

Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/show_functions.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/show_functions.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/show_functions.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/show_functions.q.out Fri Aug 16 01:21:54 2013
@@ -29,6 +29,7 @@ asin
 assert_true
 atan
 avg
+base64
 between
 bin
 case
@@ -53,11 +54,13 @@ date_sub
 datediff
 day
 dayofmonth
+decode
 degrees
 dense_rank
 div
 e
 elt
+encode
 ewah_bitmap
 ewah_bitmap_and
 ewah_bitmap_empty
@@ -169,6 +172,7 @@ to_utc_timestamp
 translate
 trim
 ucase
+unbase64
 unhex
 unix_timestamp
 upper
@@ -218,7 +222,9 @@ POSTHOOK: type: SHOWFUNCTIONS
 assert_true
 case
 coalesce
+decode
 e
+encode
 explode
 first_value
 from_unixtime
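Among the additions to this listing are four built-ins that operate on binary values: base64, unbase64, encode, and decode. A small usage sketch, assuming the signatures base64(binary), unbase64(string), encode(string, charset), and decode(binary, charset); the commented results are the values one would expect, not output copied from a test run:

    SELECT base64(encode('Hive', 'UTF-8')),       -- expected 'SGl2ZQ=='
           decode(unbase64('SGl2ZQ=='), 'UTF-8')  -- expected 'Hive'
    FROM src LIMIT 1;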

Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out Fri Aug 16 01:21:54 2013
@@ -81,6 +81,52 @@ STAGE PLANS:
         b 
           Fetch Operator
             limit: -1
+            Partition Description:
+                Partition
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    part 1
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part_2
+                    numFiles 2
+                    numRows 500
+                    partition_columns part
+                    rawDataSize 5312
+                    serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part_2
+                      numFiles 2
+                      numPartitions 1
+                      numRows 500
+                      partition_columns part
+                      rawDataSize 5312
+                      serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part_2
+                  name: default.srcbucket_mapjoin_part_2
       Alias -> Map Local Operator Tree:
         b 
           TableScan

Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out Fri Aug 16 01:21:54 2013
@@ -145,6 +145,97 @@ STAGE PLANS:
         b 
           Fetch Operator
             limit: -1
+            Partition Description:
+                Partition
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    part 1
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part_2
+                    numFiles 2
+                    numRows 500
+                    partition_columns part
+                    rawDataSize 5312
+                    serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part_2
+                      numFiles 4
+                      numPartitions 2
+                      numRows 1000
+                      partition_columns part
+                      rawDataSize 10624
+                      serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 11624
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part_2
+                  name: default.srcbucket_mapjoin_part_2
+                Partition
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    part 2
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part_2
+                    numFiles 2
+                    numRows 500
+                    partition_columns part
+                    rawDataSize 5312
+                    serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part_2
+                      numFiles 4
+                      numPartitions 2
+                      numRows 1000
+                      partition_columns part
+                      rawDataSize 10624
+                      serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 11624
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part_2
+                  name: default.srcbucket_mapjoin_part_2
       Alias -> Map Local Operator Tree:
         b 
           TableScan

Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/stats11.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/stats11.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/stats11.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/stats11.q.out Fri Aug 16 01:21:54 2013
@@ -298,6 +298,52 @@ STAGE PLANS:
         b 
           Fetch Operator
             limit: -1
+            Partition Description:
+                Partition
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 4
+                    bucket_field_name key
+                    columns key,value
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part
+                    numFiles 4
+                    numRows 0
+                    partition_columns ds
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 4
+                      bucket_field_name key
+                      columns key,value
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part
+                      numFiles 4
+                      numPartitions 1
+                      numRows 0
+                      partition_columns ds
+                      rawDataSize 0
+                      serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part
+                  name: default.srcbucket_mapjoin_part
       Alias -> Map Local Operator Tree:
         b 
           TableScan

Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/udaf_context_ngrams.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/udaf_context_ngrams.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/udaf_context_ngrams.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/udaf_context_ngrams.q.out Fri Aug 16 01:21:54 2013
@@ -26,7 +26,7 @@ POSTHOOK: query: SELECT context_ngrams(s
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@kafka
 #### A masked pattern was here ####
-[{"ngram":["was"],"estfrequency":17.0},{"ngram":["had"],"estfrequency":16.0},{"ngram":["thought"],"estfrequency":13.0},{"ngram":["could"],"estfrequency":9.0},{"ngram":["would"],"estfrequency":7.0},{"ngram":["lay"],"estfrequency":5.0},{"ngram":["looked"],"estfrequency":4.0},{"ngram":["s"],"estfrequency":4.0},{"ngram":["wanted"],"estfrequency":4.0},{"ngram":["did"],"estfrequency":4.0},{"ngram":["felt"],"estfrequency":4.0},{"ngram":["needed"],"estfrequency":3.0},{"ngram":["must"],"estfrequency":3.0},{"ngram":["told"],"estfrequency":3.0},{"ngram":["lifted"],"estfrequency":3.0},{"ngram":["tried"],"estfrequency":3.0},{"ngram":["finally"],"estfrequency":3.0},{"ngram":["slid"],"estfrequency":3.0},{"ngram":["reported"],"estfrequency":2.0},{"ngram":["drew"],"estfrequency":2.0},{"ngram":["is"],"estfrequency":2.0},{"ngram":["wouldn't"],"estfrequency":2.0},{"ngram":["always"],"estfrequency":2.0},{"ngram":["really"],"estfrequency":2.0},{"ngram":["let"],"estfrequency":2.0},{"ngram":["threw"],"estfrequency":2.0},{"ngram":["found"],"estfrequency":2.0},{"ngram":["also"],"estfrequency":2.0},{"ngram":["made"],"estfrequency":2.0},{"ngram":["didn't"],"estfrequency":2.0},{"ngram":["touched"],"estfrequency":2.0},{"ngram":["do"],"estfrequency":2.0},{"ngram":["began"],"estfrequency":2.0},{"ngram":["preferred"],"estfrequency":1.0},{"ngram":["maintained"],"estfrequency":1.0},{"ngram":["managed"],"estfrequency":1.0},{"ngram":["urged"],"estfrequency":1.0},{"ngram":["will"],"estfrequency":1.0},{"ngram":["failed"],"estfrequency":1.0},{"ngram":["have"],"estfrequency":1.0},{"ngram":["heard"],"estfrequency":1.0},{"ngram":["were"],"estfrequency":1.0},{"ngram":["caught"],"estfrequency":1.0},{"ngram":["hit"],"estfrequency":1.0},{"ngram":["turned"],"estfrequency":1.0},{"ngram":["slowly"],"estfrequency":1.0},{"ngram":["stood"],"estfrequency":1.0},{"ngram":["chose"],"estfrequency":1.0},{"ngram":["swung"],"estfrequency":1.0},{"ngram":["denied"],"estfrequency":1.0},{"ngram":["intended"],"estfrequency":1.0},{"ngram":["became"],"estfrequency":1.0},{"ngram":["sits"],"estfrequency":1.0},{"ngram":["discovered"],"estfrequency":1.0},{"ngram":["called"],"estfrequency":1.0},{"ngram":["never"],"estfrequency":1.0},{"ngram":["cut"],"estfrequency":1.0},{"ngram":["directed"],"estfrequency":1.0},{"ngram":["hoped"],"estfrequency":1.0},{"ngram":["remembered"],"estfrequency":1.0},{"ngram":["said"],"estfrequency":1.0},{"ngram":["allowed"],"estfrequency":1.0},{"ngram":["confined"],"estfrequency":1.0},{"ngram":["almost"],"estfrequency":1.0},{"ngram":["retracted"],"estfrequency":1.0}]
+[{"ngram":["was"],"estfrequency":17.0},{"ngram":["had"],"estfrequency":16.0},{"ngram":["thought"],"estfrequency":13.0},{"ngram":["could"],"estfrequency":9.0},{"ngram":["would"],"estfrequency":7.0},{"ngram":["lay"],"estfrequency":5.0},{"ngram":["did"],"estfrequency":4.0},{"ngram":["felt"],"estfrequency":4.0},{"ngram":["looked"],"estfrequency":4.0},{"ngram":["s"],"estfrequency":4.0},{"ngram":["wanted"],"estfrequency":4.0},{"ngram":["finally"],"estfrequency":3.0},{"ngram":["lifted"],"estfrequency":3.0},{"ngram":["must"],"estfrequency":3.0},{"ngram":["needed"],"estfrequency":3.0},{"ngram":["slid"],"estfrequency":3.0},{"ngram":["told"],"estfrequency":3.0},{"ngram":["tried"],"estfrequency":3.0},{"ngram":["also"],"estfrequency":2.0},{"ngram":["always"],"estfrequency":2.0},{"ngram":["began"],"estfrequency":2.0},{"ngram":["didn't"],"estfrequency":2.0},{"ngram":["do"],"estfrequency":2.0},{"ngram":["drew"],"estfrequency":2.0},{"ngram":["found"],"estfrequency":2.0},{"ngram":["is"],"estfrequency":2.0},{"ngram":["let"],"estfrequency":2.0},{"ngram":["made"],"estfrequency":2.0},{"ngram":["really"],"estfrequency":2.0},{"ngram":["reported"],"estfrequency":2.0},{"ngram":["threw"],"estfrequency":2.0},{"ngram":["touched"],"estfrequency":2.0},{"ngram":["wouldn't"],"estfrequency":2.0},{"ngram":["allowed"],"estfrequency":1.0},{"ngram":["almost"],"estfrequency":1.0},{"ngram":["became"],"estfrequency":1.0},{"ngram":["called"],"estfrequency":1.0},{"ngram":["caught"],"estfrequency":1.0},{"ngram":["chose"],"estfrequency":1.0},{"ngram":["confined"],"estfrequency":1.0},{"ngram":["cut"],"estfrequency":1.0},{"ngram":["denied"],"estfrequency":1.0},{"ngram":["directed"],"estfrequency":1.0},{"ngram":["discovered"],"estfrequency":1.0},{"ngram":["failed"],"estfrequency":1.0},{"ngram":["have"],"estfrequency":1.0},{"ngram":["heard"],"estfrequency":1.0},{"ngram":["hit"],"estfrequency":1.0},{"ngram":["hoped"],"estfrequency":1.0},{"ngram":["intended"],"estfrequency":1.0},{"ngram":["maintained"],"estfrequency":1.0},{"ngram":["managed"],"estfrequency":1.0},{"ngram":["never"],"estfrequency":1.0},{"ngram":["preferred"],"estfrequency":1.0},{"ngram":["remembered"],"estfrequency":1.0},{"ngram":["retracted"],"estfrequency":1.0},{"ngram":["said"],"estfrequency":1.0},{"ngram":["sits"],"estfrequency":1.0},{"ngram":["slowly"],"estfrequency":1.0},{"ngram":["stood"],"estfrequency":1.0},{"ngram":["swung"],"estfrequency":1.0},{"ngram":["turned"],"estfrequency":1.0},{"ngram":["urged"],"estfrequency":1.0},{"ngram":["were"],"estfrequency":1.0},{"ngram":["will"],"estfrequency":1.0}]
 PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null,"salesmen"), 100, 1000) FROM kafka
 PREHOOK: type: QUERY
 PREHOOK: Input: default@kafka

Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/udf_hex.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/udf_hex.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/udf_hex.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/udf_hex.q.out Fri Aug 16 01:21:54 2013
@@ -2,14 +2,14 @@ PREHOOK: query: DESCRIBE FUNCTION hex
 PREHOOK: type: DESCFUNCTION
 POSTHOOK: query: DESCRIBE FUNCTION hex
 POSTHOOK: type: DESCFUNCTION
-hex(n or str) - Convert the argument to hexadecimal 
+hex(n, bin, or str) - Convert the argument to hexadecimal 
 PREHOOK: query: DESCRIBE FUNCTION EXTENDED hex
 PREHOOK: type: DESCFUNCTION
 POSTHOOK: query: DESCRIBE FUNCTION EXTENDED hex
 POSTHOOK: type: DESCFUNCTION
-hex(n or str) - Convert the argument to hexadecimal 
+hex(n, bin, or str) - Convert the argument to hexadecimal 
 If the argument is a string, returns two hex digits for each character in the string.
-If the argument is a number, returns the hexadecimal representation.
+If the argument is a number or binary, returns the hexadecimal representation.
 Example:
   > SELECT hex(17) FROM src LIMIT 1;
   'H1'

Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/udf_sentences.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/udf_sentences.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/udf_sentences.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/udf_sentences.q.out Fri Aug 16 01:21:54 2013
@@ -9,12 +9,12 @@ POSTHOOK: query: CREATE TABLE sent_tmp2 
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@sent_tmp2
 PREHOOK: query: INSERT OVERWRITE TABLE sent_tmp
-SELECT explode(sentences(unhex("486976652065737420756E20657863656C6C656E74206F7574696C20706F7572206C65732072657175C3AA74657320646520646F6E6EC3A965732C20657420706575742DC3AA74726520706C757320706F6C7976616C656E7420717565206C612074726164756374696F6E206175746F6D61746971756521206C6120706F6E6374756174696F6E206D756C7469706C65732C206465732070687261736573206D616C20666F726DC3A96573202E2E2E20636F6E667573696F6E202D20657420706F757274616E742063652055444620666F6E6374696F6E6E6520656E636F72652121"), "fr")) AS val FROM src LIMIT 3
+SELECT explode(sentences(decode(unhex("486976652065737420756E20657863656C6C656E74206F7574696C20706F7572206C65732072657175C3AA74657320646520646F6E6EC3A965732C20657420706575742DC3AA74726520706C757320706F6C7976616C656E7420717565206C612074726164756374696F6E206175746F6D61746971756521206C6120706F6E6374756174696F6E206D756C7469706C65732C206465732070687261736573206D616C20666F726DC3A96573202E2E2E20636F6E667573696F6E202D20657420706F757274616E742063652055444620666F6E6374696F6E6E6520656E636F72652121"), "UTF-8"), "fr")) AS val FROM src LIMIT 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@sent_tmp
 POSTHOOK: query: INSERT OVERWRITE TABLE sent_tmp
-SELECT explode(sentences(unhex("486976652065737420756E20657863656C6C656E74206F7574696C20706F7572206C65732072657175C3AA74657320646520646F6E6EC3A965732C20657420706575742DC3AA74726520706C757320706F6C7976616C656E7420717565206C612074726164756374696F6E206175746F6D61746971756521206C6120706F6E6374756174696F6E206D756C7469706C65732C206465732070687261736573206D616C20666F726DC3A96573202E2E2E20636F6E667573696F6E202D20657420706F757274616E742063652055444620666F6E6374696F6E6E6520656E636F72652121"), "fr")) AS val FROM src LIMIT 3
+SELECT explode(sentences(decode(unhex("486976652065737420756E20657863656C6C656E74206F7574696C20706F7572206C65732072657175C3AA74657320646520646F6E6EC3A965732C20657420706575742DC3AA74726520706C757320706F6C7976616C656E7420717565206C612074726164756374696F6E206175746F6D61746971756521206C6120706F6E6374756174696F6E206D756C7469706C65732C206465732070687261736573206D616C20666F726DC3A96573202E2E2E20636F6E667573696F6E202D20657420706F757274616E742063652055444620666F6E6374696F6E6E6520656E636F72652121"), "UTF-8"), "fr")) AS val FROM src LIMIT 3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@sent_tmp
@@ -126,12 +126,12 @@ POSTHOOK: Output: default@sent_tmp2
 POSTHOOK: Lineage: sent_tmp.val SCRIPT []
 POSTHOOK: Lineage: sent_tmp2.val SCRIPT [(sent_tmp)sent_tmp.FieldSchema(name:val, type:array<string>, comment:null), ]
 PREHOOK: query: INSERT OVERWRITE TABLE sent_tmp
-SELECT explode(sentences(unhex("48697665206973742065696E2061757367657A656963686E65746573205765726B7A6575672066C3BC7220646965204162667261676520766F6E20446174656E2C20756E64207669656C6C6569636874207669656C736569746967657220616C7320646965206D61736368696E656C6C6520C39C6265727365747A756E6721204D756C7469706C652C207363686C6563687420676562696C646574656E2053C3A4747A65202E2E2E205665727765636873656C756E6720496E74657270756E6B74696F6E202D20756E6420646F636820697374206469657365205544462066756E6B74696F6E6965727420696D6D6572206E6F63682121"), "de")) AS val FROM src LIMIT 3
+SELECT explode(sentences(decode(unhex("48697665206973742065696E2061757367657A656963686E65746573205765726B7A6575672066C3BC7220646965204162667261676520766F6E20446174656E2C20756E64207669656C6C6569636874207669656C736569746967657220616C7320646965206D61736368696E656C6C6520C39C6265727365747A756E6721204D756C7469706C652C207363686C6563687420676562696C646574656E2053C3A4747A65202E2E2E205665727765636873656C756E6720496E74657270756E6B74696F6E202D20756E6420646F636820697374206469657365205544462066756E6B74696F6E6965727420696D6D6572206E6F63682121"), "UTF-8"), "de")) AS val FROM src LIMIT 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@sent_tmp
 POSTHOOK: query: INSERT OVERWRITE TABLE sent_tmp
-SELECT explode(sentences(unhex("48697665206973742065696E2061757367657A656963686E65746573205765726B7A6575672066C3BC7220646965204162667261676520766F6E20446174656E2C20756E64207669656C6C6569636874207669656C736569746967657220616C7320646965206D61736368696E656C6C6520C39C6265727365747A756E6721204D756C7469706C652C207363686C6563687420676562696C646574656E2053C3A4747A65202E2E2E205665727765636873656C756E6720496E74657270756E6B74696F6E202D20756E6420646F636820697374206469657365205544462066756E6B74696F6E6965727420696D6D6572206E6F63682121"), "de")) AS val FROM src LIMIT 3
+SELECT explode(sentences(decode(unhex("48697665206973742065696E2061757367657A656963686E65746573205765726B7A6575672066C3BC7220646965204162667261676520766F6E20446174656E2C20756E64207669656C6C6569636874207669656C736569746967657220616C7320646965206D61736368696E656C6C6520C39C6265727365747A756E6721204D756C7469706C652C207363686C6563687420676562696C646574656E2053C3A4747A65202E2E2E205665727765636873656C756E6720496E74657270756E6B74696F6E202D20756E6420646F636820697374206469657365205544462066756E6B74696F6E6965727420696D6D6572206E6F63682121"), "UTF-8"), "de")) AS val FROM src LIMIT 3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@sent_tmp

Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/udf_unhex.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/udf_unhex.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/udf_unhex.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/udf_unhex.q.out Fri Aug 16 01:21:54 2013
@@ -2,24 +2,20 @@ PREHOOK: query: DESCRIBE FUNCTION unhex
 PREHOOK: type: DESCFUNCTION
 POSTHOOK: query: DESCRIBE FUNCTION unhex
 POSTHOOK: type: DESCFUNCTION
-unhex(str) - Converts hexadecimal argument to string
+unhex(str) - Converts hexadecimal argument to binary
 PREHOOK: query: DESCRIBE FUNCTION EXTENDED unhex
 PREHOOK: type: DESCFUNCTION
 POSTHOOK: query: DESCRIBE FUNCTION EXTENDED unhex
 POSTHOOK: type: DESCFUNCTION
-unhex(str) - Converts hexadecimal argument to string
+unhex(str) - Converts hexadecimal argument to binary
 Performs the inverse operation of HEX(str). That is, it interprets
 each pair of hexadecimal digits in the argument as a number and
-converts it to the character represented by the number. The
+converts it to the byte representation of the number. The
 resulting characters are returned as a binary string.
 
 Example:
-> SELECT UNHEX('4D7953514C') from src limit 1;
+> SELECT DECODE(UNHEX('4D7953514C'), 'UTF-8') from src limit 1;
 'MySQL'
-> SELECT UNHEX(HEX('string')) from src limit 1;
-'string'
-> SELECT HEX(UNHEX('1267')) from src limit 1;
-'1267'
 
 The characters in the argument string must be legal hexadecimal
 digits: '0' .. '9', 'A' .. 'F', 'a' .. 'f'. If UNHEX() encounters
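The rewritten description records that unhex() now yields binary rather than string, which is why the remaining example routes the result through decode(..., 'UTF-8') before comparing it to 'MySQL'. A round-trip sketch consistent with that example (the result comments are expected values, not captured output):

    SELECT hex('Facebook'),                        -- expected '46616365626F6F6B'
           decode(unhex('4D7953514C'), 'UTF-8')    -- expected 'MySQL'
    FROM src LIMIT 1;

Feeding unhex() output directly into a string function would now require an explicit decode, as the udf_sentences.q.out changes above illustrate.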

Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/union22.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/union22.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/union22.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/union22.q.out Fri Aug 16 01:21:54 2013
@@ -104,6 +104,51 @@ STAGE PLANS:
         null-subquery2:subq-subquery2:b:dst_union22_delta 
           Fetch Operator
             limit: -1
+            Partition Description:
+                Partition
+                  base file name: ds=1
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 1
+                  properties:
+                    bucket_count -1
+                    columns k0,k1,k2,k3,k4,k5
+                    columns.types string:string:string:string:string:string
+#### A masked pattern was here ####
+                    name default.dst_union22_delta
+                    numFiles 1
+                    numRows 500
+                    partition_columns ds
+                    rawDataSize 16936
+                    serialization.ddl struct dst_union22_delta { string k0, string k1, string k2, string k3, string k4, string k5}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 17436
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns k0,k1,k2,k3,k4,k5
+                      columns.types string:string:string:string:string:string
+#### A masked pattern was here ####
+                      name default.dst_union22_delta
+                      numFiles 1
+                      numPartitions 1
+                      numRows 500
+                      partition_columns ds
+                      rawDataSize 16936
+                      serialization.ddl struct dst_union22_delta { string k0, string k1, string k2, string k3, string k4, string k5}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 17436
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.dst_union22_delta
+                  name: default.dst_union22_delta
       Alias -> Map Local Operator Tree:
         null-subquery2:subq-subquery2:b:dst_union22_delta 
           TableScan

Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/union34.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/union34.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/union34.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/union34.q.out Fri Aug 16 01:21:54 2013
@@ -1,22 +1,6 @@
-PREHOOK: query: -- HIVE-4342
--- Maponly union(UNION-13) is merged into non-maponly union(UNION-15)
--- In this case, task for UNION-13 should be removed from top-task and merged into task for UNION-15
--- TS[2]-SEL[3]-RS[5]-JOIN[6]-SEL[7]-UNION[15]-SEL[16]-RS[17]-EX[18]-FS[19]
--- TS[0]-SEL[1]-RS[4]-JOIN[6]
--- TS[8]-SEL[9]-UNION[13]-SEL[14]-UNION[15]
--- TS[11]-SEL[12]-UNION[13]
-
-create table src10_1 (key string, value string)
+PREHOOK: query: create table src10_1 (key string, value string)
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: -- HIVE-4342
--- Maponly union(UNION-13) is merged into non-maponly union(UNION-15)
--- In this case, task for UNION-13 should be removed from top-task and merged into task for UNION-15
--- TS[2]-SEL[3]-RS[5]-JOIN[6]-SEL[7]-UNION[15]-SEL[16]-RS[17]-EX[18]-FS[19]
--- TS[0]-SEL[1]-RS[4]-JOIN[6]
--- TS[8]-SEL[9]-UNION[13]-SEL[14]-UNION[15]
--- TS[11]-SEL[12]-UNION[13]
-
-create table src10_1 (key string, value string)
+POSTHOOK: query: create table src10_1 (key string, value string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@src10_1
 PREHOOK: query: create table src10_2 (key string, value string)
@@ -64,14 +48,18 @@ POSTHOOK: Lineage: src10_3.key SIMPLE [(
 POSTHOOK: Lineage: src10_3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: src10_4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: src10_4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: explain
+PREHOOK: query: -- When we convert the Join of sub1 and sub0 into a MapJoin,
+-- we can use a single MR job to evaluate this entire query.
+explain
 SELECT * FROM (
   SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key)
   UNION ALL
   SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0
 ) alias1 order by key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: -- When we convert the Join of sub1 and sub0 into a MapJoin,
+-- we can use a single MR job to evaluate this entire query.
+explain
 SELECT * FROM (
   SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key)
   UNION ALL
@@ -91,8 +79,7 @@ ABSTRACT SYNTAX TREE:
 
 STAGE DEPENDENCIES:
   Stage-7 is a root stage
-  Stage-6 depends on stages: Stage-7
-  Stage-2 depends on stages: Stage-6
+  Stage-2 depends on stages: Stage-7
   Stage-0 is a root stage
 
 STAGE PLANS:
@@ -123,7 +110,7 @@ STAGE PLANS:
                   1 [Column[_col0]]
                 Position of Big Table: 1
 
-  Stage: Stage-6
+  Stage: Stage-2
     Map Reduce
       Alias -> Map Operator Tree:
         null-subquery1:alias1-subquery1:sub0:src10_2 
@@ -153,39 +140,25 @@ STAGE PLANS:
                         expr: _col1
                         type: string
                   outputColumnNames: _col0, _col1
-                  File Output Operator
-                    compressed: false
-                    GlobalTableId: 0
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-      Local Work:
-        Map Reduce Local Work
-
-  Stage: Stage-2
-    Map Reduce
-      Alias -> Map Operator Tree:
-#### A masked pattern was here ####
-          TableScan
-            Union
-              Select Operator
-                expressions:
-                      expr: _col0
-                      type: string
-                      expr: _col1
-                      type: string
-                outputColumnNames: _col0, _col1
-                Reduce Output Operator
-                  key expressions:
-                        expr: _col0
-                        type: string
-                  sort order: +
-                  tag: -1
-                  value expressions:
-                        expr: _col0
-                        type: string
-                        expr: _col1
-                        type: string
+                  Union
+                    Select Operator
+                      expressions:
+                            expr: _col0
+                            type: string
+                            expr: _col1
+                            type: string
+                      outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions:
+                              expr: _col0
+                              type: string
+                        sort order: +
+                        tag: -1
+                        value expressions:
+                              expr: _col0
+                              type: string
+                              expr: _col1
+                              type: string
         null-subquery2:alias1-subquery2-subquery1:alias0-subquery1:sub2:src10_3 
           TableScan
             alias: src10_3
@@ -260,6 +233,8 @@ STAGE PLANS:
                               type: string
                               expr: _col1
                               type: string
+      Local Work:
+        Map Reduce Local Work
       Reduce Operator Tree:
         Extract
           File Output Operator
@@ -334,14 +309,22 @@ POSTHOOK: Lineage: src10_4.value SIMPLE 
 98	val_98
 98	val_98
 98	val_98
-PREHOOK: query: explain
+PREHOOK: query: -- When we do not convert the Join of sub1 and sub0 into a MapJoin,
+-- we need to use two MR jobs to evaluate this query.
+-- The first job is for the Join of sub1 and sub0. The second job
+-- is for the UNION ALL and ORDER BY.
+explain
 SELECT * FROM (
   SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key)
   UNION ALL
   SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0
 ) alias1 order by key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: -- When we do not convert the Join of sub1 and sub0 into a MapJoin,
+-- we need to use two MR jobs to evaluate this query.
+-- The first job is for the Join of sub1 and sub0. The second job
+-- is for the UNION ALL and ORDER BY.
+explain
 SELECT * FROM (
   SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key)
   UNION ALL

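The plan difference shown above hinges on whether the join of sub1 and sub0 is converted into a MapJoin, as the new query comments explain. As a rough illustration (not part of this patch), that conversion is normally driven by hive.auto.convert.join; the sketch below assumes HiveConf.ConfVars.HIVECONVERTJOIN is the enum backing that property, as in the HiveConf of this era.

    import org.apache.hadoop.hive.conf.HiveConf;

    public class MapJoinToggleSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Let the optimizer turn the common join into a MapJoin:
        // the whole query then collapses into a single MR job (first EXPLAIN).
        conf.setBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN, true);
        System.out.println(conf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN));
        // Keep the shuffle join: the join plus the UNION ALL / ORDER BY then
        // need two MR jobs (second EXPLAIN).
        conf.setBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN, false);
        System.out.println(conf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN));
      }
    }
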
Modified: hive/branches/vectorization/ql/src/test/results/clientpositive/union_remove_19.q.out
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/results/clientpositive/union_remove_19.q.out?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/results/clientpositive/union_remove_19.q.out (original)
+++ hive/branches/vectorization/ql/src/test/results/clientpositive/union_remove_19.q.out Fri Aug 16 01:21:54 2013
@@ -300,7 +300,7 @@ STAGE PLANS:
             alias: inputtbl1
             Filter Operator
               predicate:
-                  expr: (key = 7.0)
+                  expr: (key = 7)
                   type: boolean
               Select Operator
                 expressions:
@@ -372,7 +372,7 @@ STAGE PLANS:
             alias: inputtbl1
             Filter Operator
               predicate:
-                  expr: (key = 7.0)
+                  expr: (key = 7)
                   type: boolean
               Select Operator
                 expressions:

Modified: hive/branches/vectorization/ql/src/test/templates/TestCliDriver.vm
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/templates/TestCliDriver.vm?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/templates/TestCliDriver.vm (original)
+++ hive/branches/vectorization/ql/src/test/templates/TestCliDriver.vm Fri Aug 16 01:21:54 2013
@@ -25,10 +25,6 @@ import java.io.*;
 import java.util.*;
 
 import org.apache.hadoop.hive.ql.QTestUtil;
-import org.apache.hadoop.hive.ql.history.HiveHistoryViewer;
-import org.apache.hadoop.hive.ql.history.HiveHistory.QueryInfo;
-import org.apache.hadoop.hive.ql.history.HiveHistory.Keys;
-import org.apache.hadoop.hive.ql.history.HiveHistory.TaskInfo;
 import org.apache.hadoop.hive.ql.session.SessionState;
 
 public class $className extends TestCase {
@@ -136,22 +132,6 @@ public class $className extends TestCase
       if (ecode != 0) {
         fail("Client Execution failed with error code = " + ecode + debugHint);
       }
-      if (SessionState.get() != null) {
-        HiveHistoryViewer hv = new HiveHistoryViewer(SessionState.get()
-          .getHiveHistory().getHistFileName());
-        Map<String, QueryInfo> jobInfoMap = hv.getJobInfoMap();
-        Map<String, TaskInfo> taskInfoMap = hv.getTaskInfoMap();
-
-        if(jobInfoMap.size() != 0) {
-          String cmd = (String)jobInfoMap.keySet().toArray()[0];
-          QueryInfo ji = jobInfoMap.get(cmd);
-
-          if (!ji.hm.get(Keys.QUERY_RET_CODE.name()).equals("0")) {
-              fail("Wrong return code in hive history" + debugHint);
-          }
-        }
-      }
-
       ecode = qt.checkCliDriverResults(fname);
       if (ecode != 0) {
         fail("Client execution results failed with error code = " + ecode

Modified: hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java (original)
+++ hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java Fri Aug 16 01:21:54 2013
@@ -123,7 +123,12 @@ public final class LazyBinaryUtils {
     }
   }
 
-  static VInt vInt = new LazyBinaryUtils.VInt();
+  private static ThreadLocal<VInt> vIntThreadLocal = new ThreadLocal<VInt>() {
+    @Override
+    public VInt initialValue() {
+      return new VInt();
+    }
+  };
 
   /**
    * Check a particular field and set its size and offset in bytes based on the
@@ -148,6 +153,7 @@ public final class LazyBinaryUtils {
    */
   public static void checkObjectByteInfo(ObjectInspector objectInspector,
       byte[] bytes, int offset, RecordInfo recordInfo) {
+    VInt vInt = vIntThreadLocal.get();
     Category category = objectInspector.getCategory();
     switch (category) {
     case PRIMITIVE:
@@ -391,9 +397,15 @@ public final class LazyBinaryUtils {
     return 1 + len;
   }
 
-  private static byte[] vLongBytes = new byte[9];
+  private static ThreadLocal<byte[]> vLongBytesThreadLocal = new ThreadLocal<byte[]>() {
+    @Override
+    public byte[] initialValue() {
+      return new byte[9];
+    }
+  };
 
   public static void writeVLong(Output byteStream, long l) {
+    byte[] vLongBytes = vLongBytesThreadLocal.get();
     int len = LazyBinaryUtils.writeVLongToByteArray(vLongBytes, l);
     byteStream.write(vLongBytes, 0, len);
   }

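The LazyBinaryUtils change above swaps two shared static scratch objects (the VInt used by checkObjectByteInfo and the 9-byte buffer used by writeVLong) for ThreadLocal copies, so concurrent serializers no longer race on them. A minimal standalone sketch of the same pattern, with a hypothetical Scratch type standing in for the Hive classes:

    // Illustration only: one scratch buffer per thread instead of one shared static.
    public class ThreadLocalScratchSketch {
      static final class Scratch {
        final byte[] bytes = new byte[9];
        int length;
      }

      private static final ThreadLocal<Scratch> SCRATCH = new ThreadLocal<Scratch>() {
        @Override
        protected Scratch initialValue() {
          return new Scratch();
        }
      };

      // Encodes a long as 7-bit groups into the caller's thread-private scratch.
      static int encode(long value) {
        Scratch s = SCRATCH.get();   // never visible to other threads
        s.length = 0;
        do {
          s.bytes[s.length++] = (byte) (value & 0x7f);
          value >>>= 7;
        } while (value != 0);
        return s.length;
      }

      public static void main(String[] args) {
        System.out.println(encode(300L));  // prints 2
      }
    }
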
Modified: hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/AbstractPrimitiveObjectInspector.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/AbstractPrimitiveObjectInspector.java?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/AbstractPrimitiveObjectInspector.java (original)
+++ hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/AbstractPrimitiveObjectInspector.java Fri Aug 16 01:21:54 2013
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.serde2.obj
 public abstract class AbstractPrimitiveObjectInspector implements
     PrimitiveObjectInspector {
 
-  PrimitiveTypeEntry typeEntry;
+  transient PrimitiveTypeEntry typeEntry;
 
   /**
    * Construct a AbstractPrimitiveObjectInspector.

Modified: hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java (original)
+++ hive/branches/vectorization/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java Fri Aug 16 01:21:54 2013
@@ -323,6 +323,10 @@ public class PrimitiveObjectInspectorCon
 
     @Override
     public Object convert(Object input) {
+      if (input == null) {
+        return null;
+      }
+
       return outputOI.set(r, PrimitiveObjectInspectorUtils.getBinary(input,
           inputOI));
     }

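The added guard above makes the binary converter pass a null input straight through instead of handing it to the underlying setter, matching the behaviour the test below now asserts for every primitive converter. A tiny self-contained sketch of that null-propagation contract, using a hypothetical converter interface rather than Hive's Converter:

    // Hypothetical converter type; the point is only the null pass-through.
    public class NullSafeConverterSketch {
      interface SimpleConverter<I, O> {
        O convert(I input);
      }

      static final SimpleConverter<Integer, String> TO_STRING =
          new SimpleConverter<Integer, String>() {
            @Override
            public String convert(Integer input) {
              if (input == null) {
                return null;  // mirrors the "if (input == null) return null;" guard above
              }
              return Integer.toString(input);
            }
          };

      public static void main(String[] args) {
        System.out.println(TO_STRING.convert(7));     // 7
        System.out.println(TO_STRING.convert(null));  // null
      }
    }
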
Modified: hive/branches/vectorization/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorConverters.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorConverters.java?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorConverters.java (original)
+++ hive/branches/vectorization/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorConverters.java Fri Aug 16 01:21:54 2013
@@ -19,7 +19,6 @@ package org.apache.hadoop.hive.serde2.ob
 
 import junit.framework.TestCase;
 
-
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
@@ -49,6 +48,7 @@ public class TestObjectInspectorConverte
           booleanConverter.convert(Integer.valueOf(0)));
       assertEquals("BooleanConverter", new BooleanWritable(true),
           booleanConverter.convert(Integer.valueOf(1)));
+      assertEquals("BooleanConverter", null, booleanConverter.convert(null));
 
       // Byte
       Converter byteConverter = ObjectInspectorConverters.getConverter(
@@ -58,6 +58,7 @@ public class TestObjectInspectorConverte
           .convert(Integer.valueOf(0)));
       assertEquals("ByteConverter", new ByteWritable((byte) 1), byteConverter
           .convert(Integer.valueOf(1)));
+      assertEquals("ByteConverter", null, byteConverter.convert(null));
 
       // Short
       Converter shortConverter = ObjectInspectorConverters.getConverter(
@@ -67,6 +68,7 @@ public class TestObjectInspectorConverte
           shortConverter.convert(Integer.valueOf(0)));
       assertEquals("ShortConverter", new ShortWritable((short) 1),
           shortConverter.convert(Integer.valueOf(1)));
+      assertEquals("ShortConverter", null, shortConverter.convert(null));
 
       // Int
       Converter intConverter = ObjectInspectorConverters.getConverter(
@@ -76,6 +78,7 @@ public class TestObjectInspectorConverte
           .convert(Integer.valueOf(0)));
       assertEquals("IntConverter", new IntWritable(1), intConverter
           .convert(Integer.valueOf(1)));
+      assertEquals("IntConverter", null, intConverter.convert(null));
 
       // Long
       Converter longConverter = ObjectInspectorConverters.getConverter(
@@ -85,6 +88,7 @@ public class TestObjectInspectorConverte
           .convert(Integer.valueOf(0)));
       assertEquals("LongConverter", new LongWritable(1), longConverter
           .convert(Integer.valueOf(1)));
+      assertEquals("LongConverter", null, longConverter.convert(null));
 
       // Float
       Converter floatConverter = ObjectInspectorConverters.getConverter(
@@ -94,6 +98,7 @@ public class TestObjectInspectorConverte
           .convert(Integer.valueOf(0)));
       assertEquals("LongConverter", new FloatWritable(1), floatConverter
           .convert(Integer.valueOf(1)));
+      assertEquals("LongConverter", null, floatConverter.convert(null));
 
       // Double
       Converter doubleConverter = ObjectInspectorConverters.getConverter(
@@ -103,6 +108,7 @@ public class TestObjectInspectorConverte
           .convert(Integer.valueOf(0)));
       assertEquals("DoubleConverter", new DoubleWritable(1), doubleConverter
           .convert(Integer.valueOf(1)));
+      assertEquals("DoubleConverter", null, doubleConverter.convert(null));
 
       // Text
       Converter textConverter = ObjectInspectorConverters.getConverter(
@@ -112,27 +118,36 @@ public class TestObjectInspectorConverte
           .convert(Integer.valueOf(0)));
       assertEquals("TextConverter", new Text("1"), textConverter
           .convert(Integer.valueOf(1)));
+      assertEquals("TextConverter", null, textConverter.convert(null));
+
       textConverter = ObjectInspectorConverters.getConverter(
           PrimitiveObjectInspectorFactory.writableBinaryObjectInspector,
           PrimitiveObjectInspectorFactory.writableStringObjectInspector);
       assertEquals("TextConverter", new Text("hive"), textConverter
           .convert(new BytesWritable(new byte[]
               {(byte)'h', (byte)'i',(byte)'v',(byte)'e'})));
+      assertEquals("TextConverter", null, textConverter.convert(null));
+
       textConverter = ObjectInspectorConverters.getConverter(
           PrimitiveObjectInspectorFactory.writableStringObjectInspector,
           PrimitiveObjectInspectorFactory.writableStringObjectInspector);
       assertEquals("TextConverter", new Text("hive"), textConverter
 	  .convert(new Text("hive")));
+      assertEquals("TextConverter", null, textConverter.convert(null));
+
       textConverter = ObjectInspectorConverters.getConverter(
           PrimitiveObjectInspectorFactory.javaStringObjectInspector,
           PrimitiveObjectInspectorFactory.writableStringObjectInspector);
       assertEquals("TextConverter", new Text("hive"), textConverter
 	  .convert(new String("hive")));
+      assertEquals("TextConverter", null, textConverter.convert(null));
+
       textConverter = ObjectInspectorConverters.getConverter(
           PrimitiveObjectInspectorFactory.javaHiveDecimalObjectInspector,
           PrimitiveObjectInspectorFactory.writableStringObjectInspector);
       assertEquals("TextConverter", new Text("100.001"), textConverter
 	  .convert(new HiveDecimal("100.001")));
+      assertEquals("TextConverter", null, textConverter.convert(null));
 
       // Binary
       Converter baConverter = ObjectInspectorConverters.getConverter(
@@ -141,12 +156,15 @@ public class TestObjectInspectorConverte
       assertEquals("BAConverter", new BytesWritable(new byte[]
           {(byte)'h', (byte)'i',(byte)'v',(byte)'e'}),
           baConverter.convert("hive"));
+      assertEquals("BAConverter", null, baConverter.convert(null));
+
       baConverter = ObjectInspectorConverters.getConverter(
           PrimitiveObjectInspectorFactory.writableStringObjectInspector,
           PrimitiveObjectInspectorFactory.writableBinaryObjectInspector);
       assertEquals("BAConverter", new BytesWritable(new byte[]
           {(byte)'h', (byte)'i',(byte)'v',(byte)'e'}),
           baConverter.convert(new Text("hive")));
+      assertEquals("BAConverter", null, baConverter.convert(null));
     } catch (Throwable e) {
       e.printStackTrace();
       throw e;

Modified: hive/branches/vectorization/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java (original)
+++ hive/branches/vectorization/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java Fri Aug 16 01:21:54 2013
@@ -20,6 +20,7 @@ package org.apache.hive.service.auth;
 import java.io.IOException;
 
 import javax.security.auth.login.LoginException;
+import javax.security.sasl.Sasl;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -29,8 +30,15 @@ import org.apache.hive.service.cli.thrif
 import org.apache.thrift.TProcessorFactory;
 import org.apache.thrift.transport.TTransportException;
 import org.apache.thrift.transport.TTransportFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.text.MessageFormat;
+import java.util.HashMap;
+import java.util.Map;
 
 public class HiveAuthFactory {
+  private static final Logger LOG = LoggerFactory.getLogger(HiveAuthFactory.class);
 
   public static enum AuthTypes {
     NOSASL("NOSASL"),
@@ -71,13 +79,32 @@ public class HiveAuthFactory {
     }
   }
 
+  public Map<String, String> getSaslProperties() {
+    Map<String, String> saslProps = new HashMap<String, String>();
+    SaslQOP saslQOP =
+            SaslQOP.fromString(conf.getVar(ConfVars.HIVE_SERVER2_THRIFT_SASL_QOP));
+    // hadoop.rpc.protection being set to a higher level than hive.server2.thrift.sasl.qop
+    // does not make sense in most situations. Log a warning message in such cases.
+    Map<String, String> hadoopSaslProps =  ShimLoader.getHadoopThriftAuthBridge().
+            getHadoopSaslProperties(conf);
+    SaslQOP hadoopSaslQOP = SaslQOP.fromString(hadoopSaslProps.get(Sasl.QOP));
+    if(hadoopSaslQOP.ordinal() > saslQOP.ordinal()) {
+      LOG.warn(MessageFormat.format("\"hadoop.rpc.protection\" is set to a higher security level " +
+              "{0} than {1}, which is set to {2}", hadoopSaslQOP.toString(),
+              ConfVars.HIVE_SERVER2_THRIFT_SASL_QOP.varname, saslQOP.toString()));
+    }
+    saslProps.put(Sasl.QOP, saslQOP.toString());
+    saslProps.put(Sasl.SERVER_AUTH, "true");
+    return saslProps;
+  }
+
   public TTransportFactory getAuthTransFactory() throws LoginException {
 
     TTransportFactory transportFactory;
 
     if (authTypeStr.equalsIgnoreCase(AuthTypes.KERBEROS.getAuthName())) {
       try {
-        transportFactory = saslServer.createTransportFactory();
+        transportFactory = saslServer.createTransportFactory(getSaslProperties());
       } catch (TTransportException e) {
         throw new LoginException(e.getMessage());
       }

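getSaslProperties() above turns the configured hive.server2.thrift.sasl.qop into the standard javax.security.sasl property map and warns when Hadoop's own QOP is stronger than the HiveServer2 setting. A rough standalone sketch of that comparison; Qop here is a hypothetical stand-in for Hive's SaslQOP enum (auth < auth-int < auth-conf):

    import java.util.HashMap;
    import java.util.Map;

    import javax.security.sasl.Sasl;

    public class SaslPropsSketch {
      enum Qop {
        AUTH("auth"), AUTH_INT("auth-int"), AUTH_CONF("auth-conf");
        final String value;
        Qop(String value) { this.value = value; }
      }

      static Map<String, String> saslProps(Qop configured, Qop hadoop) {
        if (hadoop.ordinal() > configured.ordinal()) {
          // The patch logs a warning here: hadoop.rpc.protection is stricter than
          // the HiveServer2 QOP, which is usually a misconfiguration.
          System.err.println("warning: hadoop QOP " + hadoop.value
              + " is stronger than configured " + configured.value);
        }
        Map<String, String> props = new HashMap<String, String>();
        props.put(Sasl.QOP, configured.value);   // e.g. "auth-conf"
        props.put(Sasl.SERVER_AUTH, "true");     // server must authenticate to the client
        return props;
      }

      public static void main(String[] args) {
        System.out.println(saslProps(Qop.AUTH, Qop.AUTH_CONF));
      }
    }
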
Modified: hive/branches/vectorization/service/src/java/org/apache/hive/service/auth/KerberosSaslHelper.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/service/src/java/org/apache/hive/service/auth/KerberosSaslHelper.java?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/service/src/java/org/apache/hive/service/auth/KerberosSaslHelper.java (original)
+++ hive/branches/vectorization/service/src/java/org/apache/hive/service/auth/KerberosSaslHelper.java Fri Aug 16 01:21:54 2013
@@ -18,6 +18,7 @@
 package org.apache.hive.service.auth;
 
 import java.io.IOException;
+import java.util.Map;
 
 import javax.security.sasl.SaslException;
 
@@ -56,7 +57,7 @@ public class KerberosSaslHelper {
   }
 
   public static TTransport getKerberosTransport(String principal, String host,
-      final TTransport underlyingTransport) throws SaslException {
+      final TTransport underlyingTransport, Map<String, String> saslProps) throws SaslException {
     try {
       final String names[] = principal.split("[/@]");
       if (names.length != 3) {
@@ -67,7 +68,7 @@ public class KerberosSaslHelper {
       HadoopThriftAuthBridge.Client authBridge =
         ShimLoader.getHadoopThriftAuthBridge().createClientWithConf("kerberos");
       return authBridge.createClientTransport(principal, host,
-          "KERBEROS", null, underlyingTransport);
+          "KERBEROS", null, underlyingTransport, saslProps);
     } catch (IOException e) {
       throw new SaslException("Failed to open client transport", e);
     }

Modified: hive/branches/vectorization/service/src/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/service/src/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/service/src/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java (original)
+++ hive/branches/vectorization/service/src/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java Fri Aug 16 01:21:54 2013
@@ -18,6 +18,7 @@
 
 package org.apache.hive.service.cli.operation;
 
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hive.service.cli.FetchOrientation;
 import org.apache.hive.service.cli.HiveSQLException;
@@ -37,9 +38,14 @@ public class GetTableTypesOperation exte
   .addStringColumn("TABLE_TYPE", "Table type name.");
 
   private RowSet rowSet;
+  private final TableTypeMapping tableTypeMapping;
 
   protected GetTableTypesOperation(HiveSession parentSession) {
     super(parentSession, OperationType.GET_TABLE_TYPES);
+    String tableMappingStr = getParentSession().getHiveConf().
+        getVar(HiveConf.ConfVars.HIVE_SERVER2_TABLE_TYPE_MAPPING);
+    tableTypeMapping =
+      TableTypeMappingFactory.getTableTypeMapping(tableMappingStr);
   }
 
   /* (non-Javadoc)
@@ -51,7 +57,8 @@ public class GetTableTypesOperation exte
     try {
       rowSet = new RowSet();
       for (TableType type : TableType.values()) {
-        rowSet.addRow(RESULT_SET_SCHEMA, new String[] {type.toString()});
+        rowSet.addRow(RESULT_SET_SCHEMA,
+            new String[] {tableTypeMapping.mapToClientType(type.toString())});
       }
       setState(OperationState.FINISHED);
     } catch (Exception e) {

Modified: hive/branches/vectorization/service/src/java/org/apache/hive/service/cli/operation/GetTablesOperation.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/service/src/java/org/apache/hive/service/cli/operation/GetTablesOperation.java?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/service/src/java/org/apache/hive/service/cli/operation/GetTablesOperation.java (original)
+++ hive/branches/vectorization/service/src/java/org/apache/hive/service/cli/operation/GetTablesOperation.java Fri Aug 16 01:21:54 2013
@@ -21,6 +21,7 @@ package org.apache.hive.service.cli.oper
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hive.service.cli.FetchOrientation;
@@ -42,6 +43,7 @@ public class GetTablesOperation extends 
   private final String tableName;
   private final List<String> tableTypes = new ArrayList<String>();
   private final RowSet rowSet = new RowSet();
+  private final TableTypeMapping tableTypeMapping;
 
 
   private static final TableSchema RESULT_SET_SCHEMA = new TableSchema()
@@ -58,6 +60,10 @@ public class GetTablesOperation extends 
     this.catalogName = catalogName;
     this.schemaName = schemaName;
     this.tableName = tableName;
+    String tableMappingStr = getParentSession().getHiveConf().
+        getVar(HiveConf.ConfVars.HIVE_SERVER2_TABLE_TYPE_MAPPING);
+    tableTypeMapping =
+        TableTypeMappingFactory.getTableTypeMapping(tableMappingStr);
     if (tableTypes != null) {
       this.tableTypes.addAll(tableTypes);
     }
@@ -80,10 +86,11 @@ public class GetTablesOperation extends 
               DEFAULT_HIVE_CATALOG,
               table.getDbName(),
               table.getTableName(),
-              table.getTableType(),
+              tableTypeMapping.mapToClientType(table.getTableType()),
               table.getParameters().get("comment")
               };
-          if (tableTypes.isEmpty() || tableTypes.contains(table.getTableType())) {
+          if (tableTypes.isEmpty() || tableTypes.contains(
+                tableTypeMapping.mapToClientType(table.getTableType()))) {
             rowSet.addRow(RESULT_SET_SCHEMA, rowData);
           }
         }

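Both operations above now translate the metastore's internal table-type names (MANAGED_TABLE, EXTERNAL_TABLE, ...) into client-facing names through a TableTypeMapping selected from hive.server2.table.type.mapping. A self-contained sketch of the idea; the mapping table here is illustrative, not a copy of Hive's mapping classes:

    import java.util.HashMap;
    import java.util.Map;

    public class TableTypeMappingSketch {
      // Metastore-internal names on the left, JDBC-style client names on the right.
      private static final Map<String, String> CLIENT_NAMES = new HashMap<String, String>();
      static {
        CLIENT_NAMES.put("MANAGED_TABLE", "TABLE");
        CLIENT_NAMES.put("EXTERNAL_TABLE", "TABLE");
        CLIENT_NAMES.put("VIRTUAL_VIEW", "VIEW");
      }

      static String mapToClientType(String metastoreType) {
        String clientType = CLIENT_NAMES.get(metastoreType);
        // Fall back to the original name when no mapping is defined.
        return clientType != null ? clientType : metastoreType;
      }

      public static void main(String[] args) {
        System.out.println(mapToClientType("MANAGED_TABLE"));  // TABLE
        System.out.println(mapToClientType("INDEX_TABLE"));    // INDEX_TABLE
      }
    }
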
Modified: hive/branches/vectorization/shims/ivy.xml
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/shims/ivy.xml?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/shims/ivy.xml (original)
+++ hive/branches/vectorization/shims/ivy.xml Fri Aug 16 01:21:54 2013
@@ -121,6 +121,12 @@
     </dependency>
 
     <!-- Hadoop 0.20 shim dependencies. Used for building 0.20 shims. -->
+    <dependency org="commons-io" name="commons-io" rev="${commons-io.version}">
+      <include type="jar"/>
+      <exclude org="commons-daemon" module="commons-daemon"/><!--bad POM-->
+      <exclude org="org.apache.commons" module="commons-daemon"/><!--bad POM-->
+    </dependency>
+
     <dependency org="org.apache.hadoop" name="hadoop-core"
                 rev="${hadoop-0.20.version}"
                 conf="hadoop0.20.shim->default">

Modified: hive/branches/vectorization/shims/src/0.20/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/shims/src/0.20/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java?rev=1514554&r1=1514553&r2=1514554&view=diff
==============================================================================
--- hive/branches/vectorization/shims/src/0.20/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java (original)
+++ hive/branches/vectorization/shims/src/0.20/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java Fri Aug 16 01:21:54 2013
@@ -63,6 +63,7 @@ import org.apache.hadoop.mapred.TaskID;
 import org.apache.hadoop.mapred.TaskLogServlet;
 import org.apache.hadoop.mapred.lib.CombineFileInputFormat;
 import org.apache.hadoop.mapred.lib.CombineFileSplit;
+import org.apache.hadoop.mapred.lib.TotalOrderPartitioner;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.security.SecurityUtil;
@@ -71,11 +72,13 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.tools.HadoopArchives;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.util.VersionInfo;
 
 /**
  * Implementation of shims against Hadoop 0.20.0.
  */
 public class Hadoop20Shims implements HadoopShims {
+
   public boolean usesJobShell() {
     return false;
   }
@@ -193,6 +196,10 @@ public class Hadoop20Shims implements Ha
     };
   }
 
+  public void setTotalOrderPartitionFile(JobConf jobConf, Path partitionFile){
+    TotalOrderPartitioner.setPartitionFile(jobConf, partitionFile);
+  }
+
   public static class InputSplitShim extends CombineFileSplit implements HadoopShims.InputSplitShim {
     long shrinkedLength;
     boolean _isShrinked;
@@ -594,13 +601,28 @@ public class Hadoop20Shims implements Ha
   }
 
   @Override
+  public Path createDelegationTokenFile(Configuration conf) throws IOException {
+    throw new UnsupportedOperationException("Tokens are not supported in current hadoop version");
+  }
+
+  @Override
   public UserGroupInformation createRemoteUser(String userName, List<String> groupNames) {
     return new UnixUserGroupInformation(userName, groupNames.toArray(new String[0]));
   }
 
   @Override
   public void loginUserFromKeytab(String principal, String keytabFile) throws IOException {
-    throw new UnsupportedOperationException("Kerberos login is not supported in current hadoop version");
+    throwKerberosUnsupportedError();
+  }
+
+  @Override
+  public void reLoginUserFromKeytab() throws IOException{
+    throwKerberosUnsupportedError();
+  }
+
+  private void throwKerberosUnsupportedError() throws UnsupportedOperationException{
+    throw new UnsupportedOperationException("Kerberos login is not supported" +
+        " in this hadoop version (" + VersionInfo.getVersion() + ")");
   }
 
   @Override
@@ -707,4 +729,11 @@ public class Hadoop20Shims implements Ha
   public short getDefaultReplication(FileSystem fs, Path path) {
     return fs.getDefaultReplication();
   }
+
+  @Override
+  public String getTokenFileLocEnvName() {
+    throw new UnsupportedOperationException(
+        "Kerberos not supported in current hadoop version");
+  }
+
 }

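The Hadoop20Shims additions above either delegate to a 0.20 API (TotalOrderPartitioner.setPartitionFile) or fail fast for security features the 0.20 line lacks, now reporting the detected Hadoop version in the message. A small standalone sketch of that fail-fast pattern; ShimSketch is hypothetical, only VersionInfo comes from Hadoop:

    import org.apache.hadoop.util.VersionInfo;

    public class ShimSketch {
      public void reLoginUserFromKeytab() {
        throwKerberosUnsupportedError();
      }

      // Single place that formats the "unsupported on this Hadoop" message.
      private void throwKerberosUnsupportedError() {
        throw new UnsupportedOperationException("Kerberos login is not supported"
            + " in this hadoop version (" + VersionInfo.getVersion() + ")");
      }

      public static void main(String[] args) {
        try {
          new ShimSketch().reLoginUserFromKeytab();
        } catch (UnsupportedOperationException e) {
          System.out.println(e.getMessage());
        }
      }
    }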

