hive-commits mailing list archives

From: xu...@apache.org
Subject: svn commit: r1660293 [23/48] - in /hive/branches/spark: ./ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/ accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/ accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/mr/ accumul...
Date: Tue, 17 Feb 2015 06:49:34 GMT
Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby9.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby9.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby9.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby9.q.out Tue Feb 17 06:49:27 2015
@@ -30,12 +30,11 @@ INSERT OVERWRITE TABLE DEST2 SELECT SRC.
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-3 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-3
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-5
-  Stage-6 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+  Stage-4 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-2
@@ -44,55 +43,42 @@ STAGE PLANS:
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              key expressions: substr(value, 5) (type: string)
-              sort order: +
-              Map-reduce partition columns: substr(value, 5) (type: string)
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: key (type: string), value (type: string)
-      Reduce Operator Tree:
-        Forward
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          Group By Operator
-            aggregations: count(DISTINCT KEY._col0)
-            keys: VALUE._col0 (type: string)
-            mode: hash
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          Group By Operator
-            aggregations: count(DISTINCT KEY._col0)
-            keys: VALUE._col0 (type: string), VALUE._col1 (type: string)
-            mode: hash
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
+              Group By Operator
+                aggregations: count(DISTINCT substr(value, 5))
+                keys: key (type: string), substr(value, 5) (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: bigint)
+              Group By Operator
+                aggregations: count(DISTINCT substr(value, 5))
+                keys: key (type: string), value (type: string), substr(value, 5) (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
       Reduce Operator Tree:
         Group By Operator
-          aggregations: count(VALUE._col0)
+          aggregations: count(DISTINCT KEY._col1:0._col0)
           keys: KEY._col0 (type: string)
-          mode: final
+          mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
@@ -118,24 +104,23 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest1
 
-  Stage: Stage-4
+  Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-5
+  Stage: Stage-4
     Map Reduce
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string)
-              sort order: ++
+              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+              sort order: +++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
-          aggregations: count(VALUE._col0)
+          aggregations: count(DISTINCT KEY._col2:0._col0)
           keys: KEY._col0 (type: string), KEY._col1 (type: string)
-          mode: final
+          mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
@@ -161,7 +146,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest2
 
-  Stage: Stage-6
+  Stage: Stage-5
     Stats-Aggr Operator
 
 PREHOOK: query: FROM SRC
@@ -829,12 +814,11 @@ INSERT OVERWRITE TABLE DEST2 SELECT SRC.
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-3 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-3
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-5
-  Stage-6 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+  Stage-4 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-2
@@ -843,55 +827,42 @@ STAGE PLANS:
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              key expressions: substr(value, 5) (type: string)
-              sort order: +
-              Map-reduce partition columns: substr(value, 5) (type: string)
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: key (type: string), value (type: string)
-      Reduce Operator Tree:
-        Forward
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          Group By Operator
-            aggregations: count(DISTINCT KEY._col0)
-            keys: VALUE._col0 (type: string)
-            mode: hash
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          Group By Operator
-            aggregations: count(DISTINCT KEY._col0)
-            keys: VALUE._col1 (type: string), VALUE._col0 (type: string)
-            mode: hash
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
+              Group By Operator
+                aggregations: count(DISTINCT substr(value, 5))
+                keys: key (type: string), substr(value, 5) (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: value (type: string), key (type: string)
+              outputColumnNames: value, key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: bigint)
+              Group By Operator
+                aggregations: count(DISTINCT substr(value, 5))
+                keys: value (type: string), key (type: string), substr(value, 5) (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
       Reduce Operator Tree:
         Group By Operator
-          aggregations: count(VALUE._col0)
+          aggregations: count(DISTINCT KEY._col1:0._col0)
           keys: KEY._col0 (type: string)
-          mode: final
+          mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
@@ -917,24 +888,23 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest1
 
-  Stage: Stage-4
+  Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-5
+  Stage: Stage-4
     Map Reduce
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string)
-              sort order: ++
+              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+              sort order: +++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
-          aggregations: count(VALUE._col0)
+          aggregations: count(DISTINCT KEY._col2:0._col0)
           keys: KEY._col0 (type: string), KEY._col1 (type: string)
-          mode: final
+          mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
@@ -960,7 +930,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest2
 
-  Stage: Stage-6
+  Stage: Stage-5
     Stats-Aggr Operator
 
 PREHOOK: query: FROM SRC
@@ -1628,12 +1598,11 @@ INSERT OVERWRITE TABLE DEST2 SELECT SRC.
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-3 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-3
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-5
-  Stage-6 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+  Stage-4 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-2
@@ -1642,55 +1611,42 @@ STAGE PLANS:
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              key expressions: substr(value, 5) (type: string)
-              sort order: +
-              Map-reduce partition columns: substr(value, 5) (type: string)
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: key (type: string), value (type: string)
-      Reduce Operator Tree:
-        Forward
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          Group By Operator
-            aggregations: count(DISTINCT KEY._col0)
-            keys: VALUE._col0 (type: string)
-            mode: hash
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          Group By Operator
-            aggregations: count(DISTINCT KEY._col0)
-            keys: VALUE._col0 (type: string), VALUE._col1 (type: string)
-            mode: hash
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
+              Group By Operator
+                aggregations: count(DISTINCT substr(value, 5))
+                keys: key (type: string), substr(value, 5) (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: bigint)
+              Group By Operator
+                aggregations: count(DISTINCT substr(value, 5))
+                keys: key (type: string), value (type: string), substr(value, 5) (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
       Reduce Operator Tree:
         Group By Operator
-          aggregations: count(VALUE._col0)
+          aggregations: count(DISTINCT KEY._col1:0._col0)
           keys: KEY._col0 (type: string)
-          mode: final
+          mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
@@ -1716,24 +1672,23 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest1
 
-  Stage: Stage-4
+  Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-5
+  Stage: Stage-4
     Map Reduce
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string)
-              sort order: ++
+              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+              sort order: +++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
-          aggregations: count(VALUE._col0)
+          aggregations: count(DISTINCT KEY._col2:0._col0)
           keys: KEY._col0 (type: string), KEY._col1 (type: string)
-          mode: final
+          mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
@@ -1759,7 +1714,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest2
 
-  Stage: Stage-6
+  Stage: Stage-5
     Stats-Aggr Operator
 
 PREHOOK: query: FROM SRC
@@ -3213,12 +3168,11 @@ INSERT OVERWRITE TABLE DEST2 SELECT SRC.
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-3 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-3
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-5
-  Stage-6 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+  Stage-4 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-2
@@ -3227,55 +3181,42 @@ STAGE PLANS:
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              key expressions: substr(value, 5) (type: string)
-              sort order: +
-              Map-reduce partition columns: substr(value, 5) (type: string)
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: key (type: string), value (type: string)
-      Reduce Operator Tree:
-        Forward
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          Group By Operator
-            aggregations: count(DISTINCT KEY._col0)
-            keys: VALUE._col0 (type: string)
-            mode: hash
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          Group By Operator
-            aggregations: count(DISTINCT KEY._col0)
-            keys: VALUE._col1 (type: string), VALUE._col0 (type: string)
-            mode: hash
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
+              Group By Operator
+                aggregations: count(DISTINCT substr(value, 5))
+                keys: key (type: string), substr(value, 5) (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: value (type: string), key (type: string)
+              outputColumnNames: value, key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: bigint)
+              Group By Operator
+                aggregations: count(DISTINCT substr(value, 5))
+                keys: value (type: string), key (type: string), substr(value, 5) (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
       Reduce Operator Tree:
         Group By Operator
-          aggregations: count(VALUE._col0)
+          aggregations: count(DISTINCT KEY._col1:0._col0)
           keys: KEY._col0 (type: string)
-          mode: final
+          mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
@@ -3301,24 +3242,23 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest1
 
-  Stage: Stage-4
+  Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-5
+  Stage: Stage-4
     Map Reduce
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string)
-              sort order: ++
+              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+              sort order: +++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
-          aggregations: count(VALUE._col0)
+          aggregations: count(DISTINCT KEY._col2:0._col0)
           keys: KEY._col0 (type: string), KEY._col1 (type: string)
-          mode: final
+          mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
@@ -3344,7 +3284,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest2
 
-  Stage: Stage-6
+  Stage: Stage-5
     Stats-Aggr Operator
 
 PREHOOK: query: FROM SRC

Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_window.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_window.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_window.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_window.q.out Tue Feb 17 06:49:27 2015
@@ -93,6 +93,25 @@ STAGE PLANS:
           outputColumnNames: _col0, _col2, _col3
           Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
+            Function definitions:
+                Input definition
+                  input alias: ptf_0
+                  output shape: _col0: int, _col2: int, _col3: int
+                  type: WINDOWING
+                Windowing table definition
+                  input alias: ptf_1
+                  name: windowingtablefunction
+                  order by: _col3
+                  partition by: _col0
+                  raw input shape:
+                  window functions:
+                      window function definition
+                        alias: _wcol0
+                        arguments: _col3
+                        name: rank
+                        window function: GenericUDAFRankEvaluator
+                        window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+                        isPivotResult: true
             Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col0 (type: int), _col2 (type: int), _col3 (type: int), _wcol0 (type: int)

Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out Tue Feb 17 06:49:27 2015
@@ -30,200 +30,6 @@ insert overwrite table dest2 select key+
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-3 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-3
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-5
-  Stage-6 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              key expressions: value (type: string)
-              sort order: +
-              Map-reduce partition columns: value (type: string)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: key (type: string), (key + key) (type: double)
-      Reduce Operator Tree:
-        Forward
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          Group By Operator
-            aggregations: count(DISTINCT KEY._col0)
-            keys: VALUE._col0 (type: string)
-            mode: hash
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          Group By Operator
-            aggregations: count(DISTINCT KEY._col0)
-            keys: VALUE._col1 (type: double)
-            mode: hash
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: final
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.dest1
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest1
-
-  Stage: Stage-4
-    Stats-Aggr Operator
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: double)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: double)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: double)
-          mode: final
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.dest2
-
-  Stage: Stage-1
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest2
-
-  Stage: Stage-6
-    Stats-Aggr Operator
-
-PREHOOK: query: from src
-insert overwrite table dest1 select key, count(distinct value) group by key
-insert overwrite table dest2 select key+key, count(distinct value) group by key+key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-PREHOOK: Output: default@dest2
-POSTHOOK: query: from src
-insert overwrite table dest1 select key, count(distinct value) group by key
-insert overwrite table dest2 select key+key, count(distinct value) group by key+key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Output: default@dest2
-POSTHOOK: Lineage: dest1.cnt EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.cnt EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: select * from dest1 where key < 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
-#### A masked pattern was here ####
-POSTHOOK: query: select * from dest1 where key < 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
-#### A masked pattern was here ####
-0	1
-2	1
-4	1
-5	1
-8	1
-9	1
-PREHOOK: query: select * from dest2 where key < 20 order by key limit 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dest2
-#### A masked pattern was here ####
-POSTHOOK: query: select * from dest2 where key < 20 order by key limit 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest2
-#### A masked pattern was here ####
-0	1
-10	1
-16	1
-18	1
-4	1
-8	1
-PREHOOK: query: -- no need to spray by distinct key first
-explain
-from src
-insert overwrite table dest1 select key, count(distinct value) group by key
-insert overwrite table dest2 select key+key, count(distinct value) group by key+key
-PREHOOK: type: QUERY
-POSTHOOK: query: -- no need to spray by distinct key first
-explain
-from src
-insert overwrite table dest1 select key, count(distinct value) group by key
-insert overwrite table dest2 select key+key, count(distinct value) group by key+key
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-2 is a root stage
   Stage-0 depends on stages: Stage-2
   Stage-3 depends on stages: Stage-0
   Stage-4 depends on stages: Stage-2

Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_resolution.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_resolution.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_resolution.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_resolution.q.out Tue Feb 17 06:49:27 2015
@@ -677,6 +677,25 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
+            Function definitions:
+                Input definition
+                  input alias: ptf_0
+                  output shape: _col0: string, _col1: bigint
+                  type: WINDOWING
+                Windowing table definition
+                  input alias: ptf_1
+                  name: windowingtablefunction
+                  order by: _col1
+                  partition by: 0
+                  raw input shape:
+                  window functions:
+                      window function definition
+                        alias: _wcol0
+                        arguments: _col1
+                        name: rank
+                        window function: GenericUDAFRankEvaluator
+                        window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+                        isPivotResult: true
             Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col0 (type: string), _col1 (type: bigint), _wcol0 (type: int)

Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_1.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_1.q.out Tue Feb 17 06:49:27 2015
@@ -4335,7 +4335,7 @@ STAGE PLANS:
                     columns _col0,_col1,_col2,_col3,_col4
                     columns.types string:bigint:string:string:bigint
                     escape.delim \
-                    hive.serialization.extend.nesting.levels true
+                    hive.serialization.extend.additional.nesting.levels true
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out Tue Feb 17 06:49:27 2015
@@ -2406,7 +2406,7 @@ ABSTRACT SYNTAX TREE:
 TOK_QUERY
    TOK_FROM
       TOK_SUBQUERY
-         TOK_UNION
+         TOK_UNIONALL
             TOK_QUERY
                TOK_FROM
                   TOK_TABREF
@@ -2903,7 +2903,7 @@ ABSTRACT SYNTAX TREE:
 TOK_QUERY
    TOK_FROM
       TOK_SUBQUERY
-         TOK_UNION
+         TOK_UNIONALL
             TOK_QUERY
                TOK_FROM
                   TOK_TABREF
@@ -4150,7 +4150,7 @@ STAGE PLANS:
                   columns _col0,_col1,_col2,_col3,_col4
                   columns.types string:bigint:string:string:bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out Tue Feb 17 06:49:27 2015
@@ -4725,7 +4725,7 @@ STAGE PLANS:
                     columns _col0,_col1,_col2,_col3,_col4
                     columns.types string:bigint:string:string:bigint
                     escape.delim \
-                    hive.serialization.extend.nesting.levels true
+                    hive.serialization.extend.additional.nesting.levels true
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out Tue Feb 17 06:49:27 2015
@@ -2731,7 +2731,7 @@ ABSTRACT SYNTAX TREE:
 TOK_QUERY
    TOK_FROM
       TOK_SUBQUERY
-         TOK_UNION
+         TOK_UNIONALL
             TOK_QUERY
                TOK_FROM
                   TOK_TABREF
@@ -3228,7 +3228,7 @@ ABSTRACT SYNTAX TREE:
 TOK_QUERY
    TOK_FROM
       TOK_SUBQUERY
-         TOK_UNION
+         TOK_UNIONALL
             TOK_QUERY
                TOK_FROM
                   TOK_TABREF
@@ -4605,7 +4605,7 @@ STAGE PLANS:
                   columns _col0,_col1,_col2,_col3,_col4
                   columns.types string:bigint:string:string:bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/input23.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/input23.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/input23.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/input23.q.out Tue Feb 17 06:49:27 2015
@@ -161,7 +161,7 @@ STAGE PLANS:
                       columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
                       columns.types string:string:string:string:string:string:string:string
                       escape.delim \
-                      hive.serialization.extend.nesting.levels true
+                      hive.serialization.extend.additional.nesting.levels true
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/input25.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/input25.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/input25.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/input25.q.out Tue Feb 17 06:49:27 2015
@@ -22,16 +22,16 @@ POSTHOOK: Output: default@tst
 POSTHOOK: Output: default@tst@d=2009-02-02
 PREHOOK: query: explain
 select * from (
-  select * from tst x where x.d='2009-01-01' limit 10
+  select * from (select * from tst x where x.d='2009-01-01' limit 10)a
     union all
-  select * from tst x where x.d='2009-02-02' limit 10
+  select * from (select * from tst x where x.d='2009-02-02' limit 10)b
 ) subq
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select * from (
-  select * from tst x where x.d='2009-01-01' limit 10
+  select * from (select * from tst x where x.d='2009-01-01' limit 10)a
     union all
-  select * from tst x where x.d='2009-02-02' limit 10
+  select * from (select * from tst x where x.d='2009-02-02' limit 10)b
 ) subq
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -144,9 +144,9 @@ STAGE PLANS:
         ListSink
 
 PREHOOK: query: select * from (
-  select * from tst x where x.d='2009-01-01' limit 10
+  select * from (select * from tst x where x.d='2009-01-01' limit 10)a
     union all
-  select * from tst x where x.d='2009-02-02' limit 10
+  select * from (select * from tst x where x.d='2009-02-02' limit 10)b
 ) subq
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tst
@@ -154,9 +154,9 @@ PREHOOK: Input: default@tst@d=2009-01-01
 PREHOOK: Input: default@tst@d=2009-02-02
 #### A masked pattern was here ####
 POSTHOOK: query: select * from (
-  select * from tst x where x.d='2009-01-01' limit 10
+  select * from (select * from tst x where x.d='2009-01-01' limit 10)a
     union all
-  select * from tst x where x.d='2009-02-02' limit 10
+  select * from (select * from tst x where x.d='2009-02-02' limit 10)b
 ) subq
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tst

Modified: hive/branches/spark/ql/src/test/results/clientpositive/input26.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/input26.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/input26.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/input26.q.out Tue Feb 17 06:49:27 2015
@@ -1,15 +1,15 @@
 PREHOOK: query: explain
 select * from (
-  select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5
+  select * from (select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
     union all
-  select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5
+  select * from (select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
 )subq
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select * from (
-  select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5
+  select * from (select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
     union all
-  select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5
+  select * from (select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
 )subq
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -113,18 +113,18 @@ STAGE PLANS:
         ListSink
 
 PREHOOK: query: select * from (
-  select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5
+  select * from (select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
     union all
-  select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5
+  select * from (select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
 )subq
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 POSTHOOK: query: select * from (
-  select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5
+  select * from (select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
     union all
-  select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5
+  select * from (select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
 )subq
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart

Modified: hive/branches/spark/ql/src/test/results/clientpositive/input42.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/input42.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/input42.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/input42.q.out Tue Feb 17 06:49:27 2015
@@ -1220,7 +1220,7 @@ STAGE PLANS:
                         columns _col0,_col1,_col2,_col3
                         columns.types string:string:string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1792,7 +1792,7 @@ STAGE PLANS:
                         columns _col0,_col1,_col2,_col3
                         columns.types string:string:string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/input_part7.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/input_part7.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/input_part7.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/input_part7.q.out Tue Feb 17 06:49:27 2015
@@ -19,7 +19,7 @@ ABSTRACT SYNTAX TREE:
 TOK_QUERY
    TOK_FROM
       TOK_SUBQUERY
-         TOK_UNION
+         TOK_UNIONALL
             TOK_QUERY
                TOK_FROM
                   TOK_TABREF
@@ -283,7 +283,7 @@ STAGE PLANS:
                   columns _col0,_col1,_col2,_col3
                   columns.types string:string:string:string
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/input_part9.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/input_part9.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/input_part9.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/input_part9.q.out Tue Feb 17 06:49:27 2015
@@ -71,7 +71,7 @@ STAGE PLANS:
                         columns _col0,_col1,_col2,_col3
                         columns.types string:string:string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/join34.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/join34.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/join34.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/join34.q.out Tue Feb 17 06:49:27 2015
@@ -40,7 +40,7 @@ TOK_QUERY
    TOK_FROM
       TOK_JOIN
          TOK_SUBQUERY
-            TOK_UNION
+            TOK_UNIONALL
                TOK_QUERY
                   TOK_FROM
                      TOK_TABREF

Modified: hive/branches/spark/ql/src/test/results/clientpositive/join35.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/join35.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/join35.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/join35.q.out Tue Feb 17 06:49:27 2015
@@ -40,7 +40,7 @@ TOK_QUERY
    TOK_FROM
       TOK_JOIN
          TOK_SUBQUERY
-            TOK_UNION
+            TOK_UNIONALL
                TOK_QUERY
                   TOK_FROM
                      TOK_TABREF

Modified: hive/branches/spark/ql/src/test/results/clientpositive/join_filters_overlap.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/join_filters_overlap.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/join_filters_overlap.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/join_filters_overlap.q.out Tue Feb 17 06:49:27 2015
@@ -229,7 +229,7 @@ STAGE PLANS:
                     columns _col0,_col1,_col2,_col3,_col4,_col5
                     columns.types int:int:int:int:int:int
                     escape.delim \
-                    hive.serialization.extend.nesting.levels true
+                    hive.serialization.extend.additional.nesting.levels true
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -480,7 +480,7 @@ STAGE PLANS:
                     columns _col0,_col1,_col2,_col3,_col4,_col5
                     columns.types int:int:int:int:int:int
                     escape.delim \
-                    hive.serialization.extend.nesting.levels true
+                    hive.serialization.extend.additional.nesting.levels true
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -745,7 +745,7 @@ STAGE PLANS:
                     columns _col0,_col1,_col2,_col3,_col4,_col5
                     columns.types int:int:int:int:int:int
                     escape.delim \
-                    hive.serialization.extend.nesting.levels true
+                    hive.serialization.extend.additional.nesting.levels true
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1040,7 +1040,7 @@ STAGE PLANS:
                     columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
                     columns.types int:int:int:int:int:int:int:int
                     escape.delim \
-                    hive.serialization.extend.nesting.levels true
+                    hive.serialization.extend.additional.nesting.levels true
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1329,7 +1329,7 @@ STAGE PLANS:
                     columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
                     columns.types int:int:int:int:int:int:int:int
                     escape.delim \
-                    hive.serialization.extend.nesting.levels true
+                    hive.serialization.extend.additional.nesting.levels true
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/limit_pushdown.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/limit_pushdown.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/limit_pushdown.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/limit_pushdown.q.out Tue Feb 17 06:49:27 2015
@@ -352,34 +352,34 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: alltypesorc
-            Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: cdouble (type: double)
               outputColumnNames: _col0
-              Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 keys: _col0 (type: double)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: double)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: double)
-                  Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.3
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: double)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 20
-            Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -435,22 +435,22 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: alltypesorc
-            Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ctinyint (type: tinyint), cdouble (type: double)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(DISTINCT _col1)
                 keys: _col0 (type: tinyint), _col1 (type: double)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: tinyint), _col1 (type: double)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: tinyint)
-                  Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.3
       Reduce Operator Tree:
         Group By Operator
@@ -458,13 +458,13 @@ STAGE PLANS:
           keys: KEY._col0 (type: tinyint)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 20
-            Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -522,22 +522,22 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: alltypesorc
-            Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ctinyint (type: tinyint), cstring1 (type: string), cstring2 (type: string)
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(DISTINCT _col1), count(DISTINCT _col2)
                 keys: _col0 (type: tinyint), _col1 (type: string), _col2 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: tinyint), _col1 (type: string), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: tinyint)
-                  Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.3
       Reduce Operator Tree:
         Group By Operator
@@ -545,13 +545,13 @@ STAGE PLANS:
           keys: KEY._col0 (type: tinyint)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 20
-            Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out Tue Feb 17 06:49:27 2015
@@ -466,7 +466,7 @@ STAGE PLANS:
                         columns _col0,_col1
                         columns.types string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out Tue Feb 17 06:49:27 2015
@@ -338,7 +338,7 @@ STAGE PLANS:
                         columns _col0,_col1
                         columns.types string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out Tue Feb 17 06:49:27 2015
@@ -351,7 +351,7 @@ STAGE PLANS:
                         columns _col0,_col1,_col2,_col3,_col4,_col5,_col6
                         columns.types string:string:string:string:string:string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -509,7 +509,7 @@ STAGE PLANS:
                         columns _col0,_col1,_col2,_col3,_col4,_col5,_col6
                         columns.types string:string:string:string:string:string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out Tue Feb 17 06:49:27 2015
@@ -351,7 +351,7 @@ STAGE PLANS:
                         columns _col0,_col1,_col2,_col3,_col4,_col5,_col6
                         columns.types string:string:string:string:string:string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out Tue Feb 17 06:49:27 2015
@@ -309,7 +309,7 @@ STAGE PLANS:
                         columns _col0,_col1
                         columns.types string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out Tue Feb 17 06:49:27 2015
@@ -437,7 +437,7 @@ STAGE PLANS:
                         columns _col0,_col1,_col2,_col3
                         columns.types string:string:string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out Tue Feb 17 06:49:27 2015
@@ -406,7 +406,7 @@ STAGE PLANS:
                         columns _col0,_col1
                         columns.types string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out Tue Feb 17 06:49:27 2015
@@ -848,7 +848,7 @@ STAGE PLANS:
                         columns _col0,_col1,_col2,_col3
                         columns.types string:string:string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out Tue Feb 17 06:49:27 2015
@@ -481,7 +481,7 @@ STAGE PLANS:
                         columns _col0,_col1,_col2,_col3
                         columns.types string:string:string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out Tue Feb 17 06:49:27 2015
@@ -848,7 +848,7 @@ STAGE PLANS:
                         columns _col0,_col1,_col2,_col3
                         columns.types string:string:string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out Tue Feb 17 06:49:27 2015
@@ -189,7 +189,7 @@ STAGE PLANS:
                         columns _col0
                         columns.types string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -349,7 +349,7 @@ STAGE PLANS:
                         columns _col0,_col1
                         columns.types string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -502,7 +502,7 @@ STAGE PLANS:
                         columns _col0
                         columns.types string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -656,7 +656,7 @@ STAGE PLANS:
                         columns _col0,_col1
                         columns.types string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out Tue Feb 17 06:49:27 2015
@@ -187,7 +187,7 @@ STAGE PLANS:
                         columns _col0,_col1
                         columns.types string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -386,7 +386,7 @@ STAGE PLANS:
                         columns _col0
                         columns.types string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -559,7 +559,7 @@ STAGE PLANS:
                         columns _col0,_col1
                         columns.types string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out Tue Feb 17 06:49:27 2015
@@ -305,7 +305,7 @@ STAGE PLANS:
                         columns _col0,_col1,_col2,_col3
                         columns.types string:string:string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -511,7 +511,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -614,7 +614,7 @@ STAGE PLANS:
                         columns _col0,_col1,_col2,_col3
                         columns.types string:string:string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -768,7 +768,7 @@ STAGE PLANS:
                         columns _col0,_col1,_col2,_col3
                         columns.types string:string:string:string
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out?rev=1660293&r1=1660292&r2=1660293&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out Tue Feb 17 06:49:27 2015
@@ -242,7 +242,7 @@ STAGE PLANS:
                         columns _col0
                         columns.types int
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -389,7 +389,7 @@ STAGE PLANS:
                         columns _col0
                         columns.types int
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -532,7 +532,7 @@ STAGE PLANS:
                         columns _col0
                         columns.types int
                         escape.delim \
-                        hive.serialization.extend.nesting.levels true
+                        hive.serialization.extend.additional.nesting.levels true
                         serialization.format 1
                         serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe


