hive-commits mailing list archives

From xu...@apache.org
Subject svn commit: r1656573 [6/6] - in /hive/branches/spark: data/conf/spark/ data/conf/spark/standalone/ data/conf/spark/yarn-client/ itests/ itests/qtest-spark/ itests/src/test/resources/ itests/util/src/main/java/org/apache/hadoop/hive/ql/ ql/src/java/org/...
Date Mon, 02 Feb 2015 21:10:08 GMT
Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/quotedid_smb.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/quotedid_smb.q.out?rev=1656573&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/quotedid_smb.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/quotedid_smb.q.out Mon Feb  2 21:10:08 2015
@@ -0,0 +1,81 @@
+PREHOOK: query: create table src_b(`x+1` string, `!@#$%^&*()_q` string)  
+clustered by (`!@#$%^&*()_q`) sorted by (`!@#$%^&*()_q`) into 2 buckets
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_b
+POSTHOOK: query: create table src_b(`x+1` string, `!@#$%^&*()_q` string)  
+clustered by (`!@#$%^&*()_q`) sorted by (`!@#$%^&*()_q`) into 2 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_b
+PREHOOK: query: insert overwrite table src_b
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_b
+POSTHOOK: query: insert overwrite table src_b
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_b
+POSTHOOK: Lineage: src_b.!@#$%^&*()_q SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_b.x+1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: create table src_b2(`x+1` string, `!@#$%^&*()_q` string)  
+clustered by (`!@#$%^&*()_q`) sorted by (`!@#$%^&*()_q`) into 2 buckets
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_b2
+POSTHOOK: query: create table src_b2(`x+1` string, `!@#$%^&*()_q` string)  
+clustered by (`!@#$%^&*()_q`) sorted by (`!@#$%^&*()_q`) into 2 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_b2
+PREHOOK: query: insert overwrite table src_b2
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_b2
+POSTHOOK: query: insert overwrite table src_b2
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_b2
+POSTHOOK: Lineage: src_b2.!@#$%^&*()_q SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_b2.x+1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: select a.`x+1`, a.`!@#$%^&*()_q`, b.`x+1`, b.`!@#$%^&*()_q`
+from src_b a join src_b2 b on a.`!@#$%^&*()_q` = b.`!@#$%^&*()_q`
+where a.`x+1` < '11'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_b
+PREHOOK: Input: default@src_b2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.`x+1`, a.`!@#$%^&*()_q`, b.`x+1`, b.`!@#$%^&*()_q`
+from src_b a join src_b2 b on a.`!@#$%^&*()_q` = b.`!@#$%^&*()_q`
+where a.`x+1` < '11'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_b
+POSTHOOK: Input: default@src_b2
+#### A masked pattern was here ####
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+103	val_103	103	val_103
+103	val_103	103	val_103
+103	val_103	103	val_103
+103	val_103	103	val_103
+105	val_105	105	val_105
+10	val_10	10	val_10
+100	val_100	100	val_100
+100	val_100	100	val_100
+100	val_100	100	val_100
+100	val_100	100	val_100
+104	val_104	104	val_104
+104	val_104	104	val_104
+104	val_104	104	val_104
+104	val_104	104	val_104

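The quotedid_smb output above exercises Hive's back-quoted identifier support (hive.support.quoted.identifiers=column, the default setting), which makes column names like `x+1` and `!@#$%^&*()_q` legal. As a minimal sketch of issuing the same kind of query from client code, assuming a HiveServer2 endpoint at an illustrative address and the src_b table from the test:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class QuotedIdExample {
  public static void main(String[] args) throws Exception {
    // Illustrative endpoint; any HiveServer2 instance holding the
    // src_b table created in the test above would do.
    Class.forName("org.apache.hive.jdbc.HiveDriver");
    try (Connection conn = DriverManager.getConnection(
             "jdbc:hive2://localhost:10000/default");
         Statement stmt = conn.createStatement();
         // Back-quotes protect the otherwise-illegal column names.
         ResultSet rs = stmt.executeQuery(
             "select a.`x+1`, a.`!@#$%^&*()_q` from src_b a where a.`x+1` < '11'")) {
      while (rs.next()) {
        System.out.println(rs.getString(1) + "\t" + rs.getString(2));
      }
    }
  }
}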
Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/reduce_deduplicate.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/reduce_deduplicate.q.out?rev=1656573&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/reduce_deduplicate.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/reduce_deduplicate.q.out Mon Feb  2 21:10:08 2015
@@ -0,0 +1,479 @@
+PREHOOK: query: CREATE TABLE bucket5_1(key string, value string) CLUSTERED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket5_1
+POSTHOOK: query: CREATE TABLE bucket5_1(key string, value string) CLUSTERED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket5_1
+PREHOOK: query: explain extended
+insert overwrite table bucket5_1
+select * from src cluster by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucket5_1
+select * from src cluster by key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            src
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_TAB
+            TOK_TABNAME
+               bucket5_1
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_ALLCOLREF
+      TOK_CLUSTERBY
+         TOK_TABLE_OR_COL
+            key
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      tag: -1
+                      value expressions: _col1 (type: string)
+                      auto parallelism: false
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE true
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src
+                    numFiles 1
+                    numRows 500
+                    rawDataSize 5312
+                    serialization.ddl struct src { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE true
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src
+                      numFiles 1
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src
+                  name: default.src
+            Truncated Path -> Alias:
+              /src [src]
+        Reducer 2 
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 1
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 2
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        bucket_count 2
+                        bucket_field_name key
+                        columns key,value
+                        columns.comments 
+                        columns.types string:string
+#### A masked pattern was here ####
+                        name default.bucket5_1
+                        serialization.ddl struct bucket5_1 { string key, string value}
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.bucket5_1
+                  TotalFiles: 2
+                  GatherStats: true
+                  MultiFileSpray: true
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count 2
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket5_1
+                serialization.ddl struct bucket5_1 { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucket5_1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucket5_1
+select * from src cluster by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket5_1
+POSTHOOK: query: insert overwrite table bucket5_1
+select * from src cluster by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket5_1
+POSTHOOK: Lineage: bucket5_1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket5_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select sum(hash(key)),sum(hash(value)) from bucket5_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket5_1
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)),sum(hash(value)) from bucket5_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket5_1
+#### A masked pattern was here ####
+21025334	36210398070
+PREHOOK: query: select sum(hash(key)),sum(hash(value)) from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)),sum(hash(value)) from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+21025334	36210398070
+PREHOOK: query: create table complex_tbl_1(aid string, bid string, t int, ctime string, etime bigint, l string, et string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@complex_tbl_1
+POSTHOOK: query: create table complex_tbl_1(aid string, bid string, t int, ctime string, etime bigint, l string, et string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@complex_tbl_1
+PREHOOK: query: create table complex_tbl_2(aet string, aes string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@complex_tbl_2
+POSTHOOK: query: create table complex_tbl_2(aet string, aes string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@complex_tbl_2
+PREHOOK: query: explain extended
+insert overwrite table complex_tbl_1 partition (ds='2010-03-29')
+select s2.* from
+(
+ select TRANSFORM (aid,bid,t,ctime,etime,l,et)
+ USING 'cat'
+ AS (aid string, bid string, t int, ctime string, etime bigint, l string, et string)
+ from
+  (
+   select transform(aet,aes)
+   using 'cat'
+   as (aid string, bid string, t int, ctime string, etime bigint, l string, et string)
+   from complex_tbl_2 where ds ='2010-03-29' cluster by bid
+)s
+)s2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table complex_tbl_1 partition (ds='2010-03-29')
+select s2.* from
+(
+ select TRANSFORM (aid,bid,t,ctime,etime,l,et)
+ USING 'cat'
+ AS (aid string, bid string, t int, ctime string, etime bigint, l string, et string)
+ from
+  (
+   select transform(aet,aes)
+   using 'cat'
+   as (aid string, bid string, t int, ctime string, etime bigint, l string, et string)
+   from complex_tbl_2 where ds ='2010-03-29' cluster by bid
+)s
+)s2
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_SUBQUERY
+         TOK_QUERY
+            TOK_FROM
+               TOK_SUBQUERY
+                  TOK_QUERY
+                     TOK_FROM
+                        TOK_TABREF
+                           TOK_TABNAME
+                              complex_tbl_2
+                     TOK_INSERT
+                        TOK_DESTINATION
+                           TOK_DIR
+                              TOK_TMP_FILE
+                        TOK_SELECT
+                           TOK_SELEXPR
+                              TOK_TRANSFORM
+                                 TOK_EXPLIST
+                                    TOK_TABLE_OR_COL
+                                       aet
+                                    TOK_TABLE_OR_COL
+                                       aes
+                                 TOK_SERDE
+                                 TOK_RECORDWRITER
+                                 'cat'
+                                 TOK_SERDE
+                                 TOK_RECORDREADER
+                                 TOK_TABCOLLIST
+                                    TOK_TABCOL
+                                       aid
+                                       TOK_STRING
+                                    TOK_TABCOL
+                                       bid
+                                       TOK_STRING
+                                    TOK_TABCOL
+                                       t
+                                       TOK_INT
+                                    TOK_TABCOL
+                                       ctime
+                                       TOK_STRING
+                                    TOK_TABCOL
+                                       etime
+                                       TOK_BIGINT
+                                    TOK_TABCOL
+                                       l
+                                       TOK_STRING
+                                    TOK_TABCOL
+                                       et
+                                       TOK_STRING
+                        TOK_WHERE
+                           =
+                              TOK_TABLE_OR_COL
+                                 ds
+                              '2010-03-29'
+                        TOK_CLUSTERBY
+                           TOK_TABLE_OR_COL
+                              bid
+                  s
+            TOK_INSERT
+               TOK_DESTINATION
+                  TOK_DIR
+                     TOK_TMP_FILE
+               TOK_SELECT
+                  TOK_SELEXPR
+                     TOK_TRANSFORM
+                        TOK_EXPLIST
+                           TOK_TABLE_OR_COL
+                              aid
+                           TOK_TABLE_OR_COL
+                              bid
+                           TOK_TABLE_OR_COL
+                              t
+                           TOK_TABLE_OR_COL
+                              ctime
+                           TOK_TABLE_OR_COL
+                              etime
+                           TOK_TABLE_OR_COL
+                              l
+                           TOK_TABLE_OR_COL
+                              et
+                        TOK_SERDE
+                        TOK_RECORDWRITER
+                        'cat'
+                        TOK_SERDE
+                        TOK_RECORDREADER
+                        TOK_TABCOLLIST
+                           TOK_TABCOL
+                              aid
+                              TOK_STRING
+                           TOK_TABCOL
+                              bid
+                              TOK_STRING
+                           TOK_TABCOL
+                              t
+                              TOK_INT
+                           TOK_TABCOL
+                              ctime
+                              TOK_STRING
+                           TOK_TABCOL
+                              etime
+                              TOK_BIGINT
+                           TOK_TABCOL
+                              l
+                              TOK_STRING
+                           TOK_TABCOL
+                              et
+                              TOK_STRING
+         s2
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_TAB
+            TOK_TABNAME
+               complex_tbl_1
+            TOK_PARTSPEC
+               TOK_PARTVAL
+                  ds
+                  '2010-03-29'
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_ALLCOLREF
+               TOK_TABNAME
+                  s2
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+        Reducer 2 
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: int), VALUE._col3 (type: string), VALUE._col4 (type: bigint), VALUE._col5 (type: string), VALUE._col6 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Transform Operator
+                  command: cat
+                  output info:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        columns _col0,_col1,_col2,_col3,_col4,_col5,_col6
+                        columns.types string,string,int,string,bigint,string,string
+                        field.delim 9
+                        serialization.format 9
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: bigint), _col5 (type: string), _col6 (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      GlobalTableId: 1
+#### A masked pattern was here ####
+                      NumFilesPerFileSink: 1
+                      Static Partition Specification: ds=2010-03-29/
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+#### A masked pattern was here ####
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          properties:
+                            bucket_count -1
+                            columns aid,bid,t,ctime,etime,l,et
+                            columns.comments 
+                            columns.types string:string:int:string:bigint:string:string
+#### A masked pattern was here ####
+                            name default.complex_tbl_1
+                            partition_columns ds
+                            partition_columns.types string
+                            serialization.ddl struct complex_tbl_1 { string aid, string bid, i32 t, string ctime, i64 etime, string l, string et}
+                            serialization.format 1
+                            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.complex_tbl_1
+                      TotalFiles: 1
+                      GatherStats: true
+                      MultiFileSpray: false
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2010-03-29
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns aid,bid,t,ctime,etime,l,et
+                columns.comments 
+                columns.types string:string:int:string:bigint:string:string
+#### A masked pattern was here ####
+                name default.complex_tbl_1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct complex_tbl_1 { string aid, string bid, i32 t, string ctime, i64 etime, string l, string et}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.complex_tbl_1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/remote_script.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/remote_script.q.out?rev=1656573&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/remote_script.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/remote_script.q.out Mon Feb  2 21:10:08 2015
@@ -0,0 +1,49 @@
+PREHOOK: query: create table tmp_tmp(key string, value string) stored as rcfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tmp_tmp
+POSTHOOK: query: create table tmp_tmp(key string, value string) stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tmp_tmp
+PREHOOK: query: insert overwrite table tmp_tmp
+SELECT TRANSFORM(key, value) USING
+'python newline.py' AS key, value FROM src limit 6
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tmp_tmp
+POSTHOOK: query: insert overwrite table tmp_tmp
+SELECT TRANSFORM(key, value) USING
+'python newline.py' AS key, value FROM src limit 6
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tmp_tmp
+POSTHOOK: Lineage: tmp_tmp.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tmp_tmp.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from tmp_tmp ORDER BY key ASC, value ASC
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmp_tmp
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tmp_tmp ORDER BY key ASC, value ASC
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmp_tmp
+#### A masked pattern was here ####
+1	2	NULL
+1	2	NULL
+1	NULL
+2	NULL
+1	NULL
+2	NULL
+1	NULL
+2	NULL
+1	NULL
+2	NULL
+#### A masked pattern was here ####
+PREHOOK: query: drop table tmp_tmp
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@tmp_tmp
+PREHOOK: Output: default@tmp_tmp
+POSTHOOK: query: drop table tmp_tmp
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@tmp_tmp
+POSTHOOK: Output: default@tmp_tmp

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/root_dir_external_table.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/root_dir_external_table.q.out?rev=1656573&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/root_dir_external_table.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/root_dir_external_table.q.out Mon Feb  2 21:10:08 2015
@@ -0,0 +1,26 @@
+#### A masked pattern was here ####
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+PREHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+PREHOOK: Output: database:default
+PREHOOK: Output: default@roottable
+#### A masked pattern was here ####
+POSTHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@roottable
+PREHOOK: query: select count(*) from roottable
+PREHOOK: type: QUERY
+PREHOOK: Input: default@roottable
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from roottable
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@roottable
+#### A masked pattern was here ####
+20
+#### A masked pattern was here ####

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/schemeAuthority.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/schemeAuthority.q.out?rev=1656573&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/schemeAuthority.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/schemeAuthority.q.out Mon Feb  2 21:10:08 2015
@@ -0,0 +1,84 @@
+PREHOOK: query: create external table dynPart (key string) partitioned by (value string) row format delimited fields terminated by '\\t' stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dynPart
+POSTHOOK: query: create external table dynPart (key string) partitioned by (value string) row format delimited fields terminated by '\\t' stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dynPart
+#### A masked pattern was here ####
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+PREHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+PREHOOK: Output: default@dynpart
+#### A masked pattern was here ####
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+POSTHOOK: Output: default@dynpart
+POSTHOOK: Output: default@dynpart@value=0
+#### A masked pattern was here ####
+PREHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+PREHOOK: Output: default@dynpart
+#### A masked pattern was here ####
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+POSTHOOK: Output: default@dynpart
+POSTHOOK: Output: default@dynpart@value=1
+PREHOOK: query: select count(*) from dynPart
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dynpart
+PREHOOK: Input: default@dynpart@value=0
+PREHOOK: Input: default@dynpart@value=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from dynPart
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dynpart
+POSTHOOK: Input: default@dynpart@value=0
+POSTHOOK: Input: default@dynpart@value=1
+#### A masked pattern was here ####
+2
+PREHOOK: query: select key from dynPart
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dynpart
+PREHOOK: Input: default@dynpart@value=0
+PREHOOK: Input: default@dynpart@value=1
+#### A masked pattern was here ####
+POSTHOOK: query: select key from dynPart
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dynpart
+POSTHOOK: Input: default@dynpart@value=0
+POSTHOOK: Input: default@dynpart@value=1
+#### A masked pattern was here ####
+10
+20
+PREHOOK: query: select key from src where (key = 10) order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key from src where (key = 10) order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+10
+PREHOOK: query: select key from src where (key = 20) order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key from src where (key = 20) order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+20
+#### A masked pattern was here ####

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/schemeAuthority2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/schemeAuthority2.q.out?rev=1656573&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/schemeAuthority2.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/schemeAuthority2.q.out Mon Feb  2 21:10:08 2015
@@ -0,0 +1,53 @@
+PREHOOK: query: create external table dynPart (key string) partitioned by (value string, value2 string) row format delimited fields terminated by '\\t' stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dynPart
+POSTHOOK: query: create external table dynPart (key string) partitioned by (value string, value2 string) row format delimited fields terminated by '\\t' stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dynPart
+#### A masked pattern was here ####
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+PREHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+PREHOOK: Output: default@dynpart
+#### A masked pattern was here ####
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+POSTHOOK: Output: default@dynpart
+POSTHOOK: Output: default@dynpart@value=0/value2=clusterA
+#### A masked pattern was here ####
+PREHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+PREHOOK: Output: default@dynpart
+#### A masked pattern was here ####
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+POSTHOOK: Output: default@dynpart
+POSTHOOK: Output: default@dynpart@value=0/value2=clusterB
+PREHOOK: query: select value2, key from dynPart where value='0'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dynpart
+PREHOOK: Input: default@dynpart@value=0/value2=clusterA
+PREHOOK: Input: default@dynpart@value=0/value2=clusterB
+#### A masked pattern was here ####
+POSTHOOK: query: select value2, key from dynPart where value='0'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dynpart
+POSTHOOK: Input: default@dynpart@value=0/value2=clusterA
+POSTHOOK: Input: default@dynpart@value=0/value2=clusterB
+#### A masked pattern was here ####
+clusterA	10
+clusterB	20
+#### A masked pattern was here ####

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/temp_table_external.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/temp_table_external.q.out?rev=1656573&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/temp_table_external.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/temp_table_external.q.out Mon Feb  2 21:10:08 2015
@@ -0,0 +1,34 @@
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+PREHOOK: Output: database:default
+PREHOOK: Output: default@temp_table_external
+#### A masked pattern was here ####
+POSTHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@temp_table_external
+PREHOOK: query: select * from temp_table_external
+PREHOOK: type: QUERY
+PREHOOK: Input: default@temp_table_external
+#### A masked pattern was here ####
+POSTHOOK: query: select * from temp_table_external
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@temp_table_external
+#### A masked pattern was here ####
+NULL	35
+48	NULL
+100	100
+PREHOOK: query: -- Even after we drop the table, the data directory should still be there
+drop table temp_table_external
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@temp_table_external
+PREHOOK: Output: default@temp_table_external
+POSTHOOK: query: -- Even after we drop the table, the data directory should still be there
+drop table temp_table_external
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@temp_table_external
+POSTHOOK: Output: default@temp_table_external
+Found 1 items
+#### A masked pattern was here ####

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/truncate_column_buckets.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/truncate_column_buckets.q.out?rev=1656573&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/truncate_column_buckets.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/truncate_column_buckets.q.out Mon Feb  2 21:10:08 2015
@@ -0,0 +1,70 @@
+PREHOOK: query: -- Tests truncating columns from a bucketed table, table should remain bucketed
+
+CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_tab
+POSTHOOK: query: -- Tests truncating columns from a bucketed table, table should remain bucketed
+
+CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_tab
+PREHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_tab
+POSTHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_tab
+POSTHOOK: Lineage: test_tab.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_tab.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- Check how many rows there are in each bucket, there should be two rows
+SELECT cnt FROM (
+SELECT INPUT__FILE__NAME file_name, count(*) cnt FROM 
+test_tab GROUP BY INPUT__FILE__NAME
+ORDER BY file_name DESC)a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_tab
+#### A masked pattern was here ####
+POSTHOOK: query: -- Check how many rows there are in each bucket, there should be two rows
+SELECT cnt FROM (
+SELECT INPUT__FILE__NAME file_name, count(*) cnt FROM 
+test_tab GROUP BY INPUT__FILE__NAME
+ORDER BY file_name DESC)a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_tab
+#### A masked pattern was here ####
+258
+242
+PREHOOK: query: -- Truncate a column on which the table is not bucketed
+TRUNCATE TABLE test_tab COLUMNS (value)
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Input: default@test_tab
+PREHOOK: Output: default@test_tab
+POSTHOOK: query: -- Truncate a column on which the table is not bucketed
+TRUNCATE TABLE test_tab COLUMNS (value)
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Input: default@test_tab
+POSTHOOK: Output: default@test_tab
+PREHOOK: query: -- Check how many rows there are in each bucket, this should produce the same rows as before
+-- because truncate should not break bucketing
+SELECT cnt FROM (
+SELECT INPUT__FILE__NAME file_name, count(*) cnt FROM 
+test_tab GROUP BY INPUT__FILE__NAME
+ORDER BY file_name DESC)a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_tab
+#### A masked pattern was here ####
+POSTHOOK: query: -- Check how many rows there are in each bucket, this should produce the same rows as before
+-- because truncate should not break bucketing
+SELECT cnt FROM (
+SELECT INPUT__FILE__NAME file_name, count(*) cnt FROM 
+test_tab GROUP BY INPUT__FILE__NAME
+ORDER BY file_name DESC)a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_tab
+#### A masked pattern was here ####
+258
+242

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/uber_reduce.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/uber_reduce.q.out?rev=1656573&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/uber_reduce.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/uber_reduce.q.out Mon Feb  2 21:10:08 2015
@@ -0,0 +1,31 @@
+PREHOOK: query: -- Uberized mode is a YARN option, ignore this test for non-YARN Hadoop versions
+-- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
+
+CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T1
+POSTHOOK: query: -- Uberized mode is a YARN option, ignore this test for non-YARN Hadoop versions
+-- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
+
+CREATE TABLE T1(key STRING, val STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@t1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@t1
+PREHOOK: query: SELECT count(*) FROM T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT count(*) FROM T1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+6

Modified: hive/branches/spark/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java?rev=1656573&r1=1656572&r2=1656573&view=diff
==============================================================================
--- hive/branches/spark/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java (original)
+++ hive/branches/spark/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java Mon Feb  2 21:10:08 2015
@@ -232,6 +232,12 @@ public class Hadoop20SShims extends Hado
     throw new IOException("Cannot run tez on current hadoop, Version: " + VersionInfo.getVersion());
   }
 
+  @Override
+  public MiniMrShim getMiniSparkCluster(Configuration conf, int numberOfTaskTrackers,
+    String nameNode, int numDir) throws IOException {
+    throw new IOException("Cannot run Spark on YARN on current Hadoop, Version: " + VersionInfo.getVersion());
+  }
+
   /**
    * Shim for MiniMrCluster
    */

Modified: hive/branches/spark/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java?rev=1656573&r1=1656572&r2=1656573&view=diff
==============================================================================
--- hive/branches/spark/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java (original)
+++ hive/branches/spark/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java Mon Feb  2 21:10:08 2015
@@ -415,6 +415,73 @@ public class Hadoop23Shims extends Hadoo
     }
   }
 
+  /**
+   * Returns a shim to wrap MiniSparkOnYARNCluster
+   */
+  @Override
+  public MiniMrShim getMiniSparkCluster(Configuration conf, int numberOfTaskTrackers,
+    String nameNode, int numDir) throws IOException {
+    return new MiniSparkShim(conf, numberOfTaskTrackers, nameNode, numDir);
+  }
+
+  /**
+   * Shim for MiniSparkOnYARNCluster
+   */
+  public class MiniSparkShim extends Hadoop23Shims.MiniMrShim {
+
+    private final MiniSparkOnYARNCluster mr;
+    private final Configuration conf;
+
+    public MiniSparkShim(Configuration conf, int numberOfTaskTrackers,
+      String nameNode, int numDir) throws IOException {
+
+      mr = new MiniSparkOnYARNCluster("sparkOnYarn");
+      conf.set("fs.defaultFS", nameNode);
+      mr.init(conf);
+      mr.start();
+      this.conf = mr.getConfig();
+    }
+
+    @Override
+    public int getJobTrackerPort() throws UnsupportedOperationException {
+      String address = conf.get("yarn.resourcemanager.address");
+      address = StringUtils.substringAfterLast(address, ":");
+
+      if (StringUtils.isBlank(address)) {
+        throw new IllegalArgumentException("Invalid YARN resource manager port.");
+      }
+
+      return Integer.parseInt(address);
+    }
+
+    @Override
+    public void shutdown() throws IOException {
+      mr.stop();
+    }
+
+    @Override
+    public void setupConfiguration(Configuration conf) {
+      Configuration config = mr.getConfig();
+      for (Map.Entry<String, String> pair : config) {
+        conf.set(pair.getKey(), pair.getValue());
+      }
+
+      Path jarPath = new Path("hdfs:///user/hive");
+      Path hdfsPath = new Path("hdfs:///user/");
+      try {
+        FileSystem fs = cluster.getFileSystem();
+        jarPath = fs.makeQualified(jarPath);
+        conf.set("hive.jar.directory", jarPath.toString());
+        fs.mkdirs(jarPath);
+        hdfsPath = fs.makeQualified(hdfsPath);
+        conf.set("hive.user.install.directory", hdfsPath.toString());
+        fs.mkdirs(hdfsPath);
+      } catch (Exception e) {
+        e.printStackTrace();
+      }
+    }
+  }
+
   // Don't move this code to the parent class. There's a binary
   // incompatibility between hadoop 1 and 2 wrt MiniDFSCluster and we
   // need to have two different shim classes even though they are

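For context, test drivers would reach the new MiniSparkShim through the HadoopShims interface rather than constructing it directly. A minimal sketch, assuming the standard ShimLoader entry point and illustrative cluster parameters:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;

public class MiniSparkClusterSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // ShimLoader picks the shim matching the Hadoop version on the
    // classpath; on Hadoop 2.x this resolves to Hadoop23Shims.
    HadoopShims shims = ShimLoader.getHadoopShims();
    // numberOfTaskTrackers=1 and numDir=1 are illustrative; the nameNode URI
    // would normally come from a MiniDFSCluster started by the test harness.
    HadoopShims.MiniMrShim spark =
        shims.getMiniSparkCluster(conf, 1, "hdfs://localhost:8020", 1);
    try {
      // Copies the mini cluster's effective settings into the job conf.
      spark.setupConfiguration(conf);
    } finally {
      spark.shutdown();
    }
  }
}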
Added: hive/branches/spark/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/MiniSparkOnYARNCluster.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/MiniSparkOnYARNCluster.java?rev=1656573&view=auto
==============================================================================
--- hive/branches/spark/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/MiniSparkOnYARNCluster.java (added)
+++ hive/branches/spark/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/MiniSparkOnYARNCluster.java Mon Feb  2 21:10:08 2015
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.shims;
+
+import org.apache.hadoop.yarn.server.MiniYARNCluster;
+
+/**
+ * This is intended to be a very thin wrapper around MiniYARNCluster, which
+ * helps build an embedded YARN cluster for Spark.
+ */
+public class MiniSparkOnYARNCluster extends MiniYARNCluster {
+
+  public MiniSparkOnYARNCluster(String testName) {
+    this(testName, 1, 1);
+  }
+
+  public MiniSparkOnYARNCluster(String testName, int numResourceManagers, int numNodeManagers) {
+    this(testName, numResourceManagers, numNodeManagers, 1, 1);
+  }
+
+  public MiniSparkOnYARNCluster(String testName, int numResourceManagers,
+    int numNodeManagers, int numLocalDirs, int numLogDirs) {
+    super(testName, numResourceManagers, numNodeManagers, numLocalDirs, numLogDirs);
+  }
+}
\ No newline at end of file

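MiniYARNCluster follows Hadoop's Service lifecycle, which is why MiniSparkShim above calls init(), start(), and getConfig() on it. A standalone sketch of that lifecycle (the printed property is just for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.shims.MiniSparkOnYARNCluster;

public class MiniSparkOnYARNSketch {
  public static void main(String[] args) {
    MiniSparkOnYARNCluster cluster = new MiniSparkOnYARNCluster("sparkOnYarn");
    Configuration conf = new Configuration();
    cluster.init(conf);   // Service lifecycle: init, then start.
    cluster.start();
    try {
      // getConfig() exposes the cluster's effective configuration, e.g. the
      // dynamically assigned resource manager address that MiniSparkShim
      // parses in getJobTrackerPort().
      System.out.println(cluster.getConfig().get("yarn.resourcemanager.address"));
    } finally {
      cluster.stop();
    }
  }
}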
Modified: hive/branches/spark/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java?rev=1656573&r1=1656572&r2=1656573&view=diff
==============================================================================
--- hive/branches/spark/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java (original)
+++ hive/branches/spark/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java Mon Feb  2 21:10:08 2015
@@ -93,7 +93,10 @@ public interface HadoopShims {
       String nameNode, int numDir) throws IOException;
 
   public MiniMrShim getMiniTezCluster(Configuration conf, int numberOfTaskTrackers,
-                                     String nameNode, int numDir) throws IOException;
+      String nameNode, int numDir) throws IOException;
+
+  public MiniMrShim getMiniSparkCluster(Configuration conf, int numberOfTaskTrackers,
+      String nameNode, int numDir) throws IOException;
 
   /**
    * Shim for MiniMrCluster

Modified: hive/branches/spark/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java?rev=1656573&r1=1656572&r2=1656573&view=diff
==============================================================================
--- hive/branches/spark/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java (original)
+++ hive/branches/spark/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java Mon Feb  2 21:10:08 2015
@@ -68,6 +68,7 @@ class SparkClientImpl implements SparkCl
   private static final String DRIVER_OPTS_KEY = "spark.driver.extraJavaOptions";
   private static final String EXECUTOR_OPTS_KEY = "spark.executor.extraJavaOptions";
   private static final String DRIVER_EXTRA_CLASSPATH = "spark.driver.extraClassPath";
+  private static final String EXECUTOR_EXTRA_CLASSPATH = "spark.executor.extraClassPath";
 
   private final Map<String, String> conf;
   private final HiveConf hiveConf;
@@ -252,14 +253,25 @@ class SparkClientImpl implements SparkCl
       allProps.put(DRIVER_OPTS_KEY, driverJavaOpts);
       allProps.put(EXECUTOR_OPTS_KEY, executorJavaOpts);
 
-      String hiveHadoopTestClasspath = Strings.nullToEmpty(System.getenv("HIVE_HADOOP_TEST_CLASSPATH"));
-      if (!hiveHadoopTestClasspath.isEmpty()) {
-        String extraClasspath = Strings.nullToEmpty((String)allProps.get(DRIVER_EXTRA_CLASSPATH));
-        if (extraClasspath.isEmpty()) {
-          allProps.put(DRIVER_EXTRA_CLASSPATH, hiveHadoopTestClasspath);
-        } else {
-          extraClasspath = extraClasspath.endsWith(File.pathSeparator) ? extraClasspath : extraClasspath + File.pathSeparator;
-          allProps.put(DRIVER_EXTRA_CLASSPATH, extraClasspath + hiveHadoopTestClasspath);
+      String isTesting = conf.get("spark.testing");
+      if (isTesting != null && isTesting.equalsIgnoreCase("true")) {
+        String hiveHadoopTestClasspath = Strings.nullToEmpty(System.getenv("HIVE_HADOOP_TEST_CLASSPATH"));
+        if (!hiveHadoopTestClasspath.isEmpty()) {
+          String extraDriverClasspath = Strings.nullToEmpty((String)allProps.get(DRIVER_EXTRA_CLASSPATH));
+          if (extraDriverClasspath.isEmpty()) {
+            allProps.put(DRIVER_EXTRA_CLASSPATH, hiveHadoopTestClasspath);
+          } else {
+            extraDriverClasspath = extraDriverClasspath.endsWith(File.pathSeparator) ? extraDriverClasspath : extraDriverClasspath + File.pathSeparator;
+            allProps.put(DRIVER_EXTRA_CLASSPATH, extraDriverClasspath + hiveHadoopTestClasspath);
+          }
+
+          String extraExecutorClasspath = Strings.nullToEmpty((String)allProps.get(EXECUTOR_EXTRA_CLASSPATH));
+          if (extraExecutorClasspath.isEmpty()) {
+            allProps.put(EXECUTOR_EXTRA_CLASSPATH, hiveHadoopTestClasspath);
+          } else {
+            extraExecutorClasspath = extraExecutorClasspath.endsWith(File.pathSeparator) ? extraExecutorClasspath : extraExecutorClasspath + File.pathSeparator;
+            allProps.put(EXECUTOR_EXTRA_CLASSPATH, extraExecutorClasspath + hiveHadoopTestClasspath);
+          }
         }
       }
 
@@ -362,6 +374,9 @@ class SparkClientImpl implements SparkCl
       LOG.debug("Running client driver with argv: {}", Joiner.on(" ").join(argv));
 
       ProcessBuilder pb = new ProcessBuilder(argv.toArray(new String[argv.size()]));
+      if (isTesting != null) {
+        pb.environment().put("SPARK_TESTING", isTesting);
+      }
       final Process child = pb.start();
 
       int childId = childIdGenerator.incrementAndGet();

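The driver and executor branches above repeat one pattern: append HIVE_HADOOP_TEST_CLASSPATH to any user-supplied extraClassPath value, inserting a path separator only when one is missing. A minimal sketch of that logic factored into a helper (the class and method names are illustrative, not part of the patch):

import java.io.File;
import java.util.Properties;

import com.google.common.base.Strings;

public final class ClasspathAppendSketch {
  private ClasspathAppendSketch() {}

  /** Appends extraPath under key, adding a path separator when needed. */
  static void appendClasspath(Properties allProps, String key, String extraPath) {
    String existing = Strings.nullToEmpty((String) allProps.get(key));
    if (existing.isEmpty()) {
      allProps.put(key, extraPath);
    } else {
      String prefix = existing.endsWith(File.pathSeparator)
          ? existing
          : existing + File.pathSeparator;
      allProps.put(key, prefix + extraPath);
    }
  }
}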

