hive-commits mailing list archives

From: gunt...@apache.org
Subject: svn commit: r1551819 [1/4] - in /hive/branches/tez: itests/qtest/ ql/src/test/results/clientpositive/tez/
Date: Wed, 18 Dec 2013 03:14:10 GMT
Author: gunther
Date: Wed Dec 18 03:14:09 2013
New Revision: 1551819

URL: http://svn.apache.org/r1551819
Log:
HIVE-5065 (part 3): Create proper (i.e.: non .q file based) junit tests for DagUtils and TezTask (Gunther Hagleitner)

Added:
    hive/branches/tez/ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/tez/enforce_order.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/tez/fileformat_mix.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/tez/filter_join_breaktask2.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/tez/groupby1.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/tez/groupby2.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/tez/groupby3.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/tez/having.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/tez/insert1.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/tez/insert_into1.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/tez/insert_into2.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/tez/leftsemijoin.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
Modified:
    hive/branches/tez/itests/qtest/pom.xml
    hive/branches/tez/ql/src/test/results/clientpositive/tez/ctas.q.out

Modified: hive/branches/tez/itests/qtest/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/itests/qtest/pom.xml?rev=1551819&r1=1551818&r2=1551819&view=diff
==============================================================================
--- hive/branches/tez/itests/qtest/pom.xml (original)
+++ hive/branches/tez/itests/qtest/pom.xml Wed Dec 18 03:14:09 2013
@@ -39,7 +39,7 @@
     <minimr.query.files>stats_counter_partitioned.q,list_bucket_dml_10.q,input16_cc.q,scriptfile1.q,scriptfile1_win.q,bucket4.q,bucketmapjoin6.q,disable_merge_for_bucketing.q,reduce_deduplicate.q,smb_mapjoin_8.q,join1.q,groupby2.q,bucketizedhiveinputformat.q,bucketmapjoin7.q,optrstat_groupby.q,bucket_num_reducers.q,bucket5.q,load_fs2.q,bucket_num_reducers2.q,infer_bucket_sort_merge.q,infer_bucket_sort_reducers_power_two.q,infer_bucket_sort_dyn_part.q,infer_bucket_sort_bucketed_table.q,infer_bucket_sort_map_operators.q,infer_bucket_sort_num_buckets.q,leftsemijoin_mr.q,schemeAuthority.q,schemeAuthority2.q,truncate_column_buckets.q,remote_script.q,,load_hdfs_file_with_space_in_the_name.q,parallel_orderby.q,import_exported_table.q,stats_counter.q</minimr.query.files>
     <minimr.query.negative.files>cluster_tasklog_retrieval.q,minimr_broken_pipe.q,mapreduce_stack_trace.q,mapreduce_stack_trace_turnoff.q,mapreduce_stack_trace_hadoop20.q,mapreduce_stack_trace_turnoff_hadoop20.q</minimr.query.negative.files>
     <minitez.query.files>tez_join_tests.q,tez_joins_explain.q,mrr.q,tez_dml.q,tez_insert_overwrite_local_directory_1.q</minitez.query.files>
-    <minitez.query.files.shared>join0.q,join1.q,auto_join0.q,auto_join1.q,bucket2.q,bucket3.q,bucket4.q,count.q,create_merge_compressed.q,cross_join.q,ctas.q,custom_input_output_format.q</minitez.query.files.shared>
+    <minitez.query.files.shared>join0.q,join1.q,auto_join0.q,auto_join1.q,bucket2.q,bucket3.q,bucket4.q,count.q,create_merge_compressed.q,cross_join.q,ctas.q,custom_input_output_format.q,disable_merge_for_bucketing.q,enforce_order.q,fileformat_mix.q,filter_join_breaktask.q,filter_join_breaktask2.q,groupby1.q,groupby2.q,groupby3.q,having.q,insert1.q,insert_into1.q,insert_into2.q,leftsemijoin.q,limit_pushdown.q</minitez.query.files.shared>
     <beeline.positive.exclude>add_part_exist.q,alter1.q,alter2.q,alter4.q,alter5.q,alter_rename_partition.q,alter_rename_partition_authorization.q,archive.q,archive_corrupt.q,archive_multi.q,archive_mr_1806.q,archive_multi_mr_1806.q,authorization_1.q,authorization_2.q,authorization_4.q,authorization_5.q,authorization_6.q,authorization_7.q,ba_table1.q,ba_table2.q,ba_table3.q,ba_table_udfs.q,binary_table_bincolserde.q,binary_table_colserde.q,cluster.q,columnarserde_create_shortcut.q,combine2.q,constant_prop.q,create_nested_type.q,create_or_replace_view.q,create_struct_table.q,create_union_table.q,database.q,database_location.q,database_properties.q,ddltime.q,describe_database_json.q,drop_database_removes_partition_dirs.q,escape1.q,escape2.q,exim_00_nonpart_empty.q,exim_01_nonpart.q,exim_02_00_part_empty.q,exim_02_part.q,exim_03_nonpart_over_compat.q,exim_04_all_part.q,exim_04_evolved_parts.q,exim_05_some_part.q,exim_06_one_part.q,exim_07_all_part_over_nonoverlap.q,exim_08_nonpart_rename.q,exim_09_part_spec_nonoverlap.q,exim_10_external_managed.q,exim_11_managed_external.q,exim_12_external_location.q,exim_13_managed_location.q,exim_14_managed_location_over_existing.q,exim_15_external_part.q,exim_16_part_external.q,exim_17_part_managed.q,exim_18_part_external.q,exim_19_00_part_external_location.q,exim_19_part_external_location.q,exim_20_part_managed_location.q,exim_21_export_authsuccess.q,exim_22_import_exist_authsuccess.q,exim_23_import_part_authsuccess.q,exim_24_import_nonexist_authsuccess.q,global_limit.q,groupby_complex_types.q,groupby_complex_types_multi_single_reducer.q,index_auth.q,index_auto.q,index_auto_empty.q,index_bitmap.q,index_bitmap1.q,index_bitmap2.q,index_bitmap3.q,index_bitmap_auto.q,index_bitmap_rc.q,index_compact.q,index_compact_1.q,index_compact_2.q,index_compact_3.q,index_stale_partitioned.q,init_file.q,input16.q,input16_cc.q,input46.q,input_columnarserde.q,input_dynamicserde.q,input_lazyserde.q,input_testxpath3.q,input_testxpath4.q,insert2_overwrite_partitions.q,insertexternal1.q,join_thrift.q,lateral_view.q,load_binary_data.q,load_exist_part_authsuccess.q,load_nonpart_authsuccess.q,load_part_authsuccess.q,loadpart_err.q,lock1.q,lock2.q,lock3.q,lock4.q,merge_dynamic_partition.q,multi_insert.q,multi_insert_move_tasks_share_dependencies.q,null_column.q,ppd_clusterby.q,query_with_semi.q,rename_column.q,sample6.q,sample_islocalmode_hook.q,set_processor_namespaces.q,show_tables.q,source.q,split_sample.q,str_to_map.q,transform1.q,udaf_collect_set.q,udaf_context_ngrams.q,udaf_histogram_numeric.q,udaf_ngrams.q,udaf_percentile_approx.q,udf_array.q,udf_bitmap_and.q,udf_bitmap_or.q,udf_explode.q,udf_format_number.q,udf_map.q,udf_map_keys.q,udf_map_values.q,udf_max.q,udf_min.q,udf_named_struct.q,udf_percentile.q,udf_printf.q,udf_sentences.q,udf_sort_array.q,udf_split.q,udf_struct.q,udf_substr.q,udf_translate.q,udf_union.q,udf_xpath.q,udtf_stack.q,view.q,virtual_column.q</beeline.positive.exclude>
   </properties>
 

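For context: the minitez.query.files.shared property lists .q files whose golden outputs are kept in sync for both the plain CLI driver and the Tez mini-cluster driver, which is why this commit adds a tez/*.q.out file for each newly listed query. In an interactive session the corresponding switch is the execution-engine property; a minimal sketch (the two values below are the engines supported on this branch):

    -- Run subsequent queries on Tez instead of classic MapReduce:
    SET hive.execution.engine=tez;
    -- Revert to MapReduce:
    SET hive.execution.engine=mr;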
Modified: hive/branches/tez/ql/src/test/results/clientpositive/tez/ctas.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/tez/ctas.q.out?rev=1551819&r1=1551818&r2=1551819&view=diff
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/tez/ctas.q.out (original)
+++ hive/branches/tez/ql/src/test/results/clientpositive/tez/ctas.q.out Wed Dec 18 03:14:09 2013
@@ -226,7 +226,7 @@ STAGE DEPENDENCIES:
   Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
   Stage-5
   Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-9 depends on stages: Stage-2, Stage-0
+  Stage-9 depends on stages: Stage-0, Stage-2
   Stage-3 depends on stages: Stage-9
   Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
   Stage-4
@@ -669,7 +669,7 @@ STAGE DEPENDENCIES:
   Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
   Stage-5
   Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-9 depends on stages: Stage-0, Stage-2
+  Stage-9 depends on stages: Stage-2, Stage-0
   Stage-3 depends on stages: Stage-9
   Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
   Stage-4

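Note that the two ctas.q.out hunks above only swap the print order of Stage-9's prerequisites ("Stage-2, Stage-0" versus "Stage-0, Stage-2"); the dependency set itself is unchanged. For reference, a CTAS of the general shape that yields this conditional merge/move stage graph might look as follows (table name is hypothetical, not taken from ctas.q):

    -- Sketch: CREATE TABLE AS SELECT over the standard src test table.
    CREATE TABLE ctas_demo AS
    SELECT key, value FROM src ORDER BY key LIMIT 10;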
Added: hive/branches/tez/ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out?rev=1551819&view=auto
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out (added)
+++ hive/branches/tez/ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out Wed Dec 18 03:14:09 2013
@@ -0,0 +1,499 @@
+PREHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@bucket2_1
+PREHOOK: query: explain extended
+insert overwrite table bucket2_1
+select * from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucket2_1
+select * from src
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucket2_1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Alias -> Map Operator Tree:
+        src 
+          TableScan
+            alias: src
+            Statistics:
+                numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE
+            GatherStats: false
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Statistics:
+                  numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE
+              Reduce Output Operator
+                sort order: 
+                Map-reduce partition columns:
+                      expr: UDFToInteger(_col0)
+                      type: int
+                Statistics:
+                    numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE
+                tag: -1
+                value expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 0
+              rawDataSize 0
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count -1
+                columns key,value
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+      Truncated Path -> Alias:
+        /src [src]
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Extract
+          Statistics:
+              numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE
+          Select Operator
+            expressions:
+                  expr: UDFToInteger(_col0)
+                  type: int
+                  expr: _col1
+                  type: string
+            outputColumnNames: _col0, _col1
+            Statistics:
+                numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 1
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 2
+              Statistics:
+                  numRows: 29 dataSize: 5812 basicStatsState: COMPLETE colStatsState: NONE
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.bucket2_1
+                    serialization.ddl struct bucket2_1 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.bucket2_1
+              TotalFiles: 2
+              GatherStats: true
+              MultiFileSpray: true
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count 2
+                bucket_field_name key
+                columns key,value
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.bucket2_1
+                serialization.ddl struct bucket2_1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucket2_1
+#### A masked pattern was here ####
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucket2_1
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket2_1
+POSTHOOK: query: insert overwrite table bucket2_1
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket2_1
+POSTHOOK: Lineage: bucket2_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket2_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Lineage: bucket2_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket2_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME bucket2_1) (TOK_TABLEBUCKETSAMPLE 1 2) s)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Alias -> Map Operator Tree:
+        s 
+          TableScan
+            alias: s
+            Filter Operator
+              predicate:
+                  expr: (((hash(key) & 2147483647) % 2) = 0)
+                  type: boolean
+              Select Operator
+                expressions:
+                      expr: key
+                      type: int
+                      expr: value
+                      type: string
+                outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions:
+                        expr: _col0
+                        type: int
+                  sort order: +
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: int
+                        expr: _col1
+                        type: string
+      Reduce Operator Tree:
+        Extract
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+PREHOOK: query: select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket2_1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket2_1
+#### A masked pattern was here ####
+POSTHOOK: Lineage: bucket2_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket2_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+0	val_0
+0	val_0
+0	val_0
+2	val_2
+4	val_4
+8	val_8
+10	val_10
+12	val_12
+12	val_12
+18	val_18
+18	val_18
+20	val_20
+24	val_24
+24	val_24
+26	val_26
+26	val_26
+28	val_28
+30	val_30
+34	val_34
+42	val_42
+42	val_42
+44	val_44
+54	val_54
+58	val_58
+58	val_58
+64	val_64
+66	val_66
+70	val_70
+70	val_70
+70	val_70
+72	val_72
+72	val_72
+74	val_74
+76	val_76
+76	val_76
+78	val_78
+80	val_80
+82	val_82
+84	val_84
+84	val_84
+86	val_86
+90	val_90
+90	val_90
+90	val_90
+92	val_92
+96	val_96
+98	val_98
+98	val_98
+100	val_100
+100	val_100
+104	val_104
+104	val_104
+114	val_114
+116	val_116
+118	val_118
+118	val_118
+120	val_120
+120	val_120
+126	val_126
+128	val_128
+128	val_128
+128	val_128
+134	val_134
+134	val_134
+136	val_136
+138	val_138
+138	val_138
+138	val_138
+138	val_138
+146	val_146
+146	val_146
+150	val_150
+152	val_152
+152	val_152
+156	val_156
+158	val_158
+160	val_160
+162	val_162
+164	val_164
+164	val_164
+166	val_166
+168	val_168
+170	val_170
+172	val_172
+172	val_172
+174	val_174
+174	val_174
+176	val_176
+176	val_176
+178	val_178
+180	val_180
+186	val_186
+190	val_190
+192	val_192
+194	val_194
+196	val_196
+200	val_200
+200	val_200
+202	val_202
+208	val_208
+208	val_208
+208	val_208
+214	val_214
+216	val_216
+216	val_216
+218	val_218
+222	val_222
+224	val_224
+224	val_224
+226	val_226
+228	val_228
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+238	val_238
+238	val_238
+242	val_242
+242	val_242
+244	val_244
+248	val_248
+252	val_252
+256	val_256
+256	val_256
+258	val_258
+260	val_260
+262	val_262
+266	val_266
+272	val_272
+272	val_272
+274	val_274
+278	val_278
+278	val_278
+280	val_280
+280	val_280
+282	val_282
+282	val_282
+284	val_284
+286	val_286
+288	val_288
+288	val_288
+292	val_292
+296	val_296
+298	val_298
+298	val_298
+298	val_298
+302	val_302
+306	val_306
+308	val_308
+310	val_310
+316	val_316
+316	val_316
+316	val_316
+318	val_318
+318	val_318
+318	val_318
+322	val_322
+322	val_322
+332	val_332
+336	val_336
+338	val_338
+342	val_342
+342	val_342
+344	val_344
+344	val_344
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+356	val_356
+360	val_360
+362	val_362
+364	val_364
+366	val_366
+368	val_368
+374	val_374
+378	val_378
+382	val_382
+382	val_382
+384	val_384
+384	val_384
+384	val_384
+386	val_386
+392	val_392
+394	val_394
+396	val_396
+396	val_396
+396	val_396
+400	val_400
+402	val_402
+404	val_404
+404	val_404
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+414	val_414
+414	val_414
+418	val_418
+424	val_424
+424	val_424
+430	val_430
+430	val_430
+430	val_430
+432	val_432
+436	val_436
+438	val_438
+438	val_438
+438	val_438
+444	val_444
+446	val_446
+448	val_448
+452	val_452
+454	val_454
+454	val_454
+454	val_454
+458	val_458
+458	val_458
+460	val_460
+462	val_462
+462	val_462
+466	val_466
+466	val_466
+466	val_466
+468	val_468
+468	val_468
+468	val_468
+468	val_468
+470	val_470
+472	val_472
+478	val_478
+478	val_478
+480	val_480
+480	val_480
+480	val_480
+482	val_482
+484	val_484
+490	val_490
+492	val_492
+492	val_492
+494	val_494
+496	val_496
+498	val_498
+498	val_498
+498	val_498

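The plan above shows how TABLESAMPLE (BUCKET 1 OUT OF 2) is compiled into the filter (((hash(key) & 2147483647) % 2) = 0), i.e. rows whose non-negative key hash falls in the first of the table's two buckets. A minimal sketch of the same setup (bucket_demo is a hypothetical stand-in for bucket2_1; hive.enforce.bucketing is the pre-Hive-2.0 knob that makes the insert honor the bucket spec):

    SET hive.enforce.bucketing = true;
    CREATE TABLE bucket_demo (key INT, value STRING)
      CLUSTERED BY (key) INTO 2 BUCKETS;
    INSERT OVERWRITE TABLE bucket_demo SELECT * FROM src;
    -- Sampling bucket 1 of 2 is equivalent to the predicate
    --   (hash(key) & 2147483647) % 2 = 0
    SELECT * FROM bucket_demo TABLESAMPLE (BUCKET 1 OUT OF 2) s ORDER BY key;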
Added: hive/branches/tez/ql/src/test/results/clientpositive/tez/enforce_order.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/tez/enforce_order.q.out?rev=1551819&view=auto
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/tez/enforce_order.q.out (added)
+++ hive/branches/tez/ql/src/test/results/clientpositive/tez/enforce_order.q.out Wed Dec 18 03:14:09 2013
@@ -0,0 +1,84 @@
+PREHOOK: query: drop table table_asc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table table_asc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table table_desc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table table_desc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table table_asc(key string, value string) clustered by (key) sorted by (key ASC) into 1 BUCKETS
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table table_asc(key string, value string) clustered by (key) sorted by (key ASC) into 1 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@table_asc
+PREHOOK: query: create table table_desc(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table table_desc(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@table_desc
+PREHOOK: query: insert overwrite table table_asc select key, value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@table_asc
+POSTHOOK: query: insert overwrite table table_asc select key, value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@table_asc
+POSTHOOK: Lineage: table_asc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_asc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table table_desc select key, value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@table_desc
+POSTHOOK: query: insert overwrite table table_desc select key, value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@table_desc
+POSTHOOK: Lineage: table_asc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_asc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: table_desc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_desc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from table_asc limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@table_asc
+#### A masked pattern was here ####
+POSTHOOK: query: select * from table_asc limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@table_asc
+#### A masked pattern was here ####
+POSTHOOK: Lineage: table_asc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_asc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: table_desc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_desc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+100	val_100
+100	val_100
+103	val_103
+103	val_103
+104	val_104
+104	val_104
+PREHOOK: query: select * from table_desc limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@table_desc
+#### A masked pattern was here ####
+POSTHOOK: query: select * from table_desc limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@table_desc
+#### A masked pattern was here ####
+POSTHOOK: Lineage: table_asc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_asc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: table_desc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_desc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+98	val_98
+98	val_98
+97	val_97
+97	val_97
+96	val_96
+95	val_95
+95	val_95
+92	val_92
+90	val_90
+90	val_90

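The ascending/descending heads above follow from the table definitions: with sorting enforced, each insert writes its single bucket file ordered by key, and since key is a string column the order is lexicographic (which is why "98" outranks "100" in table_desc). A minimal sketch, using a hypothetical table name in place of table_desc:

    SET hive.enforce.sorting = true;   -- pre-Hive 2.0: make inserts respect SORTED BY
    CREATE TABLE sorted_demo (key STRING, value STRING)
      CLUSTERED BY (key) SORTED BY (key DESC) INTO 1 BUCKETS;
    INSERT OVERWRITE TABLE sorted_demo SELECT key, value FROM src;
    -- The bucket file is written in descending key order, so a bare LIMIT
    -- returns the lexicographically largest keys first:
    SELECT * FROM sorted_demo LIMIT 10;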
Added: hive/branches/tez/ql/src/test/results/clientpositive/tez/fileformat_mix.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/tez/fileformat_mix.q.out?rev=1551819&view=auto
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/tez/fileformat_mix.q.out (added)
+++ hive/branches/tez/ql/src/test/results/clientpositive/tez/fileformat_mix.q.out Wed Dec 18 03:14:09 2013
@@ -0,0 +1,573 @@
+PREHOOK: query: create table fileformat_mix_test (src int, value string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table fileformat_mix_test (src int, value string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@fileformat_mix_test
+PREHOOK: query: alter table fileformat_mix_test set fileformat Sequencefile
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@fileformat_mix_test
+PREHOOK: Output: default@fileformat_mix_test
+POSTHOOK: query: alter table fileformat_mix_test set fileformat Sequencefile
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@fileformat_mix_test
+POSTHOOK: Output: default@fileformat_mix_test
+PREHOOK: query: insert overwrite table fileformat_mix_test partition (ds='1')
+select key, value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@fileformat_mix_test@ds=1
+POSTHOOK: query: insert overwrite table fileformat_mix_test partition (ds='1')
+select key, value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@fileformat_mix_test@ds=1
+POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).src EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: alter table fileformat_mix_test add partition (ds='2')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Input: default@fileformat_mix_test
+POSTHOOK: query: alter table fileformat_mix_test add partition (ds='2')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Input: default@fileformat_mix_test
+POSTHOOK: Output: default@fileformat_mix_test@ds=2
+POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).src EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: alter table fileformat_mix_test set fileformat rcfile
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@fileformat_mix_test
+PREHOOK: Output: default@fileformat_mix_test
+POSTHOOK: query: alter table fileformat_mix_test set fileformat rcfile
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@fileformat_mix_test
+POSTHOOK: Output: default@fileformat_mix_test
+POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).src EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select count(1) from fileformat_mix_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@fileformat_mix_test
+PREHOOK: Input: default@fileformat_mix_test@ds=1
+PREHOOK: Input: default@fileformat_mix_test@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from fileformat_mix_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@fileformat_mix_test
+POSTHOOK: Input: default@fileformat_mix_test@ds=1
+POSTHOOK: Input: default@fileformat_mix_test@ds=2
+#### A masked pattern was here ####
+POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).src EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+500
+PREHOOK: query: select src from fileformat_mix_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@fileformat_mix_test
+PREHOOK: Input: default@fileformat_mix_test@ds=1
+PREHOOK: Input: default@fileformat_mix_test@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: select src from fileformat_mix_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@fileformat_mix_test
+POSTHOOK: Input: default@fileformat_mix_test@ds=1
+POSTHOOK: Input: default@fileformat_mix_test@ds=2
+#### A masked pattern was here ####
+POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).src EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: fileformat_mix_test PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+238
+86
+311
+27
+165
+409
+255
+278
+98
+484
+265
+193
+401
+150
+273
+224
+369
+66
+128
+213
+146
+406
+429
+374
+152
+469
+145
+495
+37
+327
+281
+277
+209
+15
+82
+403
+166
+417
+430
+252
+292
+219
+287
+153
+193
+338
+446
+459
+394
+237
+482
+174
+413
+494
+207
+199
+466
+208
+174
+399
+396
+247
+417
+489
+162
+377
+397
+309
+365
+266
+439
+342
+367
+325
+167
+195
+475
+17
+113
+155
+203
+339
+0
+455
+128
+311
+316
+57
+302
+205
+149
+438
+345
+129
+170
+20
+489
+157
+378
+221
+92
+111
+47
+72
+4
+280
+35
+427
+277
+208
+356
+399
+169
+382
+498
+125
+386
+437
+469
+192
+286
+187
+176
+54
+459
+51
+138
+103
+239
+213
+216
+430
+278
+176
+289
+221
+65
+318
+332
+311
+275
+137
+241
+83
+333
+180
+284
+12
+230
+181
+67
+260
+404
+384
+489
+353
+373
+272
+138
+217
+84
+348
+466
+58
+8
+411
+230
+208
+348
+24
+463
+431
+179
+172
+42
+129
+158
+119
+496
+0
+322
+197
+468
+393
+454
+100
+298
+199
+191
+418
+96
+26
+165
+327
+230
+205
+120
+131
+51
+404
+43
+436
+156
+469
+468
+308
+95
+196
+288
+481
+457
+98
+282
+197
+187
+318
+318
+409
+470
+137
+369
+316
+169
+413
+85
+77
+0
+490
+87
+364
+179
+118
+134
+395
+282
+138
+238
+419
+15
+118
+72
+90
+307
+19
+435
+10
+277
+273
+306
+224
+309
+389
+327
+242
+369
+392
+272
+331
+401
+242
+452
+177
+226
+5
+497
+402
+396
+317
+395
+58
+35
+336
+95
+11
+168
+34
+229
+233
+143
+472
+322
+498
+160
+195
+42
+321
+430
+119
+489
+458
+78
+76
+41
+223
+492
+149
+449
+218
+228
+138
+453
+30
+209
+64
+468
+76
+74
+342
+69
+230
+33
+368
+103
+296
+113
+216
+367
+344
+167
+274
+219
+239
+485
+116
+223
+256
+263
+70
+487
+480
+401
+288
+191
+5
+244
+438
+128
+467
+432
+202
+316
+229
+469
+463
+280
+2
+35
+283
+331
+235
+80
+44
+193
+321
+335
+104
+466
+366
+175
+403
+483
+53
+105
+257
+406
+409
+190
+406
+401
+114
+258
+90
+203
+262
+348
+424
+12
+396
+201
+217
+164
+431
+454
+478
+298
+125
+431
+164
+424
+187
+382
+5
+70
+397
+480
+291
+24
+351
+255
+104
+70
+163
+438
+119
+414
+200
+491
+237
+439
+360
+248
+479
+305
+417
+199
+444
+120
+429
+169
+443
+323
+325
+277
+230
+478
+178
+468
+310
+317
+333
+493
+460
+207
+249
+265
+480
+83
+136
+353
+172
+214
+462
+233
+406
+133
+175
+189
+454
+375
+401
+421
+407
+384
+256
+26
+134
+67
+384
+379
+18
+462
+492
+100
+298
+9
+341
+498
+146
+458
+362
+186
+285
+348
+167
+18
+273
+183
+281
+344
+97
+469
+315
+84
+28
+37
+448
+152
+348
+307
+194
+414
+477
+222
+126
+90
+169
+403
+400
+200
+97

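The point of this test is that ALTER TABLE ... SET FILEFORMAT changes only the table-level default; existing partitions keep the format they were created with, and the reader resolves the input format per partition. A condensed sketch of the same sequence (fmt_demo is a hypothetical stand-in for fileformat_mix_test):

    CREATE TABLE fmt_demo (src INT, value STRING) PARTITIONED BY (ds STRING);
    ALTER TABLE fmt_demo SET FILEFORMAT SEQUENCEFILE;
    INSERT OVERWRITE TABLE fmt_demo PARTITION (ds='1')
    SELECT key, value FROM src;                  -- ds=1 written as SequenceFile
    ALTER TABLE fmt_demo ADD PARTITION (ds='2'); -- empty partition
    ALTER TABLE fmt_demo SET FILEFORMAT RCFILE;  -- table default only
    SELECT COUNT(1) FROM fmt_demo;               -- 500; ds=1 still read as SequenceFile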
Added: hive/branches/tez/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out?rev=1551819&view=auto
==============================================================================
--- hive/branches/tez/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out (added)
+++ hive/branches/tez/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out Wed Dec 18 03:14:09 2013
@@ -0,0 +1,360 @@
+PREHOOK: query: CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@filter_join_breaktask
+PREHOOK: query: INSERT OVERWRITE TABLE filter_join_breaktask PARTITION(ds='2008-04-08')
+SELECT key, value from src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@filter_join_breaktask@ds=2008-04-08
+POSTHOOK: query: INSERT OVERWRITE TABLE filter_join_breaktask PARTITION(ds='2008-04-08')
+SELECT key, value from src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@filter_join_breaktask@ds=2008-04-08
+POSTHOOK: Lineage: filter_join_breaktask PARTITION(ds=2008-04-08).key EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: filter_join_breaktask PARTITION(ds=2008-04-08).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: EXPLAIN EXTENDED  
+SELECT f.key, g.value 
+FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null) 
+JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='')
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED  
+SELECT f.key, g.value 
+FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null) 
+JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='')
+POSTHOOK: type: QUERY
+POSTHOOK: Lineage: filter_join_breaktask PARTITION(ds=2008-04-08).key EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: filter_join_breaktask PARTITION(ds=2008-04-08).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_TABREF (TOK_TABNAME filter_join_breaktask) f) (TOK_TABREF (TOK_TABNAME filter_join_breaktask) m) (AND (AND (AND (= (. (TOK_TABLE_OR_COL f) key) (. (TOK_TABLE_OR_COL m) key)) (= (. (TOK_TABLE_OR_COL f) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL m) ds) '2008-04-08')) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL f) key)))) (TOK_TABREF (TOK_TABNAME filter_join_breaktask) g) (AND (AND (AND (AND (= (. (TOK_TABLE_OR_COL g) value) (. (TOK_TABLE_OR_COL m) value)) (= (. (TOK_TABLE_OR_COL g) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL m) ds) '2008-04-08')) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL m) value))) (!= (. (TOK_TABLE_OR_COL m) value) '')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL f) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL g) value)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Alias -> Map Operator Tree:
+        f 
+          TableScan
+            alias: f
+            Statistics:
+                numRows: 25 dataSize: 211 basicStatsState: COMPLETE colStatsState: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate:
+                  expr: key is not null
+                  type: boolean
+              Statistics:
+                  numRows: 13 dataSize: 109 basicStatsState: COMPLETE colStatsState: NONE
+              Reduce Output Operator
+                key expressions:
+                      expr: key
+                      type: int
+                sort order: +
+                Map-reduce partition columns:
+                      expr: key
+                      type: int
+                Statistics:
+                    numRows: 13 dataSize: 109 basicStatsState: COMPLETE colStatsState: NONE
+                tag: 0
+                value expressions:
+                      expr: key
+                      type: int
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.filter_join_breaktask
+              numFiles 1
+              numRows 25
+              partition_columns ds
+              rawDataSize 211
+              serialization.ddl struct filter_join_breaktask { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 236
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.filter_join_breaktask
+                partition_columns ds
+                serialization.ddl struct filter_join_breaktask { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.filter_join_breaktask
+            name: default.filter_join_breaktask
+      Truncated Path -> Alias:
+        /filter_join_breaktask/ds=2008-04-08 [f]
+      Alias -> Map Operator Tree:
+        m 
+          TableScan
+            alias: m
+            Statistics:
+                numRows: 25 dataSize: 211 basicStatsState: COMPLETE colStatsState: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate:
+                  expr: ((key is not null and value is not null) and (value <> ''))
+                  type: boolean
+              Statistics:
+                  numRows: 7 dataSize: 59 basicStatsState: COMPLETE colStatsState: NONE
+              Reduce Output Operator
+                key expressions:
+                      expr: key
+                      type: int
+                sort order: +
+                Map-reduce partition columns:
+                      expr: key
+                      type: int
+                Statistics:
+                    numRows: 7 dataSize: 59 basicStatsState: COMPLETE colStatsState: NONE
+                tag: 1
+                value expressions:
+                      expr: value
+                      type: string
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.filter_join_breaktask
+              numFiles 1
+              numRows 25
+              partition_columns ds
+              rawDataSize 211
+              serialization.ddl struct filter_join_breaktask { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 236
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.filter_join_breaktask
+                partition_columns ds
+                serialization.ddl struct filter_join_breaktask { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.filter_join_breaktask
+            name: default.filter_join_breaktask
+      Truncated Path -> Alias:
+        /filter_join_breaktask/ds=2008-04-08 [m]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          condition expressions:
+            0 {VALUE._col0}
+            1 {VALUE._col1}
+          handleSkewJoin: false
+          outputColumnNames: _col0, _col6
+          Statistics:
+              numRows: 14 dataSize: 119 basicStatsState: COMPLETE colStatsState: NONE
+          Reduce Output Operator
+            key expressions:
+                  expr: _col6
+                  type: string
+            sort order: +
+            Map-reduce partition columns:
+                  expr: _col6
+                  type: string
+            Statistics:
+                numRows: 14 dataSize: 119 basicStatsState: COMPLETE colStatsState: NONE
+            tag: 0
+            value expressions:
+                  expr: _col0
+                  type: int
+      Alias -> Map Operator Tree:
+        g 
+          TableScan
+            alias: g
+            Statistics:
+                numRows: 25 dataSize: 211 basicStatsState: COMPLETE colStatsState: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate:
+                  expr: (value <> '')
+                  type: boolean
+              Statistics:
+                  numRows: 25 dataSize: 211 basicStatsState: COMPLETE colStatsState: NONE
+              Reduce Output Operator
+                key expressions:
+                      expr: value
+                      type: string
+                sort order: +
+                Map-reduce partition columns:
+                      expr: value
+                      type: string
+                Statistics:
+                    numRows: 25 dataSize: 211 basicStatsState: COMPLETE colStatsState: NONE
+                tag: 1
+                value expressions:
+                      expr: value
+                      type: string
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.filter_join_breaktask
+              numFiles 1
+              numRows 25
+              partition_columns ds
+              rawDataSize 211
+              serialization.ddl struct filter_join_breaktask { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 236
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.filter_join_breaktask
+                partition_columns ds
+                serialization.ddl struct filter_join_breaktask { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.filter_join_breaktask
+            name: default.filter_join_breaktask
+      Truncated Path -> Alias:
+        /filter_join_breaktask/ds=2008-04-08 [g]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          condition expressions:
+            0 {VALUE._col0}
+            1 {VALUE._col1}
+          handleSkewJoin: false
+          outputColumnNames: _col0, _col11
+          Statistics:
+              numRows: 27 dataSize: 232 basicStatsState: COMPLETE colStatsState: NONE
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: int
+                  expr: _col11
+                  type: string
+            outputColumnNames: _col0, _col1
+            Statistics:
+                numRows: 27 dataSize: 232 basicStatsState: COMPLETE colStatsState: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              Statistics:
+                  numRows: 27 dataSize: 232 basicStatsState: COMPLETE colStatsState: NONE
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    columns _col0,_col1
+                    columns.types int:string
+                    escape.delim \
+                    hive.serialization.extend.nesting.levels true
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+PREHOOK: query: SELECT f.key, g.value 
+FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null) 
+JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@filter_join_breaktask
+PREHOOK: Input: default@filter_join_breaktask@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT f.key, g.value 
+FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null) 
+JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@filter_join_breaktask
+POSTHOOK: Input: default@filter_join_breaktask@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: Lineage: filter_join_breaktask PARTITION(ds=2008-04-08).key EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: filter_join_breaktask PARTITION(ds=2008-04-08).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+146	val_146
+150	val_150
+213	val_213
+238	val_238
+255	val_255
+273	val_273
+278	val_278
+311	val_311
+401	val_401
+406	val_406
+66	val_66
+98	val_98

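In the plan above the optimizer splits the ON-clause conjuncts: the ds='2008-04-08' predicates are consumed by partition pruning, f.key is not null and m.value <> '' become residual Filter Operators, and the two equi-conditions drive the two reduce-side Join Operators. With the non-join predicates factored out, a semantically equivalent sketch of the query (not the form used in the test) is:

    SELECT f.key, g.value
    FROM filter_join_breaktask f
    JOIN filter_join_breaktask m ON f.key = m.key
    JOIN filter_join_breaktask g ON g.value = m.value
    WHERE f.ds = '2008-04-08' AND m.ds = '2008-04-08' AND g.ds = '2008-04-08'
      AND m.value <> '';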

