incubator-bigtop-commits mailing list archives

From aba...@apache.org
Subject svn commit: r1148501 [18/47] - in /incubator/bigtop: branches/ tags/ trunk/ trunk/docs/ trunk/src/ trunk/src/pkg/ trunk/src/pkg/common/ trunk/src/pkg/common/flume/ trunk/src/pkg/common/hadoop/ trunk/src/pkg/common/hadoop/conf.pseudo/ trunk/src/pkg/comm...
Date Tue, 19 Jul 2011 19:45:54 GMT
Added: incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/bucketmapjoin5/out
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/bucketmapjoin5/out?rev=1148501&view=auto
==============================================================================
--- incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/bucketmapjoin5/out (added)
+++ incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/bucketmapjoin5/out Tue Jul 19 19:44:48 2011
@@ -0,0 +1,781 @@
+CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+
+load data local inpath 'seed_data_files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin
+Copying file: file:/var/lib/hudson/workspace/Nightly-smoke-testing-monster/examples/hive/target/seed_data_files/srcbucket20.txt
+
+load data local inpath 'seed_data_files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin
+Copying file: file:/var/lib/hudson/workspace/Nightly-smoke-testing-monster/examples/hive/target/seed_data_files/srcbucket21.txt
+
+
+CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+
+load data local inpath 'seed_data_files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+Copying file: file:/var/lib/hudson/workspace/Nightly-smoke-testing-monster/examples/hive/target/seed_data_files/srcbucket20.txt
+
+load data local inpath 'seed_data_files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+Copying file: file:/var/lib/hudson/workspace/Nightly-smoke-testing-monster/examples/hive/target/seed_data_files/srcbucket21.txt
+
+load data local inpath 'seed_data_files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+Copying file: file:/var/lib/hudson/workspace/Nightly-smoke-testing-monster/examples/hive/target/seed_data_files/srcbucket22.txt
+
+load data local inpath 'seed_data_files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+Copying file: file:/var/lib/hudson/workspace/Nightly-smoke-testing-monster/examples/hive/target/seed_data_files/srcbucket23.txt
+
+load data local inpath 'seed_data_files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09')
+Copying file: file:/var/lib/hudson/workspace/Nightly-smoke-testing-monster/examples/hive/target/seed_data_files/srcbucket20.txt
+
+load data local inpath 'seed_data_files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09')
+Copying file: file:/var/lib/hudson/workspace/Nightly-smoke-testing-monster/examples/hive/target/seed_data_files/srcbucket21.txt
+
+load data local inpath 'seed_data_files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09')
+Copying file: file:/var/lib/hudson/workspace/Nightly-smoke-testing-monster/examples/hive/target/seed_data_files/srcbucket22.txt
+
+load data local inpath 'seed_data_files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09')
+Copying file: file:/var/lib/hudson/workspace/Nightly-smoke-testing-monster/examples/hive/target/seed_data_files/srcbucket23.txt
+
+
+CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+
+load data local inpath 'seed_data_files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+Copying file: file:/var/lib/hudson/workspace/Nightly-smoke-testing-monster/examples/hive/target/seed_data_files/srcbucket22.txt
+
+load data local inpath 'seed_data_files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+Copying file: file:/var/lib/hudson/workspace/Nightly-smoke-testing-monster/examples/hive/target/seed_data_files/srcbucket23.txt
+
+load data local inpath 'seed_data_files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09')
+Copying file: file:/var/lib/hudson/workspace/Nightly-smoke-testing-monster/examples/hive/target/seed_data_files/srcbucket22.txt
+
+load data local inpath 'seed_data_files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09')
+Copying file: file:/var/lib/hudson/workspace/Nightly-smoke-testing-monster/examples/hive/target/seed_data_files/srcbucket23.txt
+
+
+create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+
+create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+set hive.optimize.bucketmapjoin = true
+
+create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+
+
+explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value)))))
+
+STAGE DEPENDENCIES:
+  Stage-7 is a root stage
+  Stage-1 depends on stages: Stage-7
+  Stage-5 depends on stages: Stage-1 , consists of Stage-4, Stage-3
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+
+STAGE PLANS:
+  Stage: Stage-7
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        a 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        a 
+          TableScan
+            alias: a
+            GatherStats: false
+            HashTable Sink Operator
+              condition expressions:
+                0 {key} {value}
+                1 {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key]]
+                1 [Column[key]]
+              Position of Big Table: 1
+      Bucket Mapjoin Context:
+          Alias Bucket Base File Name Mapping:
+            a {srcbucket20.txt=[srcbucket20.txt], srcbucket21.txt=[srcbucket21.txt], srcbucket22.txt=[srcbucket20.txt], srcbucket23.txt=[srcbucket21.txt], ds=2008-04-09/srcbucket20.txt=[srcbucket20.txt], ds=2008-04-09/srcbucket21.txt=[srcbucket21.txt], ds=2008-04-09/srcbucket22.txt=[srcbucket20.txt], ds=2008-04-09/srcbucket23.txt=[srcbucket21.txt]}
+          Alias Bucket File Name Mapping:
+            a {hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt=[hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin/srcbucket20.txt], hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt=[hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin/srcbucket21.txt], hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt=[hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin/srcbucket20.txt], hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt=[hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin/srcbucket21.txt], hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket20.txt=[hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin/srcbucket20.txt], hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket21.txt=[hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin/srcbucket21.txt], hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket22.txt=[hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin/srcbucket20.txt], hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket23.txt=[hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin/srcbucket21.txt]}
+          Alias Bucket Output File Name Mapping:
+            hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt 0
+            hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt 1
+            hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt 2
+            hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt 3
+            hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket20.txt 0
+            hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket21.txt 1
+            hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket22.txt 2
+            hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket23.txt 3
+
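The "Alias Bucket Base File Name Mapping" above is the heart of the optimization. The big table is bucketed 4 ways and the small table 2 ways; bucket map join requires one bucket count to be a multiple of the other, and pairs big-table bucket N with small-table bucket N mod 2. Taking srcbucket20.txt through srcbucket23.txt as buckets 0 through 3, the mapping printed above works out to:

    bucket 0 mod 2 = 0, so srcbucket20.txt reads small-table srcbucket20.txt
    bucket 1 mod 2 = 1, so srcbucket21.txt reads small-table srcbucket21.txt
    bucket 2 mod 2 = 0, so srcbucket22.txt reads small-table srcbucket20.txt
    bucket 3 mod 2 = 1, so srcbucket23.txt reads small-table srcbucket21.txt
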
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        b 
+          TableScan
+            alias: b
+            GatherStats: false
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              condition expressions:
+                0 {key} {value}
+                1 {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key]]
+                1 [Column[key]]
+              outputColumnNames: _col0, _col1, _col5
+              Position of Big Table: 1
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: int
+                      expr: _col1
+                      type: string
+                      expr: _col5
+                      type: string
+                outputColumnNames: _col0, _col1, _col5
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: int
+                        expr: _col1
+                        type: string
+                        expr: _col5
+                        type: string
+                  outputColumnNames: _col0, _col1, _col2
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+                    directory: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-55-59_802_8749339940103211895/-ext-10002
+                    NumFilesPerFileSink: 1
+                    Stats Publishing Key Prefix: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-55-59_802_8749339940103211895/-ext-10000/
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.types string:string:string
+                          file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_tmp_result
+                          name default.bucketmapjoin_tmp_result
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          transient_lastDdlTime 1301691359
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+      Local Work:
+        Map Reduce Local Work
+      Needs Tagging: false
+      Path -> Alias:
+        hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-08 [b]
+        hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-09 [b]
+      Path -> Partition:
+        hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-08 
+          Partition
+            base file name: ds=2008-04-08
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+            properties:
+              bucket_count 4
+              bucket_field_name key
+              columns key,value
+              columns.types int:string
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-08
+              name default.srcbucket_mapjoin_part
+              partition_columns ds
+              serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              transient_lastDdlTime 1301691354
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count 4
+                bucket_field_name key
+                columns key,value
+                columns.types int:string
+                file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part
+                name default.srcbucket_mapjoin_part
+                partition_columns ds
+                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                transient_lastDdlTime 1301691354
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcbucket_mapjoin_part
+            name: default.srcbucket_mapjoin_part
+        hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-09 
+          Partition
+            base file name: ds=2008-04-09
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-09
+            properties:
+              bucket_count 4
+              bucket_field_name key
+              columns key,value
+              columns.types int:string
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part/ds=2008-04-09
+              name default.srcbucket_mapjoin_part
+              partition_columns ds
+              serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              transient_lastDdlTime 1301691354
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count 4
+                bucket_field_name key
+                columns key,value
+                columns.types int:string
+                file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part
+                name default.srcbucket_mapjoin_part
+                partition_columns ds
+                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                transient_lastDdlTime 1301691354
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcbucket_mapjoin_part
+            name: default.srcbucket_mapjoin_part
+
+  Stage: Stage-5
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+          source: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-55-59_802_8749339940103211895/-ext-10002
+          destination: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-55-59_802_8749339940103211895/-ext-10000
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          source: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-55-59_802_8749339940103211895/-ext-10000
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value1,value2
+                columns.types string:string:string
+                file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_tmp_result
+                name default.bucketmapjoin_tmp_result
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                transient_lastDdlTime 1301691359
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+          tmp directory: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-55-59_802_8749339940103211895/-ext-10001
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+      Stats Aggregation Key Prefix: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-55-59_802_8749339940103211895/-ext-10000/
+
+  Stage: Stage-3
+    Map Reduce
+      Alias -> Map Operator Tree:
+        hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-55-59_802_8749339940103211895/-ext-10002 
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              directory: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-55-59_802_8749339940103211895/-ext-10000
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    columns key,value1,value2
+                    columns.types string:string:string
+                    file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_tmp_result
+                    name default.bucketmapjoin_tmp_result
+                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    transient_lastDdlTime 1301691359
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.bucketmapjoin_tmp_result
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+      Needs Tagging: false
+      Path -> Alias:
+        hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-55-59_802_8749339940103211895/-ext-10002 [hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-55-59_802_8749339940103211895/-ext-10002]
+      Path -> Partition:
+        hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-55-59_802_8749339940103211895/-ext-10002 
+          Partition
+            base file name: -ext-10002
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              columns key,value1,value2
+              columns.types string:string:string
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_tmp_result
+              name default.bucketmapjoin_tmp_result
+              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              transient_lastDdlTime 1301691359
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value1,value2
+                columns.types string:string:string
+                file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_tmp_result
+                name default.bucketmapjoin_tmp_result
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                transient_lastDdlTime 1301691359
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+            name: default.bucketmapjoin_tmp_result
+
+
+
+
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key
+Deleted hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_tmp_result
+
+
+select count(1) from bucketmapjoin_tmp_result
+928
+
+insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+Deleted hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_hash_result_1
+set hive.optimize.bucketmapjoin = false
+
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key
+Deleted hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_tmp_result
+
+
+select count(1) from bucketmapjoin_tmp_result
+928
+
+insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+Deleted hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_hash_result_2
+
+
+select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+0	0	0
+set hive.optimize.bucketmapjoin = true
+
+explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b 
+on a.key=b.key
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value)))))
+
+STAGE DEPENDENCIES:
+  Stage-7 is a root stage
+  Stage-1 depends on stages: Stage-7
+  Stage-5 depends on stages: Stage-1 , consists of Stage-4, Stage-3
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+
+STAGE PLANS:
+  Stage: Stage-7
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        a 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        a 
+          TableScan
+            alias: a
+            GatherStats: false
+            HashTable Sink Operator
+              condition expressions:
+                0 {key} {value}
+                1 {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key]]
+                1 [Column[key]]
+              Position of Big Table: 1
+      Bucket Mapjoin Context:
+          Alias Bucket Base File Name Mapping:
+            a {srcbucket22.txt=[srcbucket20.txt], srcbucket23.txt=[srcbucket21.txt], ds=2008-04-09/srcbucket22.txt=[srcbucket20.txt], ds=2008-04-09/srcbucket23.txt=[srcbucket21.txt]}
+          Alias Bucket File Name Mapping:
+            a {hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt=[hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin/srcbucket20.txt], hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt=[hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin/srcbucket21.txt], hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2/ds=2008-04-09/srcbucket22.txt=[hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin/srcbucket20.txt], hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2/ds=2008-04-09/srcbucket23.txt=[hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin/srcbucket21.txt]}
+          Alias Bucket Output File Name Mapping:
+            hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt 0
+            hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt 1
+            hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2/ds=2008-04-09/srcbucket22.txt 0
+            hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2/ds=2008-04-09/srcbucket23.txt 1
+
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        b 
+          TableScan
+            alias: b
+            GatherStats: false
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              condition expressions:
+                0 {key} {value}
+                1 {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key]]
+                1 [Column[key]]
+              outputColumnNames: _col0, _col1, _col5
+              Position of Big Table: 1
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: int
+                      expr: _col1
+                      type: string
+                      expr: _col5
+                      type: string
+                outputColumnNames: _col0, _col1, _col5
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: int
+                        expr: _col1
+                        type: string
+                        expr: _col5
+                        type: string
+                  outputColumnNames: _col0, _col1, _col2
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+                    directory: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-57-41_645_9053634577540746990/-ext-10002
+                    NumFilesPerFileSink: 1
+                    Stats Publishing Key Prefix: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-57-41_645_9053634577540746990/-ext-10000/
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.types string:string:string
+                          file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_tmp_result
+                          name default.bucketmapjoin_tmp_result
+                          numFiles 1
+                          numPartitions 0
+                          numRows 0
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 17966
+                          transient_lastDdlTime 1301691422
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+      Local Work:
+        Map Reduce Local Work
+      Needs Tagging: false
+      Path -> Alias:
+        hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2/ds=2008-04-08 [b]
+        hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2/ds=2008-04-09 [b]
+      Path -> Partition:
+        hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2/ds=2008-04-08 
+          Partition
+            base file name: ds=2008-04-08
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+            properties:
+              bucket_count 2
+              bucket_field_name key
+              columns key,value
+              columns.types int:string
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2/ds=2008-04-08
+              name default.srcbucket_mapjoin_part_2
+              partition_columns ds
+              serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              transient_lastDdlTime 1301691357
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count 2
+                bucket_field_name key
+                columns key,value
+                columns.types int:string
+                file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2
+                name default.srcbucket_mapjoin_part_2
+                partition_columns ds
+                serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                transient_lastDdlTime 1301691357
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcbucket_mapjoin_part_2
+            name: default.srcbucket_mapjoin_part_2
+        hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2/ds=2008-04-09 
+          Partition
+            base file name: ds=2008-04-09
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-09
+            properties:
+              bucket_count 2
+              bucket_field_name key
+              columns key,value
+              columns.types int:string
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2/ds=2008-04-09
+              name default.srcbucket_mapjoin_part_2
+              partition_columns ds
+              serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              transient_lastDdlTime 1301691357
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count 2
+                bucket_field_name key
+                columns key,value
+                columns.types int:string
+                file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcbucket_mapjoin_part_2
+                name default.srcbucket_mapjoin_part_2
+                partition_columns ds
+                serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                transient_lastDdlTime 1301691357
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcbucket_mapjoin_part_2
+            name: default.srcbucket_mapjoin_part_2
+
+  Stage: Stage-5
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+          source: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-57-41_645_9053634577540746990/-ext-10002
+          destination: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-57-41_645_9053634577540746990/-ext-10000
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          source: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-57-41_645_9053634577540746990/-ext-10000
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value1,value2
+                columns.types string:string:string
+                file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_tmp_result
+                name default.bucketmapjoin_tmp_result
+                numFiles 1
+                numPartitions 0
+                numRows 0
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 17966
+                transient_lastDdlTime 1301691422
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+          tmp directory: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-57-41_645_9053634577540746990/-ext-10001
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+      Stats Aggregation Key Prefix: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-57-41_645_9053634577540746990/-ext-10000/
+
+  Stage: Stage-3
+    Map Reduce
+      Alias -> Map Operator Tree:
+        hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-57-41_645_9053634577540746990/-ext-10002 
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              directory: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-57-41_645_9053634577540746990/-ext-10000
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    columns key,value1,value2
+                    columns.types string:string:string
+                    file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_tmp_result
+                    name default.bucketmapjoin_tmp_result
+                    numFiles 1
+                    numPartitions 0
+                    numRows 0
+                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 17966
+                    transient_lastDdlTime 1301691422
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.bucketmapjoin_tmp_result
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+      Needs Tagging: false
+      Path -> Alias:
+        hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-57-41_645_9053634577540746990/-ext-10002 [hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-57-41_645_9053634577540746990/-ext-10002]
+      Path -> Partition:
+        hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-57-41_645_9053634577540746990/-ext-10002 
+          Partition
+            base file name: -ext-10002
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              columns key,value1,value2
+              columns.types string:string:string
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_tmp_result
+              name default.bucketmapjoin_tmp_result
+              numFiles 1
+              numPartitions 0
+              numRows 0
+              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 17966
+              transient_lastDdlTime 1301691422
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value1,value2
+                columns.types string:string:string
+                file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_tmp_result
+                name default.bucketmapjoin_tmp_result
+                numFiles 1
+                numPartitions 0
+                numRows 0
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 17966
+                transient_lastDdlTime 1301691422
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+            name: default.bucketmapjoin_tmp_result
+
+
+
+
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b 
+on a.key=b.key
+
+
+select count(1) from bucketmapjoin_tmp_result
+928
+
+insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+Deleted hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_hash_result_1
+set hive.optimize.bucketmapjoin = false
+
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b 
+on a.key=b.key
+
+
+select count(1) from bucketmapjoin_tmp_result
+928
+
+insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+Deleted hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/bucketmapjoin_hash_result_2
+
+
+select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+0	0	0
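
The run above captures this smoke test's verification idiom: the same hinted map join is executed once with hive.optimize.bucketmapjoin enabled and once with it disabled (against both the 4-bucket srcbucket_mapjoin_part and the 2-bucket srcbucket_mapjoin_part_2), each result is reduced to per-column hash sums, and the two hash tables are joined. The all-zero row is the pass condition: it shows the optimized and unoptimized plans produced identical data. The idiom, condensed from the statements above:

    set hive.optimize.bucketmapjoin = true;
    insert overwrite table bucketmapjoin_tmp_result
    select /*+mapjoin(a)*/ a.key, a.value, b.value
    from srcbucket_mapjoin a join srcbucket_mapjoin_part b on a.key = b.key;
    insert overwrite table bucketmapjoin_hash_result_1
    select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;

    -- rerun with hive.optimize.bucketmapjoin = false into bucketmapjoin_hash_result_2, then:
    select a.key-b.key, a.value1-b.value1, a.value2-b.value2
    from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
    on a.key = b.key;
    -- expected: a single row of 0  0  0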

Added: incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/drop_multi_partitions/in
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/drop_multi_partitions/in?rev=1148501&view=auto
==============================================================================
--- incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/drop_multi_partitions/in (added)
+++ incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/drop_multi_partitions/in Tue Jul 19 19:44:48 2011
@@ -0,0 +1,17 @@
+create table mp (a string) partitioned by (b string, c string);
+
+alter table mp add partition (b='1', c='1');
+alter table mp add partition (b='1', c='2');
+alter table mp add partition (b='2', c='2');
+
+show partitions mp;
+
+explain extended alter table mp drop partition (b='1');
+alter table mp drop partition (b='1');
+
+show partitions mp;
+
+set hive.exec.drop.ignorenonexistent=false;
+alter table mp drop if exists partition (b='3');
+
+show partitions mp;
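
The script above exercises partial partition specs. alter table mp drop partition (b='1') names only the leading partition column, so it matches every partition with b='1' whatever its c value; both b=1/c=1 and b=1/c=2 are dropped in one statement, as the expected output below confirms. The final pair of statements checks that an explicit if exists suppresses the nonexistent-partition error even after the global escape hatch is turned off:

    set hive.exec.drop.ignorenonexistent=false;
    alter table mp drop if exists partition (b='3');   -- no partition matches; succeeds silently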

Added: incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/drop_multi_partitions/out
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/drop_multi_partitions/out?rev=1148501&view=auto
==============================================================================
--- incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/drop_multi_partitions/out (added)
+++ incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/drop_multi_partitions/out Tue Jul 19 19:44:48 2011
@@ -0,0 +1,43 @@
+create table mp (a string) partitioned by (b string, c string)
+
+
+alter table mp add partition (b='1', c='1')
+
+alter table mp add partition (b='1', c='2')
+
+alter table mp add partition (b='2', c='2')
+
+
+show partitions mp
+b=1/c=1
+b=1/c=2
+b=2/c=2
+
+
+explain extended alter table mp drop partition (b='1')
+ABSTRACT SYNTAX TREE:
+  (TOK_ALTERTABLE_DROPPARTS mp (TOK_PARTSPEC (TOK_PARTVAL b '1')))
+
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+      Drop Table Operator:
+        Drop Table
+          table: mp
+
+
+
+alter table mp drop partition (b='1')
+
+
+show partitions mp
+b=2/c=2
+set hive.exec.drop.ignorenonexistent=false
+
+alter table mp drop if exists partition (b='3')
+
+
+show partitions mp
+b=2/c=2

Added: incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/groupby_map_ppr_multi_distinct/filter
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/groupby_map_ppr_multi_distinct/filter?rev=1148501&view=auto
==============================================================================
--- incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/groupby_map_ppr_multi_distinct/filter (added)
+++ incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/groupby_map_ppr_multi_distinct/filter Tue Jul 19 19:44:48 2011
@@ -0,0 +1,3 @@
+sed -re 's#hdfs://[^/]*/#hdfs://HADOOP/#' \
+     -e 's#hdfs://.*/-(ext|mr)-1000#hdfs://HADOOP/-\1-1000#' \
+     -e 's#transient_lastDdlTime [0-9]*#transient_lastDdlTime JUSTNOW#'
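
This filter script sits alongside the test's in/out pair and is presumably run over the raw query output before it is compared with the expected output, so cluster-specific strings cannot cause spurious mismatches. The three sed expressions rewrite, in order, the NameNode authority, per-query scratch paths, and DDL timestamps to fixed placeholders. For example, lines of the shapes seen in the expected outputs above:

    location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcpart
    directory: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_13-55-59_802_8749339940103211895/-ext-10002
    transient_lastDdlTime 1301676822

come out of the filter as:

    location hdfs://HADOOP/user/hive/warehouse/srcpart
    directory: hdfs://HADOOP/-ext-10002
    transient_lastDdlTime JUSTNOW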

Added: incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/groupby_map_ppr_multi_distinct/in
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/groupby_map_ppr_multi_distinct/in?rev=1148501&view=auto
==============================================================================
--- incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/groupby_map_ppr_multi_distinct/in (added)
+++ incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/groupby_map_ppr_multi_distinct/in Tue Jul 19 19:44:48 2011
@@ -0,0 +1,20 @@
+set hive.map.aggr=true;
+set hive.groupby.skewindata=false;
+set mapred.reduce.tasks=31;
+
+CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE;
+
+EXPLAIN EXTENDED
+FROM srcpart src
+INSERT OVERWRITE TABLE dest1 
+SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(DISTINCT src.value) 
+WHERE src.ds = '2008-04-08'
+GROUP BY substr(src.key,1,1);
+
+FROM srcpart src
+INSERT OVERWRITE TABLE dest1 
+SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(DISTINCT src.value) 
+WHERE src.ds = '2008-04-08'
+GROUP BY substr(src.key,1,1);
+
+SELECT dest1.* FROM dest1;
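
The interesting part of this query is three DISTINCT aggregates over two distinct expressions, combined with map-side aggregation (hive.map.aggr=true) in a single GROUP BY. As the plan below shows, Hive evaluates all of them in one MapReduce job by folding the distinct expressions into the reduce key, and the ds predicate prunes the scan to the 2008-04-08 partitions only. The key layout emitted by the map-side Group By Operator in the plan:

    -- reduce key (sort order +++, partitioned on _col0 alone):
    --   _col0 = substr(key, 1, 1)   grouping key
    --   _col1 = substr(value, 5)    distinct expression for count(DISTINCT ...) and sum(DISTINCT ...)
    --   _col2 = value               distinct expression for count(DISTINCT src.value)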

Added: incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/groupby_map_ppr_multi_distinct/out
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/groupby_map_ppr_multi_distinct/out?rev=1148501&view=auto
==============================================================================
--- incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/groupby_map_ppr_multi_distinct/out (added)
+++ incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/groupby_map_ppr_multi_distinct/out Tue Jul 19 19:44:48 2011
@@ -0,0 +1,283 @@
+set hive.map.aggr=true
+set hive.groupby.skewindata=false
+set mapred.reduce.tasks=31
+
+
+CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE
+
+
+EXPLAIN EXTENDED
+FROM srcpart src
+INSERT OVERWRITE TABLE dest1 
+SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(DISTINCT src.value) 
+WHERE src.ds = '2008-04-08'
+GROUP BY substr(src.key,1,1)
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart) src)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI count (. (TOK_TABLE_OR_COL src) value)))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL src) ds) '2008-04-08')) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src 
+          TableScan
+            alias: src
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate:
+                  expr: (ds = '2008-04-08')
+                  type: boolean
+              Select Operator
+                expressions:
+                      expr: key
+                      type: string
+                      expr: value
+                      type: string
+                outputColumnNames: key, value
+                Group By Operator
+                  aggregations:
+                        expr: count(DISTINCT substr(value, 5))
+                        expr: sum(substr(value, 5))
+                        expr: sum(DISTINCT substr(value, 5))
+                        expr: count(DISTINCT value)
+                  bucketGroup: false
+                  keys:
+                        expr: substr(key, 1, 1)
+                        type: string
+                        expr: substr(value, 5)
+                        type: string
+                        expr: value
+                        type: string
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                  Reduce Output Operator
+                    key expressions:
+                          expr: _col0
+                          type: string
+                          expr: _col1
+                          type: string
+                          expr: _col2
+                          type: string
+                    sort order: +++
+                    Map-reduce partition columns:
+                          expr: _col0
+                          type: string
+                    tag: -1
+                    value expressions:
+                          expr: _col3
+                          type: bigint
+                          expr: _col4
+                          type: double
+                          expr: _col5
+                          type: double
+                          expr: _col6
+                          type: bigint
+      Needs Tagging: false
+      Path -> Alias:
+        hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcpart/ds=2008-04-08/hr=11 [src]
+        hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcpart/ds=2008-04-08/hr=12 [src]
+      Path -> Partition:
+        hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcpart/ds=2008-04-08/hr=11 
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              bucket_count -1
+              columns key,value
+              columns.types string:string
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcpart/ds=2008-04-08/hr=11
+              name default.srcpart
+              partition_columns ds/hr
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              transient_lastDdlTime 1301676822
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.types string:string
+                file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcpart
+                name default.srcpart
+                partition_columns ds/hr
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                transient_lastDdlTime 1301676822
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+        hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcpart/ds=2008-04-08/hr=12 
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              bucket_count -1
+              columns key,value
+              columns.types string:string
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcpart/ds=2008-04-08/hr=12
+              name default.srcpart
+              partition_columns ds/hr
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              transient_lastDdlTime 1301676822
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.types string:string
+                file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/srcpart
+                name default.srcpart
+                partition_columns ds/hr
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                transient_lastDdlTime 1301676822
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations:
+                expr: count(DISTINCT KEY._col1:0._col0)
+                expr: sum(VALUE._col1)
+                expr: sum(DISTINCT KEY._col1:1._col0)
+                expr: count(DISTINCT KEY._col1:2._col0)
+          bucketGroup: false
+          keys:
+                expr: KEY._col0
+                type: string
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: string
+                  expr: _col1
+                  type: bigint
+                  expr: concat(_col0, _col2)
+                  type: string
+                  expr: _col3
+                  type: double
+                  expr: _col4
+                  type: bigint
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
+            Select Operator
+              expressions:
+                    expr: _col0
+                    type: string
+                    expr: UDFToInteger(_col1)
+                    type: int
+                    expr: _col2
+                    type: string
+                    expr: UDFToInteger(_col3)
+                    type: int
+                    expr: UDFToInteger(_col4)
+                    type: int
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+                directory: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_10-08-20_880_8656529853875888350/-ext-10000
+                NumFilesPerFileSink: 1
+                Stats Publishing Key Prefix: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_10-08-20_880_8656529853875888350/-ext-10000/
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,c1,c2,c3,c4
+                      columns.types string:int:string:int:int
+                      file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                      file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/dest1
+                      name default.dest1
+                      serialization.ddl struct dest1 { string key, i32 c1, string c2, i32 c3, i32 c4}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      transient_lastDdlTime 1301677700
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.dest1
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          source: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_10-08-20_880_8656529853875888350/-ext-10000
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,c1,c2,c3,c4
+                columns.types string:int:string:int:int
+                file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                location hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/dest1
+                name default.dest1
+                serialization.ddl struct dest1 { string key, i32 c1, string c2, i32 c3, i32 c4}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                transient_lastDdlTime 1301677700
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest1
+          tmp directory: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_10-08-20_880_8656529853875888350/-ext-10001
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+      Stats Aggregation Key Prefix: hdfs://monster01.sf.cloudera.com:17020/tmp/hive-hudson/hive_2011-04-01_10-08-20_880_8656529853875888350/-ext-10000/
+
+
+
+
+FROM srcpart src
+INSERT OVERWRITE TABLE dest1 
+SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(DISTINCT src.value) 
+WHERE src.ds = '2008-04-08'
+GROUP BY substr(src.key,1,1)
+Deleted hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/dest1
+
+
+SELECT dest1.* FROM dest1
+0	1	00.0	0	1
+1	71	132828.0	10044	71
+2	69	251142.0	15780	69
+3	62	364008.0	20119	62
+4	74	4105526.0	30965	74
+5	6	5794.0	278	6
+6	5	6796.0	331	5
+7	6	71470.0	447	6
+8	8	81524.0	595	8
+9	7	92094.0	577	7
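
As a sanity check, the first result row can be re-derived by hand, which makes the five aggregates easier to read. A hedged spot-check follows (not part of the test output above); it assumes a running hive CLI, the srcpart table loaded as in this suite, and the usual kv1.txt seed data, in which every key beginning with '0' is the key 0 itself with value 'val_0', so substr(value, 5) is always the string '0'.

# Hypothetical spot-check: re-run the aggregation for key prefix '0' only.
hive -e "
  SELECT substr(src.key, 1, 1),
         count(DISTINCT substr(src.value, 5)),   -- one distinct suffix '0' -> 1
         concat(substr(src.key, 1, 1),
                sum(substr(src.value, 5))),      -- '0' || 0.0              -> '00.0'
         sum(DISTINCT substr(src.value, 5)),     -- sum over {'0'}          -> 0.0
         count(DISTINCT src.value)               -- one distinct value      -> 1
  FROM srcpart src
  WHERE src.ds = '2008-04-08' AND substr(src.key, 1, 1) = '0'
  GROUP BY substr(src.key, 1, 1);"
# Matches the first result row (0  1  00.0  0  1) once the plan's
# UDFToInteger casts turn the doubles into ints.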

Added: incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/index_creation/filter
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/index_creation/filter?rev=1148501&view=auto
==============================================================================
--- incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/index_creation/filter (added)
+++ incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/index_creation/filter Tue Jul 19 19:44:48 2011
@@ -0,0 +1,6 @@
+sed -re 's#hdfs://.*/-(ext|mr)-1000#hdfs://HADOOP/-\1-1000#' |
+sed -e  's#hdfs://[^/]*/#hdfs://HADOOP/#' \
+    -e  's#owner:[^,]*,#owner:BORG,#' \
+    -e  's#createTime:[0-9]*,#createTime:JUSTNOW,#' \
+    -e  's#location:hdfs://[^/]*/#location:hdfs://HADOOP/#' \
+    -e  's#transient_lastDdlTime=[0-9]*}#transient_lastDdlTime=JUSTNOW}#'
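
The filter masks the cluster-specific parts of captured `desc extended` output (host names, owners, creation timestamps) so the golden files can be diffed against output from any cluster; presumably both the golden and the fresh output are passed through it before comparison. A quick hedged spot-check, assuming the script above is saved as filter in the current directory:

# Hypothetical spot-check (not part of the commit): confirm that one line
# of table metadata is normalized the same way on any cluster.
echo 'owner:hudson, createTime:1301677813, location:hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/src' \
  | sh filter
# -> owner:BORG, createTime:JUSTNOW, location:hdfs://HADOOP/user/hive/warehouse/src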

Added: incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/index_creation/in
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/index_creation/in?rev=1148501&view=auto
==============================================================================
--- incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/index_creation/in (added)
+++ incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/index_creation/in Tue Jul 19 19:44:48 2011
@@ -0,0 +1,52 @@
+drop index src_index_2 on src;
+drop index src_index_3 on src;
+drop index src_index_4 on src;
+drop index src_index_5 on src;
+drop index src_index_6 on src;
+drop index src_index_7 on src;
+drop index src_index_8 on src;
+drop index src_index_9 on src;
+drop table `_t`;
+
+create index src_index_2 on table src(key) as 'compact' WITH DEFERRED REBUILD;
+desc extended default__src_src_index_2__;
+
+create index src_index_3 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_3;
+desc extended src_idx_src_index_3;
+
+create index src_index_4 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;
+desc extended default__src_src_index_4__;
+
+create index src_index_5 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\';
+desc extended default__src_src_index_5__;
+
+create index src_index_6 on table src(key) as 'compact' WITH DEFERRED REBUILD STORED AS RCFILE;
+desc extended default__src_src_index_6__;
+
+create index src_index_7 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_7 STORED AS RCFILE; 
+desc extended src_idx_src_index_7;
+
+create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2"); 
+desc extended default__src_src_index_8__;
+
+create index src_index_9 on table src(key) as 'compact' WITH DEFERRED REBUILD TBLPROPERTIES ("prop1"="val1", "prop2"="val2"); 
+desc extended default__src_src_index_9__;
+
+create table `_t`(`_i` int, `_j` int);
+create index x on table `_t`(`_j`) as 'compact' WITH DEFERRED REBUILD;
+alter index x on `_t` rebuild;
+
+create index x2 on table `_t`(`_i`,`_j`) as 'compact' WITH DEFERRED
+REBUILD;
+alter index x2 on `_t` rebuild;
+
+drop index src_index_2 on src;
+drop index src_index_3 on src;
+drop index src_index_4 on src;
+drop index src_index_5 on src;
+drop index src_index_6 on src;
+drop index src_index_7 on src;
+drop index src_index_8 on src;
+drop index src_index_9 on src;
+drop table `_t`;
+

Added: incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/index_creation/out
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/index_creation/out?rev=1148501&view=auto
==============================================================================
--- incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/index_creation/out (added)
+++ incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/index_creation/out Tue Jul 19 19:44:48 2011
@@ -0,0 +1,131 @@
+drop index src_index_2 on src
+
+drop index src_index_3 on src
+
+drop index src_index_4 on src
+
+drop index src_index_5 on src
+
+drop index src_index_6 on src
+
+drop index src_index_7 on src
+
+drop index src_index_8 on src
+
+drop index src_index_9 on src
+
+drop table `_t`
+
+
+create index src_index_2 on table src(key) as 'compact' WITH DEFERRED REBUILD
+
+desc extended default__src_src_index_2__
+key	string	
+_bucketname	string	
+_offsets	array<bigint>	
+	 	 
+Detailed Table Information	Table(tableName:default__src_src_index_2__, dbName:default, owner:null, createTime:1301677813, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:_bucketname, type:string, comment:), FieldSchema(name:_offsets, type:array<bigint>, comment:)], location:hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/default__src_src_index_2__, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[Order(col:key, order:1)], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1301677813}, viewOriginalText:null, viewExpandedText:null, tableType:INDEX_TABLE)	
+
+
+create index src_index_3 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_3
+
+desc extended src_idx_src_index_3
+key	string	
+_bucketname	string	
+_offsets	array<bigint>	
+	 	 
+Detailed Table Information	Table(tableName:src_idx_src_index_3, dbName:default, owner:null, createTime:1301677813, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:_bucketname, type:string, comment:), FieldSchema(name:_offsets, type:array<bigint>, comment:)], location:hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/src_idx_src_index_3, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[Order(col:key, order:1)], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1301677813}, viewOriginalText:null, viewExpandedText:null, tableType:INDEX_TABLE)	
+
+
+create index src_index_4 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE
+
+desc extended default__src_src_index_4__
+key	string	
+_bucketname	string	
+_offsets	array<bigint>	
+	 	 
+Detailed Table Information	Table(tableName:default__src_src_index_4__, dbName:default, owner:null, createTime:1301677814, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:_bucketname, type:string, comment:), FieldSchema(name:_offsets, type:array<bigint>, comment:)], location:hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/default__src_src_index_4__, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=	, field.delim=
+
+
+create index src_index_5 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\'
+
+desc extended default__src_src_index_5__
+key	string	
+_bucketname	string	
+_offsets	array<bigint>	
+	 	 
+Detailed Table Information	Table(tableName:default__src_src_index_5__, dbName:default, owner:null, createTime:1301677814, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:_bucketname, type:string, comment:), FieldSchema(name:_offsets, type:array<bigint>, comment:)], location:hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/default__src_src_index_5__, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{escape.delim=\, serialization.format=	, field.delim=
+
+
+create index src_index_6 on table src(key) as 'compact' WITH DEFERRED REBUILD STORED AS RCFILE
+
+desc extended default__src_src_index_6__
+key	string	from deserializer
+_bucketname	string	from deserializer
+_offsets	array<bigint>	from deserializer
+	 	 
+Detailed Table Information	Table(tableName:default__src_src_index_6__, dbName:default, owner:null, createTime:1301677814, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:_bucketname, type:string, comment:), FieldSchema(name:_offsets, type:array<bigint>, comment:)], location:hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/default__src_src_index_6__, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[Order(col:key, order:1)], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1301677814}, viewOriginalText:null, viewExpandedText:null, tableType:INDEX_TABLE)	
+
+
+create index src_index_7 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_7 STORED AS RCFILE
+ 
+desc extended src_idx_src_index_7
+key	string	from deserializer
+_bucketname	string	from deserializer
+_offsets	array<bigint>	from deserializer
+	 	 
+Detailed Table Information	Table(tableName:src_idx_src_index_7, dbName:default, owner:null, createTime:1301677815, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:_bucketname, type:string, comment:), FieldSchema(name:_offsets, type:array<bigint>, comment:)], location:hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/src_idx_src_index_7, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[Order(col:key, order:1)], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1301677815}, viewOriginalText:null, viewExpandedText:null, tableType:INDEX_TABLE)	
+
+
+create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
+ 
+desc extended default__src_src_index_8__
+key	string	
+_bucketname	string	
+_offsets	array<bigint>	
+	 	 
+Detailed Table Information	Table(tableName:default__src_src_index_8__, dbName:default, owner:null, createTime:1301677815, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:_bucketname, type:string, comment:), FieldSchema(name:_offsets, type:array<bigint>, comment:)], location:hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/default__src_src_index_8__, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[Order(col:key, order:1)], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1301677815}, viewOriginalText:null, viewExpandedText:null, tableType:INDEX_TABLE)	
+
+
+create index src_index_9 on table src(key) as 'compact' WITH DEFERRED REBUILD TBLPROPERTIES ("prop1"="val1", "prop2"="val2")
+ 
+desc extended default__src_src_index_9__
+key	string	
+_bucketname	string	
+_offsets	array<bigint>	
+	 	 
+Detailed Table Information	Table(tableName:default__src_src_index_9__, dbName:default, owner:null, createTime:1301677815, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:_bucketname, type:string, comment:), FieldSchema(name:_offsets, type:array<bigint>, comment:)], location:hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/default__src_src_index_9__, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[Order(col:key, order:1)], parameters:{}), partitionKeys:[], parameters:{prop2=val2, prop1=val1, transient_lastDdlTime=1301677815}, viewOriginalText:null, viewExpandedText:null, tableType:INDEX_TABLE)	
+
+
+create table `_t`(`_i` int, `_j` int)
+
+create index x on table `_t`(`_j`) as 'compact' WITH DEFERRED REBUILD
+
+alter index x on `_t` rebuild
+Deleted hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/default___t_x__
+
+
+create index x2 on table `_t`(`_i`,`_j`) as 'compact' WITH DEFERRED
+REBUILD
+
+alter index x2 on `_t` rebuild
+Deleted hdfs://monster01.sf.cloudera.com:17020/user/hive/warehouse/default___t_x2__
+
+
+drop index src_index_2 on src
+
+drop index src_index_3 on src
+
+drop index src_index_4 on src
+
+drop index src_index_5 on src
+
+drop index src_index_6 on src
+
+drop index src_index_7 on src
+
+drop index src_index_8 on src
+
+drop index src_index_9 on src
+
+drop table `_t`
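
Taken together, the three files follow the in/out/filter pattern used throughout these smoke tests: in holds the HiveQL to run, out holds the raw captured output, and filter normalizes both sides before they are compared. A minimal sketch of such a comparison, assuming a local hive CLI and all three files in the current directory (the real test driver may differ):

# Hypothetical driver, for illustration only.
hive -f in 2>&1 | sh filter > actual   # run the script, mask variable parts
sh filter < out > expected             # mask the golden output the same way
diff expected actual && echo 'index_creation: OK'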

Added: incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/join19/in
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/join19/in?rev=1148501&view=auto
==============================================================================
--- incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/join19/in (added)
+++ incubator/bigtop/trunk/test/src/smokes/hive/src/test/resources/scripts/ql/join19/in Tue Jul 19 19:44:48 2011
@@ -0,0 +1,58 @@
+CREATE TABLE triples (foo string, subject string, predicate string, object string, foo2 string);
+
+EXPLAIN
+SELECT t11.subject, t22.object , t33.subject , t55.object, t66.object
+FROM
+(
+SELECT t1.subject
+FROM triples t1
+WHERE
+t1.predicate='http://sofa.semanticweb.org/sofa/v1.0/system#__INSTANCEOF_REL'
+AND
+t1.object='http://ontos/OntosMiner/Common.English/ontology#Citation'
+) t11
+JOIN
+(
+SELECT t2.subject , t2.object
+FROM triples t2
+WHERE
+t2.predicate='http://sofa.semanticweb.org/sofa/v1.0/system#__LABEL_REL'
+) t22
+ON (t11.subject=t22.subject)
+JOIN
+(
+SELECT t3.subject , t3.object
+FROM triples t3
+WHERE
+t3.predicate='http://www.ontosearch.com/2007/12/ontosofa-ns#_from'
+
+) t33
+ON (t11.subject=t33.object)
+JOIN
+(
+SELECT t4.subject
+FROM triples t4
+WHERE
+t4.predicate='http://sofa.semanticweb.org/sofa/v1.0/system#__INSTANCEOF_REL'
+AND
+t4.object='http://ontos/OntosMiner/Common.English/ontology#Author'
+
+) t44
+ON (t44.subject=t33.subject)
+JOIN
+(
+SELECT t5.subject, t5.object
+FROM triples t5
+WHERE
+t5.predicate='http://www.ontosearch.com/2007/12/ontosofa-ns#_to'
+) t55
+ON (t55.subject=t44.subject)
+JOIN
+(
+SELECT t6.subject, t6.object
+FROM triples t6
+WHERE
+t6.predicate='http://sofa.semanticweb.org/sofa/v1.0/system#__LABEL_REL'
+) t66
+ON (t66.subject=t55.object);
+
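Since the script only EXPLAINs the six-way join, no data needs to be loaded: Hive compiles the plan against the empty triples table without launching any MapReduce jobs. A hedged dry run, assuming a local hive CLI:

# Hypothetical dry run (not part of the commit): EXPLAIN only compiles the
# query, so this completes quickly even on an empty cluster.
hive -f in   # prints the syntax tree and stage plans for the chained joins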


