hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From vg...@apache.org
Subject hive git commit: HIVE-19186 : Multi Table INSERT statements query has a flaw for partitioned table when INSERT INTO and INSERT OVERWRITE are used (Steve Yeom via Ashutosh Chauhan)
Date Wed, 25 Apr 2018 03:15:16 GMT
Repository: hive
Updated Branches:
  refs/heads/branch-3 1a8fde196 -> 4db5b55a3


HIVE-19186 : Multi Table INSERT statements query has a flaw for partitioned table when INSERT
INTO and INSERT OVERWRITE are used (Steve Yeom via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <hashutosh@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4db5b55a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4db5b55a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4db5b55a

Branch: refs/heads/branch-3
Commit: 4db5b55a3f8ad41c3abe4585233cf3771b347514
Parents: 1a8fde19
Author: Steve Yeom <syeom@hortonworks.com>
Authored: Thu Apr 12 13:19:00 2018 -0700
Committer: Vineet Garg <vgarg@apache.org>
Committed: Tue Apr 24 20:15:10 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   5 +-
 .../clientpositive/multi_insert_partitioned.q   |  56 ++
 .../multi_insert_partitioned.q.out              | 573 +++++++++++++++++++
 3 files changed, 632 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4db5b55a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 88b5ed8..a00f927 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -7363,10 +7363,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         throw new SemanticException("Failed to allocate write Id", ex);
       }
       ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp, writeId);
+      // For the current context for generating File Sink Operator, it is either INSERT INTO or INSERT OVERWRITE.
+      // So the next line works.
+      boolean isInsertInto = !qb.getParseInfo().isDestToOpTypeInsertOverwrite(dest);
       // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
       // deltas and base and leave them up to the cleaner to clean up
-      boolean isInsertInto = qb.getParseInfo().isInsertIntoTable(
-          dest_tab.getDbName(), dest_tab.getTableName());
       LoadFileType loadType = (!isInsertInto && !destTableIsTransactional)
           ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING;
       ltd.setLoadFileType(loadType);

http://git-wip-us.apache.org/repos/asf/hive/blob/4db5b55a/ql/src/test/queries/clientpositive/multi_insert_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/multi_insert_partitioned.q b/ql/src/test/queries/clientpositive/multi_insert_partitioned.q
new file mode 100644
index 0000000..cd91c46
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/multi_insert_partitioned.q
@@ -0,0 +1,56 @@
+set hive.stats.column.autogather=false;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+drop table intermediate;
+
+create table intermediate(key int) partitioned by (p int) stored as orc;
+insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2;
+insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2;
+insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2;
+
+drop table multi_partitioned;
+
+create table multi_partitioned (key int, key2 int) partitioned by (p int);
+
+from intermediate
+insert into table multi_partitioned partition(p=1) select p, key
+insert into table multi_partitioned partition(p=2) select key, p;
+
+select * from multi_partitioned order by key, key2, p;
+desc formatted multi_partitioned;
+
+from intermediate
+insert overwrite table multi_partitioned partition(p=2) select p, key
+insert overwrite table multi_partitioned partition(p=1) select key, p;
+
+select * from multi_partitioned order by key, key2, p;
+desc formatted multi_partitioned;
+
+from intermediate
+insert into table multi_partitioned partition(p=2) select p, key
+insert overwrite table multi_partitioned partition(p=1) select key, p;
+
+select * from multi_partitioned order by key, key2, p;
+desc formatted multi_partitioned;
+
+from intermediate
+insert into table multi_partitioned partition(p) select p, key, p
+insert into table multi_partitioned partition(p=1) select key, p;
+
+select key, key2, p from multi_partitioned order by key, key2, p;
+desc formatted multi_partitioned;
+
+from intermediate
+insert into table multi_partitioned partition(p) select p, key, 1
+insert into table multi_partitioned partition(p=1) select key, p;
+
+select key, key2, p from multi_partitioned order by key, key2, p;
+desc formatted multi_partitioned;
+
+drop table multi_partitioned;
+
+drop table intermediate;
+
+

http://git-wip-us.apache.org/repos/asf/hive/blob/4db5b55a/ql/src/test/results/clientpositive/multi_insert_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_partitioned.q.out b/ql/src/test/results/clientpositive/multi_insert_partitioned.q.out
new file mode 100644
index 0000000..bc68f05
--- /dev/null
+++ b/ql/src/test/results/clientpositive/multi_insert_partitioned.q.out
@@ -0,0 +1,573 @@
+PREHOOK: query: drop table intermediate
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table intermediate
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@intermediate
+POSTHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@intermediate
+PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@intermediate@p=455
+POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@intermediate@p=455
+POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@intermediate@p=456
+POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@intermediate@p=456
+POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@intermediate@p=457
+POSTHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@intermediate@p=457
+POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: drop table multi_partitioned
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table multi_partitioned
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table multi_partitioned (key int, key2 int) partitioned by (p int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@multi_partitioned
+POSTHOOK: query: create table multi_partitioned (key int, key2 int) partitioned by (p int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@multi_partitioned
+PREHOOK: query: from intermediate
+insert into table multi_partitioned partition(p=1) select p, key
+insert into table multi_partitioned partition(p=2) select key, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@intermediate
+PREHOOK: Input: default@intermediate@p=455
+PREHOOK: Input: default@intermediate@p=456
+PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Output: default@multi_partitioned@p=1
+PREHOOK: Output: default@multi_partitioned@p=2
+POSTHOOK: query: from intermediate
+insert into table multi_partitioned partition(p=1) select p, key
+insert into table multi_partitioned partition(p=2) select key, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@intermediate
+POSTHOOK: Input: default@intermediate@p=455
+POSTHOOK: Input: default@intermediate@p=456
+POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Output: default@multi_partitioned@p=1
+POSTHOOK: Output: default@multi_partitioned@p=2
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+PREHOOK: query: select * from multi_partitioned order by key, key2, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@multi_partitioned
+PREHOOK: Input: default@multi_partitioned@p=1
+PREHOOK: Input: default@multi_partitioned@p=2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from multi_partitioned order by key, key2, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@multi_partitioned
+POSTHOOK: Input: default@multi_partitioned@p=1
+POSTHOOK: Input: default@multi_partitioned@p=2
+#### A masked pattern was here ####
+0	456	2
+10	456	2
+97	455	2
+98	455	2
+100	457	2
+103	457	2
+455	97	1
+455	98	1
+456	0	1
+456	10	1
+457	100	1
+457	103	1
+PREHOOK: query: desc formatted multi_partitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@multi_partitioned
+POSTHOOK: query: desc formatted multi_partitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@multi_partitioned
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+key2                	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+p                   	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	2                   
+	numPartitions       	2                   
+	numRows             	12                  
+	rawDataSize         	74                  
+	totalSize           	86                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: from intermediate
+insert overwrite table multi_partitioned partition(p=2) select p, key
+insert overwrite table multi_partitioned partition(p=1) select key, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@intermediate
+PREHOOK: Input: default@intermediate@p=455
+PREHOOK: Input: default@intermediate@p=456
+PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Output: default@multi_partitioned@p=1
+PREHOOK: Output: default@multi_partitioned@p=2
+POSTHOOK: query: from intermediate
+insert overwrite table multi_partitioned partition(p=2) select p, key
+insert overwrite table multi_partitioned partition(p=1) select key, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@intermediate
+POSTHOOK: Input: default@intermediate@p=455
+POSTHOOK: Input: default@intermediate@p=456
+POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Output: default@multi_partitioned@p=1
+POSTHOOK: Output: default@multi_partitioned@p=2
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from multi_partitioned order by key, key2, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@multi_partitioned
+PREHOOK: Input: default@multi_partitioned@p=1
+PREHOOK: Input: default@multi_partitioned@p=2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from multi_partitioned order by key, key2, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@multi_partitioned
+POSTHOOK: Input: default@multi_partitioned@p=1
+POSTHOOK: Input: default@multi_partitioned@p=2
+#### A masked pattern was here ####
+0	456	1
+10	456	1
+97	455	1
+98	455	1
+100	457	1
+103	457	1
+455	97	2
+455	98	2
+456	0	2
+456	10	2
+457	100	2
+457	103	2
+PREHOOK: query: desc formatted multi_partitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@multi_partitioned
+POSTHOOK: query: desc formatted multi_partitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@multi_partitioned
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+key2                	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+p                   	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	2                   
+	numPartitions       	2                   
+	numRows             	12                  
+	rawDataSize         	74                  
+	totalSize           	86                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: from intermediate
+insert into table multi_partitioned partition(p=2) select p, key
+insert overwrite table multi_partitioned partition(p=1) select key, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@intermediate
+PREHOOK: Input: default@intermediate@p=455
+PREHOOK: Input: default@intermediate@p=456
+PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Output: default@multi_partitioned@p=1
+PREHOOK: Output: default@multi_partitioned@p=2
+POSTHOOK: query: from intermediate
+insert into table multi_partitioned partition(p=2) select p, key
+insert overwrite table multi_partitioned partition(p=1) select key, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@intermediate
+POSTHOOK: Input: default@intermediate@p=455
+POSTHOOK: Input: default@intermediate@p=456
+POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Output: default@multi_partitioned@p=1
+POSTHOOK: Output: default@multi_partitioned@p=2
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from multi_partitioned order by key, key2, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@multi_partitioned
+PREHOOK: Input: default@multi_partitioned@p=1
+PREHOOK: Input: default@multi_partitioned@p=2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from multi_partitioned order by key, key2, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@multi_partitioned
+POSTHOOK: Input: default@multi_partitioned@p=1
+POSTHOOK: Input: default@multi_partitioned@p=2
+#### A masked pattern was here ####
+0	456	1
+10	456	1
+97	455	1
+98	455	1
+100	457	1
+103	457	1
+455	97	2
+455	97	2
+455	98	2
+455	98	2
+456	0	2
+456	0	2
+456	10	2
+456	10	2
+457	100	2
+457	100	2
+457	103	2
+457	103	2
+PREHOOK: query: desc formatted multi_partitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@multi_partitioned
+POSTHOOK: query: desc formatted multi_partitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@multi_partitioned
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+key2                	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+p                   	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	3                   
+	numPartitions       	2                   
+	numRows             	18                  
+	rawDataSize         	111                 
+	totalSize           	129                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: from intermediate
+insert into table multi_partitioned partition(p) select p, key, p
+insert into table multi_partitioned partition(p=1) select key, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@intermediate
+PREHOOK: Input: default@intermediate@p=455
+PREHOOK: Input: default@intermediate@p=456
+PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Output: default@multi_partitioned
+PREHOOK: Output: default@multi_partitioned@p=1
+POSTHOOK: query: from intermediate
+insert into table multi_partitioned partition(p) select p, key, p
+insert into table multi_partitioned partition(p=1) select key, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@intermediate
+POSTHOOK: Input: default@intermediate@p=455
+POSTHOOK: Input: default@intermediate@p=456
+POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Output: default@multi_partitioned@p=1
+POSTHOOK: Output: default@multi_partitioned@p=455
+POSTHOOK: Output: default@multi_partitioned@p=456
+POSTHOOK: Output: default@multi_partitioned@p=457
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=455).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=456).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=457).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select key, key2, p from multi_partitioned order by key, key2, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@multi_partitioned
+PREHOOK: Input: default@multi_partitioned@p=1
+PREHOOK: Input: default@multi_partitioned@p=2
+PREHOOK: Input: default@multi_partitioned@p=455
+PREHOOK: Input: default@multi_partitioned@p=456
+PREHOOK: Input: default@multi_partitioned@p=457
+#### A masked pattern was here ####
+POSTHOOK: query: select key, key2, p from multi_partitioned order by key, key2, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@multi_partitioned
+POSTHOOK: Input: default@multi_partitioned@p=1
+POSTHOOK: Input: default@multi_partitioned@p=2
+POSTHOOK: Input: default@multi_partitioned@p=455
+POSTHOOK: Input: default@multi_partitioned@p=456
+POSTHOOK: Input: default@multi_partitioned@p=457
+#### A masked pattern was here ####
+0	456	1
+0	456	1
+10	456	1
+10	456	1
+97	455	1
+97	455	1
+98	455	1
+98	455	1
+100	457	1
+100	457	1
+103	457	1
+103	457	1
+455	97	2
+455	97	2
+455	97	455
+455	98	2
+455	98	2
+455	98	455
+456	0	2
+456	0	2
+456	0	456
+456	10	2
+456	10	2
+456	10	456
+457	100	2
+457	100	2
+457	100	457
+457	103	2
+457	103	2
+457	103	457
+PREHOOK: query: desc formatted multi_partitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@multi_partitioned
+POSTHOOK: query: desc formatted multi_partitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@multi_partitioned
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+key2                	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+p                   	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	7                   
+	numPartitions       	5                   
+	numRows             	30                  
+	rawDataSize         	185                 
+	totalSize           	215                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: from intermediate
+insert into table multi_partitioned partition(p) select p, key, 1
+insert into table multi_partitioned partition(p=1) select key, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@intermediate
+PREHOOK: Input: default@intermediate@p=455
+PREHOOK: Input: default@intermediate@p=456
+PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Output: default@multi_partitioned
+PREHOOK: Output: default@multi_partitioned@p=1
+POSTHOOK: query: from intermediate
+insert into table multi_partitioned partition(p) select p, key, 1
+insert into table multi_partitioned partition(p=1) select key, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@intermediate
+POSTHOOK: Input: default@intermediate@p=455
+POSTHOOK: Input: default@intermediate@p=456
+POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Output: default@multi_partitioned@p=1
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+PREHOOK: query: select key, key2, p from multi_partitioned order by key, key2, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@multi_partitioned
+PREHOOK: Input: default@multi_partitioned@p=1
+PREHOOK: Input: default@multi_partitioned@p=2
+PREHOOK: Input: default@multi_partitioned@p=455
+PREHOOK: Input: default@multi_partitioned@p=456
+PREHOOK: Input: default@multi_partitioned@p=457
+#### A masked pattern was here ####
+POSTHOOK: query: select key, key2, p from multi_partitioned order by key, key2, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@multi_partitioned
+POSTHOOK: Input: default@multi_partitioned@p=1
+POSTHOOK: Input: default@multi_partitioned@p=2
+POSTHOOK: Input: default@multi_partitioned@p=455
+POSTHOOK: Input: default@multi_partitioned@p=456
+POSTHOOK: Input: default@multi_partitioned@p=457
+#### A masked pattern was here ####
+0	456	1
+0	456	1
+0	456	1
+10	456	1
+10	456	1
+10	456	1
+97	455	1
+97	455	1
+97	455	1
+98	455	1
+98	455	1
+98	455	1
+100	457	1
+100	457	1
+100	457	1
+103	457	1
+103	457	1
+103	457	1
+455	97	1
+455	97	2
+455	97	2
+455	97	455
+455	98	1
+455	98	2
+455	98	2
+455	98	455
+456	0	1
+456	0	2
+456	0	2
+456	0	456
+456	10	1
+456	10	2
+456	10	2
+456	10	456
+457	100	1
+457	100	2
+457	100	2
+457	100	457
+457	103	1
+457	103	2
+457	103	2
+457	103	457
+PREHOOK: query: desc formatted multi_partitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@multi_partitioned
+POSTHOOK: query: desc formatted multi_partitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@multi_partitioned
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+key2                	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+p                   	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	9                   
+	numPartitions       	5                   
+	numRows             	42                  
+	rawDataSize         	259                 
+	totalSize           	301                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table multi_partitioned
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@multi_partitioned
+PREHOOK: Output: default@multi_partitioned
+POSTHOOK: query: drop table multi_partitioned
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@multi_partitioned
+POSTHOOK: Output: default@multi_partitioned
+PREHOOK: query: drop table intermediate
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@intermediate
+PREHOOK: Output: default@intermediate
+POSTHOOK: query: drop table intermediate
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@intermediate
+POSTHOOK: Output: default@intermediate


Mime
View raw message