From: jcamacho@apache.org
To: commits@hive.apache.org
Date: Mon, 21 May 2018 20:25:15 -0000
Subject: [38/51] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction.q b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction.q
index 705dfdc..32c2854 100644
--- a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction.q
+++ b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction.q
@@ -16,67 +16,67 @@ set hive.stats.fetch.column.stats=true;
set hive.tez.bloom.filter.factor=1.0f;

-- Create Tables
-create table alltypesorc_int ( cint int, cstring string ) stored as ORC;
-create table srcpart_date (key string, value string) partitioned by (ds string ) stored as ORC;
-CREATE TABLE srcpart_small(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC;
+create table alltypesorc_int_n1 ( cint int, cstring string ) stored as ORC;
+create table srcpart_date_n7 (key string, value string) partitioned by (ds string ) stored as ORC;
+CREATE TABLE srcpart_small_n3(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC;

-- Add Partitions
-alter table srcpart_date add partition (ds = "2008-04-08");
-alter table srcpart_date add partition (ds = "2008-04-09");
+alter table srcpart_date_n7 add partition (ds = "2008-04-08");
+alter table srcpart_date_n7 add partition (ds = "2008-04-09");

-alter table srcpart_small add partition (ds = "2008-04-08");
-alter table srcpart_small add partition (ds = "2008-04-09");
+alter table srcpart_small_n3 add partition (ds = "2008-04-08");
+alter table srcpart_small_n3 add partition (ds = "2008-04-09");

-- Load
-insert overwrite table alltypesorc_int select cint, cstring1 from alltypesorc;
-insert overwrite table srcpart_date partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08";
-insert overwrite table srcpart_date partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09";
-insert overwrite table srcpart_small partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20;
+insert overwrite table alltypesorc_int_n1 select cint, cstring1 from alltypesorc;
+insert overwrite table srcpart_date_n7 partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08";
+insert overwrite table srcpart_date_n7 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09";
+insert overwrite table srcpart_small_n3 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20;

set hive.tez.dynamic.semijoin.reduction=false;
-analyze table alltypesorc_int compute statistics for columns;
-analyze table srcpart_date compute statistics for columns;
-analyze table srcpart_small compute statistics for columns;
+analyze table alltypesorc_int_n1 compute statistics for columns;
+analyze table srcpart_date_n7 compute statistics for columns;
+analyze table srcpart_small_n3 compute statistics for columns;

-- single column, single key
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);

set hive.tez.dynamic.semijoin.reduction=true;
-- Mix dynamic partition pruning(DPP) and min/max bloom filter optimizations. Should pick the DPP.
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.ds);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.ds);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.ds);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.ds);

set hive.tez.dynamic.semijoin.reduction=false;
--multiple sources, single key
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_small_n3.key1 = alltypesorc_int_n1.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_small_n3.key1 = alltypesorc_int_n1.cstring);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_small_n3.key1 = alltypesorc_int_n1.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_small_n3.key1 = alltypesorc_int_n1.cstring);

set hive.tez.dynamic.semijoin.reduction=false;
-- single source, multiple keys
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1 and srcpart_date_n7.value = srcpart_small_n3.value1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1 and srcpart_date_n7.value = srcpart_small_n3.value1);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1 and srcpart_date_n7.value = srcpart_small_n3.value1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1 and srcpart_date_n7.value = srcpart_small_n3.value1);

set hive.tez.dynamic.semijoin.reduction=false;
-- multiple sources, different keys
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);

-- Explain extended to verify fast start for Reducer in semijoin branch
-EXPLAIN extended select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN extended select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);

set hive.tez.dynamic.semijoin.reduction=false;
-- With Mapjoins, there shouldn't be any semijoin parallel to mapjoin.
@@ -84,64 +84,64 @@ set hive.auto.convert.join=true;
set hive.auto.convert.join.noconditionaltask=true;
set hive.auto.convert.join.noconditionaltask.size=100000000000;

-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);

set hive.tez.dynamic.semijoin.reduction.for.mapjoin=true; -- Enable semijoin parallel to mapjoins.
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
set hive.tez.dynamic.semijoin.reduction.for.mapjoin=false;

set hive.tez.dynamic.semijoin.reduction=false;
-- multiple sources, different keys
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
set hive.tez.dynamic.semijoin.reduction.for.mapjoin=true; -- Enable semijoin parallel to mapjoins.
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
set hive.tez.dynamic.semijoin.reduction.for.mapjoin=false;

-- HIVE-17323 - with DPP, the 1st mapjoin is on a map with DPP and 2nd mapjoin is on a map which had semijoin but still removed.
-create table alltypesorc_int40 as select * from alltypesorc_int limit 40;
+create table alltypesorc_int40 as select * from alltypesorc_int_n1 limit 40;
set hive.tez.dynamic.semijoin.reduction=false;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.ds = srcpart_small.ds) join alltypesorc_int40 on (srcpart_date.value = alltypesorc_int40.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.ds = srcpart_small.ds) join alltypesorc_int40 on (srcpart_date.value = alltypesorc_int40.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.ds = srcpart_small_n3.ds) join alltypesorc_int40 on (srcpart_date_n7.value = alltypesorc_int40.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.ds = srcpart_small_n3.ds) join alltypesorc_int40 on (srcpart_date_n7.value = alltypesorc_int40.cstring);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.ds = srcpart_small.ds) join alltypesorc_int40 on (srcpart_date.value = alltypesorc_int40.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.ds = srcpart_small.ds) join alltypesorc_int40 on (srcpart_date.value = alltypesorc_int40.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.ds = srcpart_small_n3.ds) join alltypesorc_int40 on (srcpart_date_n7.value = alltypesorc_int40.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.ds = srcpart_small_n3.ds) join alltypesorc_int40 on (srcpart_date_n7.value = alltypesorc_int40.cstring);

-- HIVE-17399
-create table srcpart_small10 as select * from srcpart_small limit 10;
+create table srcpart_small10 as select * from srcpart_small_n3 limit 10;
analyze table srcpart_small10 compute statistics for columns;

set hive.tez.dynamic.semijoin.reduction=false;
-EXPLAIN select count(*) from srcpart_small10, srcpart_small, srcpart_date where srcpart_small.key1 = srcpart_small10.key1 and srcpart_date.ds = srcpart_small.ds;
-select count(*) from srcpart_small10, srcpart_small, srcpart_date where srcpart_small.key1 = srcpart_small10.key1 and srcpart_date.ds = srcpart_small.ds;
+EXPLAIN select count(*) from srcpart_small10, srcpart_small_n3, srcpart_date_n7 where srcpart_small_n3.key1 = srcpart_small10.key1 and srcpart_date_n7.ds = srcpart_small_n3.ds;
+select count(*) from srcpart_small10, srcpart_small_n3, srcpart_date_n7 where srcpart_small_n3.key1 = srcpart_small10.key1 and srcpart_date_n7.ds = srcpart_small_n3.ds;
set hive.tez.dynamic.semijoin.reduction=true;
set hive.llap.object.cache.enabled=false;
-EXPLAIN select count(*) from srcpart_small10, srcpart_small, srcpart_date where srcpart_small.key1 = srcpart_small10.key1 and srcpart_date.ds = srcpart_small.ds;
-select count(*) from srcpart_small10, srcpart_small, srcpart_date where srcpart_small.key1 = srcpart_small10.key1 and srcpart_date.ds = srcpart_small.ds;
+EXPLAIN select count(*) from srcpart_small10, srcpart_small_n3, srcpart_date_n7 where srcpart_small_n3.key1 = srcpart_small10.key1 and srcpart_date_n7.ds = srcpart_small_n3.ds;
+select count(*) from srcpart_small10, srcpart_small_n3, srcpart_date_n7 where srcpart_small_n3.key1 = srcpart_small10.key1 and srcpart_date_n7.ds = srcpart_small_n3.ds;

-- HIVE-17936
set hive.tez.dynamic.semijoin.reduction.for.dpp.factor = 0.75;
-EXPLAIN select count(*) from srcpart_small10, srcpart_small, srcpart_date where srcpart_small.key1 = srcpart_small10.key1 and srcpart_date.ds = srcpart_small.ds;
+EXPLAIN select count(*) from srcpart_small10, srcpart_small_n3, srcpart_date_n7 where srcpart_small_n3.key1 = srcpart_small10.key1 and srcpart_date_n7.ds = srcpart_small_n3.ds;

-- semijoin branch should be removed.
set hive.tez.dynamic.semijoin.reduction.for.dpp.factor = 0.4;
-EXPLAIN select count(*) from srcpart_small10, srcpart_small, srcpart_date where srcpart_small.key1 = srcpart_small10.key1 and srcpart_date.ds = srcpart_small.ds;
+EXPLAIN select count(*) from srcpart_small10, srcpart_small_n3, srcpart_date_n7 where srcpart_small_n3.key1 = srcpart_small10.key1 and srcpart_date_n7.ds = srcpart_small_n3.ds;

-- With unions
-explain select * from alltypesorc_int join
-  (select srcpart_date.key as key from srcpart_date
+explain select * from alltypesorc_int_n1 join
+  (select srcpart_date_n7.key as key from srcpart_date_n7
  union all
-  select srcpart_small.key1 as key from srcpart_small) unionsrc on (alltypesorc_int.cstring = unionsrc.key);
+  select srcpart_small_n3.key1 as key from srcpart_small_n3) unionsrc on (alltypesorc_int_n1.cstring = unionsrc.key);

-drop table srcpart_date;
-drop table srcpart_small;
-drop table alltypesorc_int;
+drop table srcpart_date_n7;
+drop table srcpart_small_n3;
+drop table alltypesorc_int_n1;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_2.q b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_2.q
index 044ce5a..20b657e 100644
--- a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_2.q
+++ b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_2.q
@@ -12,14 +12,14 @@ set hive.tez.bigtable.minsize.semijoin.reduction=1;
set hive.tez.min.bloom.filter.entries=1;
set hive.tez.dynamic.semijoin.reduction.threshold=-999999999999;

-CREATE TABLE `table_1`(
+CREATE TABLE `table_1_n2`(
`bigint_col_7` bigint,
`decimal2016_col_26` decimal(20,16),
`tinyint_col_3` tinyint,
`decimal2612_col_77` decimal(26,12),
`timestamp_col_9` timestamp);

-CREATE TABLE `table_18`(
+CREATE TABLE `table_18_n2`(
`tinyint_col_15` tinyint,
`decimal2709_col_9` decimal(27,9),
`tinyint_col_20` tinyint,
@@ -31,29 +31,29 @@ CREATE TABLE `table_18`(

EXPLAIN
SELECT COUNT(*)
-FROM table_1 t1
+FROM table_1_n2 t1
-INNER JOIN table_18 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND
+INNER JOIN table_18_n2 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND
((t2.decimal2709_col_9) = (t1.decimal2016_col_26))) AND
((t2.tinyint_col_20) = (t1.tinyint_col_3))
WHERE (t2.smallint_col_19) IN (SELECT
COALESCE(-92, -994) AS int_col
-FROM table_1 tt1
-INNER JOIN table_18 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77)
+FROM table_1_n2 tt1
+INNER JOIN table_18_n2 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77)
WHERE (t1.timestamp_col_9) = (tt2.timestamp_col_18));

-drop table table_1;
-drop table table_18;
+drop table table_1_n2;
+drop table table_18_n2;

-- Hive 15699
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_n20(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;

-CREATE TABLE src2 as select * from src1;
-insert into src2 select * from src2;
-insert into src2 select * from src2;
+CREATE TABLE src2_n7 as select * from src1;
+insert into src2_n7 select * from src2_n7;
+insert into src2_n7 select * from src2_n7;

-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n20 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n20 partition(ds='2008-04-08');

set hive.strict.checks.bucketing=false;
set hive.join.emit.interval=2;
@@ -61,9 +61,9 @@ set hive.stats.fetch.column.stats=true;
set hive.optimize.bucketingsorting=false;
set hive.stats.autogather=true;

-CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab_n12(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab_n12 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n20;

set hive.convert.join.bucket.mapjoin.tez = true;
set hive.auto.convert.sortmerge.join = true;
@@ -74,20 +74,20 @@ set hive.mapjoin.hybridgrace.minnumpartitions=4;
set hive.llap.memory.oversubscription.max.executors.per.query=3;

-CREATE TABLE tab2 (key int, value string, ds string);
+CREATE TABLE tab2_n6 (key int, value string, ds string);
set hive.exec.dynamic.partition.mode=nonstrict;
-insert into tab2 select key, value, ds from tab;
-analyze table tab2 compute statistics;
-analyze table tab2 compute statistics for columns;
+insert into tab2_n6 select key, value, ds from tab_n12;
+analyze table tab2_n6 compute statistics;
+analyze table tab2_n6 compute statistics for columns;

explain
select count(*) from
-  (select x.key as key, min(x.value) as value from tab2 x group by x.key) a
+  (select x.key as key, min(x.value) as value from tab2_n6 x group by x.key) a
join
-  (select x.key as key, min(x.value) as value from tab2 x group by x.key) b
+  (select x.key as key, min(x.value) as value from tab2_n6 x group by x.key) b
on a.key = b.key
join src1 c on a.value = c.value where c.key < 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_4.q b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_4.q
index 67bf7c8..a04ab66 100644
--- a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_4.q
+++ b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_4.q
@@ -16,17 +16,17 @@ set hive.tez.bloom.filter.factor=1.0f;
set hive.disable.unsafe.external.table.operations=true;

-- Create Tables
-create table srcpart_date (key string, value string) partitioned by (ds string ) stored as ORC;
-CREATE TABLE srcpart_small(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC;
+create table srcpart_date_n1 (key string, value string) partitioned by (ds string ) stored as ORC;
+CREATE TABLE srcpart_small_n0(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC;
create external table srcpart_date_ext (key string, value string) partitioned by (ds string ) stored as ORC;
CREATE external TABLE srcpart_small_ext(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC;

-- Add Partitions
-alter table srcpart_date add partition (ds = "2008-04-08");
-alter table srcpart_date add partition (ds = "2008-04-09");
+alter table srcpart_date_n1 add partition (ds = "2008-04-08");
+alter table srcpart_date_n1 add partition (ds = "2008-04-09");

-alter table srcpart_small add partition (ds = "2008-04-08");
-alter table srcpart_small add partition (ds = "2008-04-09");
+alter table srcpart_small_n0 add partition (ds = "2008-04-08");
+alter table srcpart_small_n0 add partition (ds = "2008-04-09");

alter table srcpart_date_ext add partition (ds = "2008-04-08");
alter table srcpart_date_ext add partition (ds = "2008-04-09");
@@ -35,16 +35,16 @@ alter table srcpart_small_ext add partition (ds = "2008-04-08");
alter table srcpart_small_ext add partition (ds = "2008-04-09");

-- Load
-insert overwrite table srcpart_date partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08";
-insert overwrite table srcpart_date partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09";
-insert overwrite table srcpart_small partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20;
+insert overwrite table srcpart_date_n1 partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08";
+insert overwrite table srcpart_date_n1 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09";
+insert overwrite table srcpart_small_n0 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20;

insert overwrite table srcpart_date_ext partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08";
insert overwrite table srcpart_date_ext partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09";
insert overwrite table srcpart_small_ext partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20;

-analyze table srcpart_date compute statistics for columns;
-analyze table srcpart_small compute statistics for columns;
+analyze table srcpart_date_n1 compute statistics for columns;
+analyze table srcpart_small_n0 compute statistics for columns;
analyze table srcpart_date_ext compute statistics for columns;
analyze table srcpart_small_ext compute statistics for columns;
@@ -53,13 +53,13 @@ analyze table srcpart_small_ext compute statistics for columns;

-- single column, single key
set test.comment=This query should use semijoin reduction optimization;
set test.comment;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n1 join srcpart_small_n0 on (srcpart_date_n1.key = srcpart_small_n0.key1);

set test.comment=Big table is external table - no semijoin reduction opt;
set test.comment;
-EXPLAIN select count(*) from srcpart_date_ext join srcpart_small on (srcpart_date_ext.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_ext join srcpart_small_n0 on (srcpart_date_ext.key = srcpart_small_n0.key1);

set test.comment=Small table is external table - no semijoin reduction opt;
set test.comment;
-EXPLAIN select count(*) from srcpart_date join srcpart_small_ext on (srcpart_date.key = srcpart_small_ext.key1);
+EXPLAIN select count(*) from srcpart_date_n1 join srcpart_small_ext on (srcpart_date_n1.key = srcpart_small_ext.key1);
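Why the suffixes matter: these clientpositive .q files can be executed in batches against a single shared database, so two files that both create a table literally named srcpart_date collide, either because the second CREATE TABLE hits an already-exists error or because a stale copy left behind by an earlier file leaks rows into a later one. After the rename each file owns a uniquely suffixed set: dynamic_semijoin_reduction.q uses srcpart_date_n7 while dynamic_semijoin_reduction_4.q above uses srcpart_date_n1, so both can run in the same batch. A minimal HiveQL sketch of the collision and the fix (the "file A"/"file B" labels are illustrative, not part of the commit):

-- file A and file B run in the same batch against one database.
-- Before HIVE-19617 both files contained the same DDL:
--   create table srcpart_date (key string, value string) partitioned by (ds string) stored as ORC;
-- so whichever file ran second either failed on the duplicate name or
-- silently read partitions loaded by the other file.
-- After the rename, each file creates and drops its own copy:
create table srcpart_date_n7 (key string, value string) partitioned by (ds string) stored as ORC; -- file A
create table srcpart_date_n1 (key string, value string) partitioned by (ds string) stored as ORC; -- file B
drop table srcpart_date_n7; -- file A's cleanup no longer touches file B's tables
drop table srcpart_date_n1;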
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_sw.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_sw.q b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_sw.q
index 3372cee..e4ca184 100644
--- a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_sw.q
+++ b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_sw.q
@@ -15,48 +15,48 @@ set hive.tez.min.bloom.filter.entries=1;
set hive.stats.fetch.column.stats=true;

-- Create Tables
-create table alltypesorc_int ( cint int, cstring string ) stored as ORC;
-create table srcpart_date (key string, value string) partitioned by (ds string ) stored as ORC;
-CREATE TABLE srcpart_small(key1 STRING, value1 STRING) partitioned by (ds1 string) STORED as ORC;
+create table alltypesorc_int_n0 ( cint int, cstring string ) stored as ORC;
+create table srcpart_date_n6 (key string, value string) partitioned by (ds string ) stored as ORC;
+CREATE TABLE srcpart_small_n2(key1 STRING, value1 STRING) partitioned by (ds1 string) STORED as ORC;

-- Add Partitions
-alter table srcpart_date add partition (ds = "2008-04-08");
-alter table srcpart_date add partition (ds = "2008-04-09");
+alter table srcpart_date_n6 add partition (ds = "2008-04-08");
+alter table srcpart_date_n6 add partition (ds = "2008-04-09");

-alter table srcpart_small add partition (ds1 = "2008-04-08");
-alter table srcpart_small add partition (ds1 = "2008-04-09");
+alter table srcpart_small_n2 add partition (ds1 = "2008-04-08");
+alter table srcpart_small_n2 add partition (ds1 = "2008-04-09");

-- Load
-insert overwrite table alltypesorc_int select cint, cstring1 from alltypesorc;
-insert overwrite table srcpart_date partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08";
-insert overwrite table srcpart_date partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09";
-insert overwrite table srcpart_small partition (ds1 = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20;
+insert overwrite table alltypesorc_int_n0 select cint, cstring1 from alltypesorc;
+insert overwrite table srcpart_date_n6 partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08";
+insert overwrite table srcpart_date_n6 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09";
+insert overwrite table srcpart_small_n2 partition (ds1 = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20;

set hive.tez.dynamic.semijoin.reduction=false;
-analyze table alltypesorc_int compute statistics for columns;
-analyze table srcpart_date compute statistics for columns;
-analyze table srcpart_small compute statistics for columns;
+analyze table alltypesorc_int_n0 compute statistics for columns;
+analyze table srcpart_date_n6 compute statistics for columns;
+analyze table srcpart_small_n2 compute statistics for columns;
set hive.tez.dynamic.semijoin.reduction=true;

EXPLAIN
SELECT count(*)
FROM (
  SELECT *
-  FROM (SELECT * FROM srcpart_date WHERE ds = "2008-04-09") `srcpart_date`
-  JOIN (SELECT * FROM srcpart_small WHERE ds1 = "2008-04-08") `srcpart_small`
-  ON (srcpart_date.key = srcpart_small.key1)
-  JOIN alltypesorc_int
-  ON (srcpart_small.key1 = alltypesorc_int.cstring)) a
+  FROM (SELECT * FROM srcpart_date_n6 WHERE ds = "2008-04-09") `srcpart_date_n6`
+  JOIN (SELECT * FROM srcpart_small_n2 WHERE ds1 = "2008-04-08") `srcpart_small_n2`
+  ON (srcpart_date_n6.key = srcpart_small_n2.key1)
+  JOIN alltypesorc_int_n0
+  ON (srcpart_small_n2.key1 = alltypesorc_int_n0.cstring)) a
JOIN (
  SELECT *
-  FROM (SELECT * FROM srcpart_date WHERE ds = "2008-04-08") `srcpart_date`
-  JOIN (SELECT * FROM srcpart_small WHERE ds1 = "2008-04-08") `srcpart_small`
-  ON (srcpart_date.key = srcpart_small.key1)
-  JOIN alltypesorc_int
-  ON (srcpart_small.key1 = alltypesorc_int.cstring)) b
+  FROM (SELECT * FROM srcpart_date_n6 WHERE ds = "2008-04-08") `srcpart_date_n6`
+  JOIN (SELECT * FROM srcpart_small_n2 WHERE ds1 = "2008-04-08") `srcpart_small_n2`
+  ON (srcpart_date_n6.key = srcpart_small_n2.key1)
+  JOIN alltypesorc_int_n0
+  ON (srcpart_small_n2.key1 = alltypesorc_int_n0.cstring)) b
ON ('1' = '1');

-drop table srcpart_date;
-drop table srcpart_small;
-drop table alltypesorc_int;
+drop table srcpart_date_n6;
+drop table srcpart_small_n2;
+drop table alltypesorc_int_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/dynamic_semijoin_user_level.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynamic_semijoin_user_level.q b/ql/src/test/queries/clientpositive/dynamic_semijoin_user_level.q
index e9d0109..5e3a1b8 100644
--- a/ql/src/test/queries/clientpositive/dynamic_semijoin_user_level.q
+++ b/ql/src/test/queries/clientpositive/dynamic_semijoin_user_level.q
@@ -17,65 +17,65 @@ set hive.stats.fetch.column.stats=true;
set hive.llap.memory.oversubscription.max.executors.per.query=0;

-- Create Tables
-create table alltypesorc_int ( cint int, cstring string ) stored as ORC;
-create table srcpart_date (key string, value string) partitioned by (ds string ) stored as ORC;
-CREATE TABLE srcpart_small(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC;
+create table alltypesorc_int_n2 ( cint int, cstring string ) stored as ORC;
+create table srcpart_date_n9 (key string, value string) partitioned by (ds string ) stored as ORC;
+CREATE TABLE srcpart_small_n4(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC;

-- Add Partitions
-alter table srcpart_date add partition (ds = "2008-04-08");
-alter table srcpart_date add partition (ds = "2008-04-09");
+alter table srcpart_date_n9 add partition (ds = "2008-04-08");
+alter table srcpart_date_n9 add partition (ds = "2008-04-09");

-alter table srcpart_small add partition (ds = "2008-04-08");
-alter table srcpart_small add partition (ds = "2008-04-09");
+alter table srcpart_small_n4 add partition (ds = "2008-04-08");
+alter table srcpart_small_n4 add partition (ds = "2008-04-09");

-- Load
-insert overwrite table alltypesorc_int select cint, cstring1 from alltypesorc;
-insert overwrite table srcpart_date partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08";
-insert overwrite table srcpart_date partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09";
-insert overwrite table srcpart_small partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20;
+insert overwrite table alltypesorc_int_n2 select cint, cstring1 from alltypesorc;
+insert overwrite table srcpart_date_n9 partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08";
+insert overwrite table srcpart_date_n9 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09";
+insert overwrite table srcpart_small_n4 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20;

set hive.tez.dynamic.semijoin.reduction=false;
-analyze table alltypesorc_int compute statistics for columns;
-analyze table srcpart_date compute statistics for columns;
-analyze table srcpart_small compute statistics for columns;
+analyze table alltypesorc_int_n2 compute statistics for columns;
+analyze table srcpart_date_n9 compute statistics for columns;
+analyze table srcpart_small_n4 compute statistics for columns;

-- single column, single key
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1);
+select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1);

set hive.tez.dynamic.semijoin.reduction=true;
-- Mix dynamic partition pruning(DPP) and min/max bloom filter optimizations. Should pick the DPP.
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.ds);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.ds);
+EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.ds);
+select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.ds);

set hive.tez.dynamic.semijoin.reduction=false;
--multiple sources, single key
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_small_n4.key1 = alltypesorc_int_n2.cstring);
+select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_small_n4.key1 = alltypesorc_int_n2.cstring);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_small_n4.key1 = alltypesorc_int_n2.cstring);
+select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_small_n4.key1 = alltypesorc_int_n2.cstring);

set hive.tez.dynamic.semijoin.reduction=false;
-- single source, multiple keys
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1);
+EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1 and srcpart_date_n9.value = srcpart_small_n4.value1);
+select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1 and srcpart_date_n9.value = srcpart_small_n4.value1);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1);
+EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1 and srcpart_date_n9.value = srcpart_small_n4.value1);
+select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1 and srcpart_date_n9.value = srcpart_small_n4.value1);

set hive.tez.dynamic.semijoin.reduction=false;
-- multiple sources, different keys
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring);
+select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring);
+select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring);

-- Explain extended to verify fast start for Reducer in semijoin branch
-EXPLAIN extended select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN extended select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1);

set hive.tez.dynamic.semijoin.reduction=false;
-- With Mapjoins.
@@ -83,28 +83,28 @@ set hive.auto.convert.join=true;
set hive.auto.convert.join.noconditionaltask=true;
set hive.auto.convert.join.noconditionaltask.size=100000000000;

-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1);
+select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1);
+select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1);

set hive.tez.dynamic.semijoin.reduction=false;
-- multiple sources, different keys
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring);
+select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring);
+select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring);
--set hive.tez.dynamic.semijoin.reduction=false;

-- With unions
-explain select * from alltypesorc_int join
-  (select srcpart_date.key as key from srcpart_date
+explain select * from alltypesorc_int_n2 join
+  (select srcpart_date_n9.key as key from srcpart_date_n9
  union all
-  select srcpart_small.key1 as key from srcpart_small) unionsrc on (alltypesorc_int.cstring = unionsrc.key);
+  select srcpart_small_n4.key1 as key from srcpart_small_n4) unionsrc on (alltypesorc_int_n2.cstring = unionsrc.key);

-drop table srcpart_date;
-drop table srcpart_small;
-drop table alltypesorc_int;
+drop table srcpart_date_n9;
+drop table srcpart_small_n4;
+drop table alltypesorc_int_n2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q
index af9853c..f2f36d1 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q
@@ -19,9 +19,9 @@ set hive.exec.dynamic.partition.mode=nonstrict;

-drop table t1;
+drop table t1_n147;

-create table t1(
+create table t1_n147(
a string,
b int,
c int,
@@ -30,18 +30,18 @@ partitioned by (e string)
clustered by(a)
sorted by(a desc)
into 10 buckets stored as textfile;

-insert overwrite table t1 partition(e) select a,b,c,d,'epart' from t1_staging;
+insert overwrite table t1_n147 partition(e) select a,b,c,d,'epart' from t1_staging;

select 'bucket_0';
-dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000000_0;
+dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000000_0;
select 'bucket_2';
-dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000002_0;
+dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000002_0;
select 'bucket_4';
-dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000004_0;
+dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000004_0;
select 'bucket_6';
-dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000006_0;
+dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000006_0;
select 'bucket_8';
-dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000008_0;
+dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000008_0;

set hive.optimize.sort.dynamic.partition=false;
set hive.exec.dynamic.partition.mode=nonstrict;
@@ -49,9 +49,9 @@ set hive.exec.dynamic.partition.mode=nonstrict;

-- disable sorted dynamic partition optimization to make sure the results are correct

-drop table t1;
+drop table t1_n147;

-create table t1(
+create table t1_n147(
a string,
b int,
c int,
@@ -60,15 +60,15 @@ partitioned by (e string)
clustered by(a)
sorted by(a desc)
into 10 buckets stored as textfile;

-insert overwrite table t1 partition(e) select a,b,c,d,'epart' from t1_staging;
+insert overwrite table t1_n147 partition(e) select a,b,c,d,'epart' from t1_staging;

select 'bucket_0';
-dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000000_0;
+dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000000_0;
select 'bucket_2';
-dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000002_0;
+dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000002_0;
select 'bucket_4';
-dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000004_0;
+dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000004_0;
select 'bucket_6';
-dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000006_0;
+dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000006_0;
select 'bucket_8';
-dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000008_0;
+dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000008_0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
index 6a90f83..435cdad 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
@@ -10,7 +10,7 @@ set hive.vectorized.execution.enabled=true;

-create table over1k(
+create table over1k_n1(
t tinyint,
si smallint,
i int,
@@ -25,11 +25,11 @@ create table over1k(
row format delimited
fields terminated by '|';

-load data local inpath '../../data/files/over1k' into table over1k;
+load data local inpath '../../data/files/over1k' into table over1k_n1;

-create table over1k_orc like over1k;
+create table over1k_orc like over1k_n1;
alter table over1k_orc set fileformat orc;
-insert overwrite table over1k_orc select * from over1k;
+insert overwrite table over1k_orc select * from over1k_n1;

create table over1k_part_orc(
si smallint,

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
index 3f35b81..436c0ed 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
@@ -10,7 +10,7 @@ set hive.exec.dynamic.partition.mode=nonstrict;

-create table over1k(
+create table over1k_n3(
t tinyint,
si smallint,
i int,
@@ -25,7 +25,7 @@ create table over1k(
row format delimited
fields terminated by '|';

-load data local inpath '../../data/files/over1k' into table over1k;
+load data local inpath '../../data/files/over1k' into table over1k_n3;

create table over1k_part(
si smallint,
@@ -54,29 +54,29 @@ create table over1k_part_buck_sort(
sorted by (f)
into 4 buckets;

-- map-only jobs converted to map-reduce job by hive.optimize.sort.dynamic.partition optimization
-explain insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27;
-explain insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10;
-explain insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27;
-explain insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+explain insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27;
+explain insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27 limit 10;
+explain insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27;
+explain insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27;

-insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27;
-insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10;
-insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27;
-insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27;
+insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27 limit 10;
+insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27;
+insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27;

-- map-reduce jobs modified by hive.optimize.sort.dynamic.partition optimization
-explain insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27;
-explain insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10;
-explain insert into table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27;
-explain insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+explain insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27;
+explain insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27 limit 10;
+explain insert into table over1k_part_buck partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27;
+explain insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27;

-insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27;
-insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10;
-insert into table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27;
-insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27;
+insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27 limit 10;
+insert into table over1k_part_buck partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27;
+insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27;

desc formatted over1k_part partition(ds="foo",t=27);
desc formatted over1k_part partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
@@ -101,19 +101,19 @@ create table over1k_part2(
partitioned by (ds string, t tinyint);

set hive.optimize.sort.dynamic.partition=false;
-explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k_n3 where t is null or t=27 order by i;
set hive.optimize.sort.dynamic.partition=true;
-explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
-explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27;
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k_n3 where t is null or t=27 order by i;
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k_n3 order by i limit 10) tmp where t is null or t=27;

set hive.optimize.sort.dynamic.partition=false;
-explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t;
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k_n3 where t is null or t=27 group by si,i,b,f,t;
set hive.optimize.sort.dynamic.partition=true;
-- tests for HIVE-8162, only partition column 't' should be in last RS operator
-explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t;
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k_n3 where t is null or t=27 group by si,i,b,f,t;

set hive.optimize.sort.dynamic.partition=false;
-insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
+insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k_n3 where t is null or t=27 order by i;

desc formatted over1k_part2 partition(ds="foo",t=27);
desc formatted over1k_part2 partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
@@ -123,7 +123,7 @@ select * from over1k_part2;
select count(*) from over1k_part2;

set hive.optimize.sort.dynamic.partition=true;
-insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
+insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k_n3 where t is null or t=27 order by i;

desc formatted over1k_part2 partition(ds="foo",t=27);
desc formatted over1k_part2 partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
@@ -145,12 +145,12 @@ create table over1k_part_buck_sort2(
sorted by (f)
into 1 buckets;

set hive.optimize.sort.dynamic.partition=false;
-explain insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+explain insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27;
set hive.optimize.sort.dynamic.partition=true;
-explain insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+explain insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27;

set hive.optimize.sort.dynamic.partition=false;
-insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27;

desc formatted over1k_part_buck_sort2 partition(t=27);
desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__");
@@ -159,7 +159,7 @@ select * from over1k_part_buck_sort2;
select count(*) from over1k_part_buck_sort2;

set hive.optimize.sort.dynamic.partition=true;
-insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27;

desc formatted over1k_part_buck_sort2 partition(t=27);
desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__");
@@ -174,21 +174,21 @@ create table over1k_part3(
partitioned by (s string, t tinyint, i int);

set hive.optimize.sort.dynamic.partition=true;
-explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where s="foo";
-explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27;
-explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100;
-explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27;
-explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and s="foo";
-explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27 and s="foo";
-explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27 and s="foo";
-
-insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where s="foo";
-insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27;
-insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100;
-insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27;
-insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and s="foo";
-insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27 and s="foo";
-insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27 and s="foo";
+explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where s="foo";
+explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where t=27;
+explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100;
+explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and t=27;
+explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and s="foo";
+explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where t=27 and s="foo";
+explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and t=27 and s="foo";
+
+insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where s="foo";
+insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where t=27;
+insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100;
+insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and t=27;
+insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and s="foo";
+insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where t=27 and s="foo";
+insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and t=27 and s="foo";

select sum(hash(*)) from over1k_part3;
@@ -200,12 +200,12 @@ create table over1k_part3(
f float)
partitioned by (s string, t tinyint, i int);

set hive.optimize.sort.dynamic.partition=false;
-insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where s="foo";
-insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27;
-insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100;
-insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27;
-insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where 
i=100 and s="foo"; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27 and s="foo"; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27 and s="foo"; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where s="foo"; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where t=27; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and t=27; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and s="foo"; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where t=27 and s="foo"; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and t=27 and s="foo"; select sum(hash(*)) from over1k_part3; http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q ---------------------------------------------------------------------- diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q index 9a52fb4..6dfb51a 100644 --- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q +++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q @@ -11,12 +11,12 @@ set hive.exec.dynamic.partition.mode=nonstrict; -- SORT_QUERY_RESULTS -drop table ss; +drop table ss_n0; drop table ss_orc; drop table ss_part; drop table ss_part_orc; -create table ss ( +create table ss_n0 ( ss_sold_date_sk int, ss_net_paid_inc_tax float, ss_net_profit float); @@ -26,13 +26,13 @@ ss_net_paid_inc_tax float, ss_net_profit float) partitioned by (ss_sold_date_sk int); -load data local inpath '../../data/files/dynpart_test.txt' overwrite into table ss; +load data local inpath '../../data/files/dynpart_test.txt' overwrite into table ss_n0; explain insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk - from ss + from ss_n0 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 group by ss_sold_date_sk, ss_net_paid_inc_tax, @@ -43,7 +43,7 @@ insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk - from ss + from ss_n0 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 group by ss_sold_date_sk, ss_net_paid_inc_tax, @@ -60,7 +60,7 @@ explain insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk - from ss + from ss_n0 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 distribute by ss_sold_date_sk; @@ -68,7 +68,7 @@ insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk - from ss + from ss_n0 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 distribute by ss_sold_date_sk; @@ -85,7 +85,7 @@ explain insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk - from ss + from ss_n0 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 group by ss_sold_date_sk, ss_net_paid_inc_tax, @@ -96,7 +96,7 @@ insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, 
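Note for reviewers: the dynpart_sort_optimization.q changes above only rename the source table; the behavior under test is unchanged. For context, the pattern being exercised is the hive.optimize.sort.dynamic.partition rewrite, which sorts rows on the dynamic partition column so each reducer keeps a single record writer open per partition value. A minimal standalone sketch of the same pattern (demo_src and demo_part are illustrative names, not part of this commit):

set hive.exec.dynamic.partition.mode=nonstrict;
create table demo_src (si smallint, i int, b bigint, f float, t tinyint);
create table demo_part (si smallint, i int, b bigint, f float) partitioned by (t tinyint);

-- with the optimization enabled, the plan gains a reduce stage sorted on t
set hive.optimize.sort.dynamic.partition=true;
explain insert overwrite table demo_part partition(t) select si,i,b,f,t from demo_src;

-- with it disabled, the insert can stay map-only, at the cost of one open
-- writer per partition value per task
set hive.optimize.sort.dynamic.partition=false;
explain insert overwrite table demo_part partition(t) select si,i,b,f,t from demo_src;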
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
index 9a52fb4..6dfb51a 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
@@ -11,12 +11,12 @@ set hive.exec.dynamic.partition.mode=nonstrict;
 
 -- SORT_QUERY_RESULTS
 
-drop table ss;
+drop table ss_n0;
 drop table ss_orc;
 drop table ss_part;
 drop table ss_part_orc;
 
-create table ss (
+create table ss_n0 (
 ss_sold_date_sk int,
 ss_net_paid_inc_tax float,
 ss_net_profit float);
@@ -26,13 +26,13 @@ ss_net_paid_inc_tax float,
 ss_net_profit float)
 partitioned by (ss_sold_date_sk int);
 
-load data local inpath '../../data/files/dynpart_test.txt' overwrite into table ss;
+load data local inpath '../../data/files/dynpart_test.txt' overwrite into table ss_n0;
 
 explain insert overwrite table ss_part partition (ss_sold_date_sk)
 select ss_net_paid_inc_tax,
 ss_net_profit,
 ss_sold_date_sk
-  from ss
+  from ss_n0
 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
 group by ss_sold_date_sk,
 ss_net_paid_inc_tax,
@@ -43,7 +43,7 @@ insert overwrite table ss_part partition (ss_sold_date_sk)
 select ss_net_paid_inc_tax,
 ss_net_profit,
 ss_sold_date_sk
-  from ss
+  from ss_n0
 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
 group by ss_sold_date_sk,
 ss_net_paid_inc_tax,
@@ -60,7 +60,7 @@ explain insert overwrite table ss_part partition (ss_sold_date_sk)
 select ss_net_paid_inc_tax,
 ss_net_profit,
 ss_sold_date_sk
-  from ss
+  from ss_n0
 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
 distribute by ss_sold_date_sk;
@@ -68,7 +68,7 @@ insert overwrite table ss_part partition (ss_sold_date_sk)
 select ss_net_paid_inc_tax,
 ss_net_profit,
 ss_sold_date_sk
-  from ss
+  from ss_n0
 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
 distribute by ss_sold_date_sk;
@@ -85,7 +85,7 @@ explain insert overwrite table ss_part partition (ss_sold_date_sk)
 select ss_net_paid_inc_tax,
 ss_net_profit,
 ss_sold_date_sk
-  from ss
+  from ss_n0
 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
 group by ss_sold_date_sk,
 ss_net_paid_inc_tax,
@@ -96,7 +96,7 @@ insert overwrite table ss_part partition (ss_sold_date_sk)
 select ss_net_paid_inc_tax,
 ss_net_profit,
 ss_sold_date_sk
-  from ss
+  from ss_n0
 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
 group by ss_sold_date_sk,
 ss_net_paid_inc_tax,
@@ -113,7 +113,7 @@ explain insert overwrite table ss_part partition (ss_sold_date_sk)
 select ss_net_paid_inc_tax,
 ss_net_profit,
 ss_sold_date_sk
-  from ss
+  from ss_n0
 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
 distribute by ss_sold_date_sk;
@@ -121,7 +121,7 @@ insert overwrite table ss_part partition (ss_sold_date_sk)
 select ss_net_paid_inc_tax,
 ss_net_profit,
 ss_sold_date_sk
-  from ss
+  from ss_n0
 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
 distribute by ss_sold_date_sk;
@@ -144,9 +144,9 @@ ss_net_paid_inc_tax float,
 ss_net_profit float)
 partitioned by (ss_sold_date_sk int) stored as orc;
 
-insert overwrite table ss_orc select * from ss;
+insert overwrite table ss_orc select * from ss_n0;
 
-drop table ss;
+drop table ss_n0;
 drop table ss_part;
 
 explain insert overwrite table ss_part_orc partition (ss_sold_date_sk)

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/empty_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/empty_join.q b/ql/src/test/queries/clientpositive/empty_join.q
index 088b66a..cac2a6a 100644
--- a/ql/src/test/queries/clientpositive/empty_join.q
+++ b/ql/src/test/queries/clientpositive/empty_join.q
@@ -2,23 +2,23 @@ set hive.auto.convert.join=true;
 set hive.mapjoin.hybridgrace.hashtable=false;
 set hive.explain.user=true;
 
-DROP TABLE IF EXISTS test_1;
-CREATE TABLE test_1 AS SELECT 1 AS id;
+DROP TABLE IF EXISTS test_1_n2;
+CREATE TABLE test_1_n2 AS SELECT 1 AS id;
 
-DROP TABLE IF EXISTS test_2;
-CREATE TABLE test_2 (id INT);
+DROP TABLE IF EXISTS test_2_n2;
+CREATE TABLE test_2_n2 (id INT);
 
-DROP TABLE IF EXISTS test_3;
-CREATE TABLE test_3 AS SELECT 1 AS id;
+DROP TABLE IF EXISTS test_3_n0;
+CREATE TABLE test_3_n0 AS SELECT 1 AS id;
 
 explain
 SELECT t1.id, t2.id, t3.id
-FROM test_1 t1
-LEFT JOIN test_2 t2 ON t1.id = t2.id
-INNER JOIN test_3 t3 ON t1.id = t3.id;
+FROM test_1_n2 t1
+LEFT JOIN test_2_n2 t2 ON t1.id = t2.id
+INNER JOIN test_3_n0 t3 ON t1.id = t3.id;
 
 SELECT t1.id, t2.id, t3.id
-FROM test_1 t1
-LEFT JOIN test_2 t2 ON t1.id = t2.id
-INNER JOIN test_3 t3 ON t1.id = t3.id
+FROM test_1_n2 t1
+LEFT JOIN test_2_n2 t2 ON t1.id = t2.id
+INNER JOIN test_3_n0 t3 ON t1.id = t3.id
 ;
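Note for reviewers: empty_join.q checks that map-join conversion (hive.auto.convert.join=true) stays correct when one join input is empty; the LEFT JOIN must still emit the probe row with a NULL right side instead of dropping it. A standalone sketch of the same scenario (table names are illustrative, not part of this commit):

set hive.auto.convert.join=true;
create table demo_left as select 1 as id;  -- one row
create table demo_right (id int);          -- empty, becomes an empty hash table
select l.id, r.id
from demo_left l
left join demo_right r on l.id = r.id;     -- expected result: 1, NULL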
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/encryption_auto_purge_tables.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/encryption_auto_purge_tables.q b/ql/src/test/queries/clientpositive/encryption_auto_purge_tables.q
index b095557..14c7f7e 100644
--- a/ql/src/test/queries/clientpositive/encryption_auto_purge_tables.q
+++ b/ql/src/test/queries/clientpositive/encryption_auto_purge_tables.q
@@ -3,37 +3,37 @@
 -- we're setting this so that TestNegativeCliDriver.vm doesn't stop processing after DROP TABLE fails;
 
-DROP TABLE IF EXISTS encrypted_table PURGE;
-DROP TABLE IF EXISTS encrypted_ext_table PURGE;
+DROP TABLE IF EXISTS encrypted_table_n5 PURGE;
+DROP TABLE IF EXISTS encrypted_ext_table_n0 PURGE;
 
-CREATE TABLE encrypted_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
+CREATE TABLE encrypted_table_n5 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
 CRYPTO CREATE_KEY --keyName key_128 --bitLength 128;
 CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table;
 
 SHOW TABLES LIKE "encrypted_*";
 
-ALTER TABLE encrypted_table SET TBLPROPERTIES("auto.purge"="true");
+ALTER TABLE encrypted_table_n5 SET TBLPROPERTIES("auto.purge"="true");
 
-INSERT OVERWRITE TABLE encrypted_table SELECT * FROM src;
-SELECT COUNT(*) from encrypted_table;
+INSERT OVERWRITE TABLE encrypted_table_n5 SELECT * FROM src;
+SELECT COUNT(*) from encrypted_table_n5;
 
-TRUNCATE TABLE encrypted_table;
-SELECT COUNT(*) FROM encrypted_table;
+TRUNCATE TABLE encrypted_table_n5;
+SELECT COUNT(*) FROM encrypted_table_n5;
 
-INSERT OVERWRITE TABLE encrypted_table SELECT * FROM src;
-SELECT COUNT(*) FROM encrypted_table;
+INSERT OVERWRITE TABLE encrypted_table_n5 SELECT * FROM src;
+SELECT COUNT(*) FROM encrypted_table_n5;
 
-CREATE EXTERNAL TABLE encrypted_ext_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
-ALTER TABLE encrypted_ext_table SET TBLPROPERTIES("auto.purge"="true");
+CREATE EXTERNAL TABLE encrypted_ext_table_n0 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
+ALTER TABLE encrypted_ext_table_n0 SET TBLPROPERTIES("auto.purge"="true");
 
-INSERT OVERWRITE TABLE encrypted_ext_table SELECT * FROM src;
-SELECT COUNT(*) from encrypted_ext_table;
+INSERT OVERWRITE TABLE encrypted_ext_table_n0 SELECT * FROM src;
+SELECT COUNT(*) from encrypted_ext_table_n0;
 
-DROP TABLE encrypted_table;
-DROP TABLE encrypted_ext_table;
+DROP TABLE encrypted_table_n5;
+DROP TABLE encrypted_ext_table_n0;
 SHOW TABLES LIKE "encrypted_*";
 
 -- cleanup
-DROP TABLE IF EXISTS encrypted_table PURGE;
-DROP TABLE IF EXISTS encrypted_ext_table PURGE;
+DROP TABLE IF EXISTS encrypted_table_n5 PURGE;
+DROP TABLE IF EXISTS encrypted_ext_table_n0 PURGE;
 CRYPTO DELETE_KEY --keyName key_128;
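Note for reviewers: the auto.purge property that encryption_auto_purge_tables.q sets is what makes TRUNCATE and DROP workable inside an encryption zone; with it, Hive deletes the files directly instead of moving them to the HDFS trash, a move that would cross encryption-zone boundaries and fail. A sketch of the property in isolation (demo_purge is an illustrative name, not part of this commit):

create table demo_purge (key int, value string);
alter table demo_purge set tblproperties("auto.purge"="true");
-- data is now removed immediately on truncate/drop, bypassing the HDFS trash
truncate table demo_purge;
drop table demo_purge;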
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/encryption_drop_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/encryption_drop_table.q b/ql/src/test/queries/clientpositive/encryption_drop_table.q
index 2212e7d..884e510 100644
--- a/ql/src/test/queries/clientpositive/encryption_drop_table.q
+++ b/ql/src/test/queries/clientpositive/encryption_drop_table.q
@@ -5,14 +5,14 @@ set hive.cli.errors.ignore=true;
 
-DROP TABLE IF EXISTS encrypted_table;
+DROP TABLE IF EXISTS encrypted_table_n2;
 DROP TABLE IF EXISTS encrypted_ext_table;
 
-CREATE TABLE encrypted_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
+CREATE TABLE encrypted_table_n2 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
 CRYPTO CREATE_KEY --keyName key_128 --bitLength 128;
 CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table;
 
-INSERT OVERWRITE TABLE encrypted_table SELECT * FROM src;
+INSERT OVERWRITE TABLE encrypted_table_n2 SELECT * FROM src;
 
 CREATE EXTERNAL TABLE encrypted_ext_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
 SHOW TABLES LIKE "encrypted_*";
@@ -20,7 +20,7 @@ SHOW TABLES LIKE "encrypted_*";
 DROP TABLE default.encrypted_ext_table;
 SHOW TABLES LIKE "encrypted_*";
 
-DROP TABLE default.encrypted_table;
+DROP TABLE default.encrypted_table_n2;
 SHOW TABLES LIKE "encrypted_*";
 
 DROP TABLE IF EXISTS encrypted_table1;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q b/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
index 6cfb382..b5ea78b 100644
--- a/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
+++ b/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
@@ -8,40 +8,40 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
 -- SORT_QUERY_RESULTS
 
 -- init
-drop table IF EXISTS encryptedTable PURGE;
-drop table IF EXISTS unencryptedTable PURGE;
+drop table IF EXISTS encryptedTable_n0 PURGE;
+drop table IF EXISTS unencryptedTable_n0 PURGE;
 
-create table encryptedTable(value string)
+create table encryptedTable_n0(value string)
 partitioned by (key string) clustered by (value) into 2 buckets stored as orc
 LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encryptedTable' TBLPROPERTIES ('transactional'='true');
 CRYPTO CREATE_KEY --keyName key_1 --bitLength 128;
 CRYPTO CREATE_ZONE --keyName key_1 --path ${hiveconf:hive.metastore.warehouse.dir}/encryptedTable;
 
-create table unencryptedTable(value string)
+create table unencryptedTable_n0(value string)
 partitioned by (key string) clustered by (value) into 2 buckets stored as orc
 TBLPROPERTIES ('transactional'='true');
 
 -- insert encrypted table from values
-insert into table encryptedTable partition (key) values
+insert into table encryptedTable_n0 partition (key) values
 ('val_501', '501'),
 ('val_502', '502');
 
-select * from encryptedTable order by key;
+select * from encryptedTable_n0 order by key;
 
 -- insert encrypted table from unencrypted source
 from src
-insert into table encryptedTable partition (key)
+insert into table encryptedTable_n0 partition (key)
 select value, key limit 2;
 
-select * from encryptedTable order by key;
+select * from encryptedTable_n0 order by key;
 
 -- insert unencrypted table from encrypted source
-from encryptedTable
-insert into table unencryptedTable partition (key)
+from encryptedTable_n0
+insert into table unencryptedTable_n0 partition (key)
 select value, key;
 
-select * from unencryptedTable order by key;
+select * from unencryptedTable_n0 order by key;
 
 -- clean up
-drop table encryptedTable PURGE;
+drop table encryptedTable_n0 PURGE;
 CRYPTO DELETE_KEY --keyName key_1;
-drop table unencryptedTable PURGE;
+drop table unencryptedTable_n0 PURGE;
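Note for reviewers: encryption_insert_partition_dynamic.q relies on Hive's FROM-first insert form against transactional, bucketed tables. For readers less familiar with that syntax, the shape is as follows (source and target names are illustrative, not part of this commit):

-- the source is named once, then one or more INSERT clauses consume it;
-- with a dynamic partition spec, the partition column goes last in the select list
from demo_source
insert into table demo_target partition (key)
  select value, key;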
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/encryption_insert_values.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/encryption_insert_values.q b/ql/src/test/queries/clientpositive/encryption_insert_values.q
index a712f35..242515e 100644
--- a/ql/src/test/queries/clientpositive/encryption_insert_values.q
+++ b/ql/src/test/queries/clientpositive/encryption_insert_values.q
@@ -1,13 +1,13 @@
 -- SORT_QUERY_RESULTS;
 set hive.stats.column.autogather=false;
 
-DROP TABLE IF EXISTS encrypted_table PURGE;
-CREATE TABLE encrypted_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
+DROP TABLE IF EXISTS encrypted_table_n3 PURGE;
+CREATE TABLE encrypted_table_n3 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
 CRYPTO CREATE_KEY --keyName key_128 --bitLength 128;
 CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table;
 
-INSERT INTO encrypted_table values(1,'foo'),(2,'bar');
+INSERT INTO encrypted_table_n3 values(1,'foo'),(2,'bar');
 
-select * from encrypted_table;
+select * from encrypted_table_n3;
 
 CRYPTO DELETE_KEY --keyName key_128;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q b/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q
index a96807e..834bfbc 100644
--- a/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q
+++ b/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q
@@ -1,20 +1,20 @@
-DROP TABLE IF EXISTS encrypted_table PURGE;
+DROP TABLE IF EXISTS encrypted_table_n0 PURGE;
 
-CREATE TABLE encrypted_table (key STRING, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encrypted_table';
+CREATE TABLE encrypted_table_n0 (key STRING, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encrypted_table';
 
 -- Create encryption key and zone;
 crypto create_key --keyName key1;
 crypto create_zone --keyName key1 --path ${hiveconf:hive.metastore.warehouse.dir}/encrypted_table;
 
 -- Test loading data from the local filesystem;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE encrypted_table;
-SELECT * FROM encrypted_table;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE encrypted_table_n0;
+SELECT * FROM encrypted_table_n0;
 
 -- Test loading data from the hdfs filesystem;
 dfs -copyFromLocal ../../data/files/kv1.txt hdfs:///tmp/kv1.txt;
-LOAD DATA INPATH '/tmp/kv1.txt' OVERWRITE INTO TABLE encrypted_table;
-SELECT * FROM encrypted_table;
+LOAD DATA INPATH '/tmp/kv1.txt' OVERWRITE INTO TABLE encrypted_table_n0;
+SELECT * FROM encrypted_table_n0;
 
-DROP TABLE encrypted_table PURGE;
+DROP TABLE encrypted_table_n0 PURGE;
 crypto delete_key --keyName key1;
\ No newline at end of file
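Note for reviewers: the CRYPTO / crypto statements throughout these encryption tests are not HiveQL; the encrypted q-test driver intercepts them and performs the corresponding HDFS key and encryption-zone operations. The lifecycle the tests follow is always the same (the key name and path below are illustrative, not part of this commit):

CRYPTO CREATE_KEY --keyName demo_key --bitLength 128;
CRYPTO CREATE_ZONE --keyName demo_key --path /user/hive/warehouse/demo_table;
-- ... LOAD DATA / INSERT / SELECT against the table rooted at that path ...
CRYPTO DELETE_KEY --keyName demo_key;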
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/encryption_move_tbl.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/encryption_move_tbl.q b/ql/src/test/queries/clientpositive/encryption_move_tbl.q
index ff1d57c..b636076 100644
--- a/ql/src/test/queries/clientpositive/encryption_move_tbl.q
+++ b/ql/src/test/queries/clientpositive/encryption_move_tbl.q
@@ -6,11 +6,11 @@ set hive.stats.column.autogather=false;
 set hive.cli.errors.ignore=true;
 
-DROP TABLE IF EXISTS encrypted_table PURGE;
+DROP TABLE IF EXISTS encrypted_table_n1 PURGE;
 DROP DATABASE IF EXISTS encrypted_db;
 
--- create table default.encrypted_table in its default warehouse location ${hiveconf:hive.metastore.warehouse.dir}/encrypted_table
-CREATE TABLE encrypted_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encrypted_table';
+-- create table default.encrypted_table_n1 in its default warehouse location ${hiveconf:hive.metastore.warehouse.dir}/encrypted_table
+CREATE TABLE encrypted_table_n1 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encrypted_table';
 CRYPTO CREATE_KEY --keyName key_128 --bitLength 128;
 CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/encrypted_table;
@@ -19,20 +19,20 @@ CREATE DATABASE encrypted_db LOCATION '${hiveconf:hive.metastore.warehouse.dir}/
 CRYPTO CREATE_KEY --keyName key_128_2 --bitLength 128;
 CRYPTO CREATE_ZONE --keyName key_128_2 --path ${hiveconf:hive.metastore.warehouse.dir}/encrypted_db.db;
 
-INSERT OVERWRITE TABLE encrypted_table SELECT * FROM src;
+INSERT OVERWRITE TABLE encrypted_table_n1 SELECT * FROM src;
 SHOW TABLES LIKE "encrypted_*";
 
-ANALYZE TABLE encrypted_table COMPUTE STATISTICS FOR COLUMNS;
-DESCRIBE FORMATTED encrypted_table key;
-DESCRIBE FORMATTED encrypted_table value;
+ANALYZE TABLE encrypted_table_n1 COMPUTE STATISTICS FOR COLUMNS;
+DESCRIBE FORMATTED encrypted_table_n1 key;
+DESCRIBE FORMATTED encrypted_table_n1 value;
 
 -- should fail, since they are in different encryption zones, but table columns statistics should not change
-ALTER TABLE default.encrypted_table RENAME TO encrypted_db.encrypted_table_2;
+ALTER TABLE default.encrypted_table_n1 RENAME TO encrypted_db.encrypted_table_2;
 SHOW TABLES;
-DESCRIBE FORMATTED encrypted_table key;
-DESCRIBE FORMATTED encrypted_table value;
+DESCRIBE FORMATTED encrypted_table_n1 key;
+DESCRIBE FORMATTED encrypted_table_n1 value;
 
 -- should succeed in Hadoop 2.7 but fail in 2.6 (HDFS-7530)
-ALTER TABLE default.encrypted_table RENAME TO default.plain_table;
+ALTER TABLE default.encrypted_table_n1 RENAME TO default.plain_table;
 SHOW TABLES;
 
 -- create table encrypted_table_outloc under default database but in a specified location other than the default db location in the warehouse
@@ -49,20 +49,20 @@ CRYPTO CREATE_KEY --keyName key_128_4 --bitLength 128;
 CRYPTO CREATE_ZONE --keyName key_128_4 --path ${hiveconf:hive.metastore.warehouse.dir}/../specified_db_location;
 
 USE encrypted_db_outloc;
-CREATE TABLE encrypted_table (key INT, value STRING);
-INSERT OVERWRITE TABLE encrypted_table SELECT * FROM default.src;
-ALTER TABLE encrypted_table RENAME TO renamed_encrypted_table;
+CREATE TABLE encrypted_table_n1 (key INT, value STRING);
+INSERT OVERWRITE TABLE encrypted_table_n1 SELECT * FROM default.src;
+ALTER TABLE encrypted_table_n1 RENAME TO renamed_encrypted_table_n1;
 -- should succeed since data moves within specified_db_location
 SHOW TABLES;
 -- should fail, since they are in different encryption zones
-ALTER TABLE encrypted_db_outloc.renamed_encrypted_table RENAME TO default.plain_table_2;
+ALTER TABLE encrypted_db_outloc.renamed_encrypted_table_n1 RENAME TO default.plain_table_2;
 SHOW TABLES;
 
-DROP TABLE default.encrypted_table PURGE;
+DROP TABLE default.encrypted_table_n1 PURGE;
 DROP TABLE default.plain_table PURGE;
 DROP TABLE default.renamed_encrypted_table_outloc PURGE;
 DROP DATABASE encrypted_db;
-DROP TABLE encrypted_db_outloc.renamed_encrypted_table PURGE;
+DROP TABLE encrypted_db_outloc.renamed_encrypted_table_n1 PURGE;
 DROP DATABASE encrypted_db_outloc;
 CRYPTO DELETE_KEY --keyName key_128;
 CRYPTO DELETE_KEY --keyName key_128_2;
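Note for reviewers: encryption_move_tbl.q encodes the rename rules these tests depend on: ALTER TABLE ... RENAME physically moves the table directory, so it can only succeed when source and destination resolve inside the same encryption zone, and a failed cross-zone rename must leave the metadata, including column statistics, untouched. Schematically (database and table names are illustrative, not part of this commit):

-- same zone: the directory move stays inside one encryption zone, so this succeeds
ALTER TABLE zone_db.t1 RENAME TO zone_db.t2;
-- cross zone: HDFS rejects the underlying rename, the statement fails,
-- and t2's column statistics must be unchanged afterwards
ALTER TABLE zone_db.t2 RENAME TO other_db.t2;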
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q b/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q
index c9ac035..94052a5 100644
--- a/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q
+++ b/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q
@@ -1,16 +1,16 @@
 -- SORT_QUERY_RESULTS
 
-DROP TABLE IF EXISTS encrypted_table PURGE;
-CREATE TABLE encrypted_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
+DROP TABLE IF EXISTS encrypted_table_n4 PURGE;
+CREATE TABLE encrypted_table_n4 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
 CRYPTO CREATE_KEY --keyName key_128 --bitLength 128;
 CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table;
 
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE encrypted_table;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE encrypted_table_n4;
 
 dfs -chmod -R 555 ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table;
 
-SELECT count(*) FROM encrypted_table;
+SELECT count(*) FROM encrypted_table_n4;
 
-drop table encrypted_table PURGE;
+drop table encrypted_table_n4 PURGE;
 CRYPTO DELETE_KEY --keyName key_128;
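Note for reviewers: the last file, encryption_select_read_only_encrypted_tbl.q, verifies that reading an encrypted table needs only read access on the files plus access to the encryption key, not write access; the test revokes write permission after loading and the SELECT must still succeed. The core of the scenario (table name and path are illustrative, not part of this commit):

LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE demo_ro;
dfs -chmod -R 555 /user/hive/warehouse/default/demo_ro;
SELECT count(*) FROM demo_ro;  -- must succeed against the now read-only directory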