impala-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From tarmstr...@apache.org
Subject [03/50] [abbrv] incubator-impala git commit: Use unique_database fixture in test_compute_stats.py.
Date Thu, 12 May 2016 22:09:38 GMT
Use unique_database fixture in test_compute_stats.py.

This patch makes it a little easier to use the unique_database fixture
with .test files. The RESULTS section can now contain $DATABASE which
is replaced with the current database by the test framework.

Testing:
- ran the test locally on exhaustive
- ran the test on hdfs and the local filesystem on Jenkins

Change-Id: I8655eb769003f88c0e1ec1b254118e4ec3353b48
Reviewed-on: http://gerrit.cloudera.org:8080/2947
Reviewed-by: Alex Behm <alex.behm@cloudera.com>
Tested-by: Internal Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/a41710a0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/a41710a0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/a41710a0

Branch: refs/heads/master
Commit: a41710a0c80f2fb72438f7e555050e86472eaf74
Parents: 9a37638
Author: Alex Behm <alex.behm@cloudera.com>
Authored: Tue May 3 00:16:41 2016 -0700
Committer: Tim Armstrong <tarmstrong@cloudera.com>
Committed: Thu May 12 14:17:50 2016 -0700

----------------------------------------------------------------------
 .../QueryTest/compute-stats-decimal.test        |  20 +-
 .../QueryTest/compute-stats-incremental.test    | 146 +++++++-------
 .../QueryTest/compute-stats-keywords.test       |  15 ++
 .../queries/QueryTest/compute-stats.test        | 192 ++++++++-----------
 .../queries/QueryTest/corrupt-stats.test        | 181 +++++++++++++++++
 .../queries/QueryTest/corrupt_stats.test        | 184 ------------------
 tests/common/impala_test_suite.py               |   2 +
 tests/metadata/test_compute_stats.py            |  60 +++---
 8 files changed, 393 insertions(+), 407 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a41710a0/testdata/workloads/functional-query/queries/QueryTest/compute-stats-decimal.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/compute-stats-decimal.test b/testdata/workloads/functional-query/queries/QueryTest/compute-stats-decimal.test
index 2b902c7..c5baff9 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/compute-stats-decimal.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/compute-stats-decimal.test
@@ -1,17 +1,17 @@
 ====
 ---- QUERY
 # test compute stats on a partitioned decimal text table
-create table compute_stats_db.decimal_tbl like functional.decimal_tbl;
-insert into compute_stats_db.decimal_tbl partition(d6)
+create table decimal_tbl like functional.decimal_tbl;
+insert into decimal_tbl partition(d6)
 select * from functional.decimal_tbl;
 ====
 ---- QUERY
-compute stats compute_stats_db.decimal_tbl
+compute stats decimal_tbl
 ---- RESULTS
 'Updated 1 partition(s) and 5 column(s).'
 ====
 ---- QUERY
-show table stats compute_stats_db.decimal_tbl
+show table stats decimal_tbl
 ---- LABELS
 d6, #Rows, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental Stats, Location
 ---- RESULTS
@@ -21,7 +21,7 @@ d6, #Rows, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental St
 STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.decimal_tbl
+show column stats decimal_tbl
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -36,16 +36,16 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
 # test compute stats on a mixed-type parquet table
-create table compute_stats_db.mixed_types(a int, b decimal(10,0)) stored as parquet;
-insert into compute_stats_db.mixed_types values (1, 2), (3, 4);
+create table mixed_types(a int, b decimal(10,0)) stored as parquet;
+insert into mixed_types values (1, 2), (3, 4);
 ====
 ---- QUERY
-compute stats compute_stats_db.mixed_types
+compute stats mixed_types
 ---- RESULTS
 'Updated 1 partition(s) and 2 column(s).'
 ====
 ---- QUERY
-show table stats compute_stats_db.mixed_types
+show table stats mixed_types
 ---- LABELS
 #Rows, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental Stats, Location
 ---- RESULTS
@@ -54,7 +54,7 @@ show table stats compute_stats_db.mixed_types
 BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.mixed_types
+show column stats mixed_types
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a41710a0/testdata/workloads/functional-query/queries/QueryTest/compute-stats-incremental.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/compute-stats-incremental.test b/testdata/workloads/functional-query/queries/QueryTest/compute-stats-incremental.test
index 7c41aad..382737e 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/compute-stats-incremental.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/compute-stats-incremental.test
@@ -1,19 +1,19 @@
 ====
 ---- QUERY
 # test computing stats on a partitioned text table with all types
-create table compute_stats_db.alltypes_incremental like functional.alltypes;
-insert into compute_stats_db.alltypes_incremental partition(year, month)
+create table alltypes_incremental like functional.alltypes;
+insert into alltypes_incremental partition(year, month)
 select * from functional.alltypes;
 ====
 ---- QUERY
-compute incremental stats compute_stats_db.alltypes_incremental
+compute incremental stats alltypes_incremental
 ---- RESULTS
 'Updated 24 partition(s) and 11 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.alltypes_incremental
+show table stats alltypes_incremental
 ---- RESULTS
 '2009','1',310,1,'24.56KB','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
 '2009','2',280,1,'22.27KB','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
@@ -44,7 +44,7 @@ show table stats compute_stats_db.alltypes_incremental
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.alltypes_incremental
+show column stats alltypes_incremental
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -65,10 +65,10 @@ COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
-drop incremental stats compute_stats_db.alltypes_incremental partition(year=2010, month=12)
+drop incremental stats alltypes_incremental partition(year=2010, month=12)
 ====
 ---- QUERY
-show table stats compute_stats_db.alltypes_incremental;
+show table stats alltypes_incremental;
 ---- RESULTS
 '2009','1',310,1,'24.56KB','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
 '2009','2',280,1,'22.27KB','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
@@ -99,14 +99,14 @@ show table stats compute_stats_db.alltypes_incremental;
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-compute incremental stats compute_stats_db.alltypes_incremental
+compute incremental stats alltypes_incremental
 ---- RESULTS
 'Updated 1 partition(s) and 11 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.alltypes_incremental;
+show table stats alltypes_incremental;
 ---- RESULTS
 '2009','1',310,1,'24.56KB','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
 '2009','2',280,1,'22.27KB','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
@@ -137,7 +137,7 @@ show table stats compute_stats_db.alltypes_incremental;
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.alltypes_incremental
+show column stats alltypes_incremental
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -158,18 +158,18 @@ COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
-create table compute_stats_db.incremental_empty_partitioned (i int) partitioned by (j int);
-alter table compute_stats_db.incremental_empty_partitioned add partition (j=1);
+create table incremental_empty_partitioned (i int) partitioned by (j int);
+alter table incremental_empty_partitioned add partition (j=1);
 ====
 ---- QUERY
-compute incremental stats compute_stats_db.incremental_empty_partitioned;
+compute incremental stats incremental_empty_partitioned;
 ---- RESULTS
 'Updated 1 partition(s) and 1 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.incremental_empty_partitioned;
+show table stats incremental_empty_partitioned;
 ---- RESULTS
 '1',0,0,'0B','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
 'Total',0,0,'0B','0B','','','',''
@@ -177,18 +177,18 @@ show table stats compute_stats_db.incremental_empty_partitioned;
 STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-drop stats compute_stats_db.incremental_empty_partitioned;
+drop stats incremental_empty_partitioned;
 ====
 ---- QUERY
 # IMPALA-2199: Test that compute incremental stats with a partition spec on an empty partition populates the row count.
-compute incremental stats compute_stats_db.incremental_empty_partitioned partition(j=1);
+compute incremental stats incremental_empty_partitioned partition(j=1);
 ---- RESULTS
 'Updated 1 partition(s) and 1 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.incremental_empty_partitioned;
+show table stats incremental_empty_partitioned;
 ---- RESULTS
 '1',0,0,'0B','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
 'Total',0,0,'0B','0B','','','',''
@@ -196,18 +196,18 @@ show table stats compute_stats_db.incremental_empty_partitioned;
 STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-drop incremental stats compute_stats_db.alltypes_incremental partition(year=2010, month=1);
-drop incremental stats compute_stats_db.alltypes_incremental partition(year=2010, month=2);
+drop incremental stats alltypes_incremental partition(year=2010, month=1);
+drop incremental stats alltypes_incremental partition(year=2010, month=2);
 ====
 ---- QUERY
-compute incremental stats compute_stats_db.alltypes_incremental partition(year=2010, month=2);
+compute incremental stats alltypes_incremental partition(year=2010, month=2);
 ---- RESULTS
 'Updated 1 partition(s) and 11 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.alltypes_incremental;
+show table stats alltypes_incremental;
 ---- RESULTS
 '2009','1',310,1,'24.56KB','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
 '2009','2',280,1,'22.27KB','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
@@ -238,7 +238,7 @@ show table stats compute_stats_db.alltypes_incremental;
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.alltypes_incremental
+show column stats alltypes_incremental
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -260,8 +260,8 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
 # Confirm that dropping stats drops incremental stats as well
-drop stats compute_stats_db.alltypes_incremental;
-show table stats compute_stats_db.alltypes_incremental;
+drop stats alltypes_incremental;
+show table stats alltypes_incremental;
 ---- RESULTS
 '2009','1',-1,1,'24.56KB','NOT CACHED','NOT CACHED','TEXT','false',regex:.*
 '2009','2',-1,1,'22.27KB','NOT CACHED','NOT CACHED','TEXT','false',regex:.*
@@ -293,14 +293,14 @@ STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
 # Test that many partition keys work correctly
-create table compute_stats_db.incremental_many_part_keys(col int)
+create table incremental_many_part_keys(col int)
 partitioned by (p1 int, p2 int, p3 int, p4 int, p5 int, p6 int);
-insert into compute_stats_db.incremental_many_part_keys
+insert into incremental_many_part_keys
 partition(p1=1, p2=2, p3=3, p4=4, p5=5, p6=6) values(1);
 ====
 ---- QUERY
-compute incremental stats compute_stats_db.incremental_many_part_keys;
-show table stats compute_stats_db.incremental_many_part_keys;
+compute incremental stats incremental_many_part_keys;
+show table stats incremental_many_part_keys;
 ---- RESULTS
 '1','2','3','4','5','6',1,1,'2B','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
 'Total','','','','','',1,1,'2B','0B','','','',''
@@ -308,13 +308,13 @@ show table stats compute_stats_db.incremental_many_part_keys;
 STRING, STRING, STRING, STRING, STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-create table compute_stats_db.incremental_null_part_key(col int) partitioned by (p int);
-insert into compute_stats_db.incremental_null_part_key partition(p) values(1,NULL), (1,2);
-compute incremental stats compute_stats_db.incremental_null_part_key partition(p=2);
+create table incremental_null_part_key(col int) partitioned by (p int);
+insert into incremental_null_part_key partition(p) values(1,NULL), (1,2);
+compute incremental stats incremental_null_part_key partition(p=2);
 ====
 ---- QUERY
-compute incremental stats compute_stats_db.incremental_null_part_key partition(p=NULL);
-show table stats compute_stats_db.incremental_null_part_key;
+compute incremental stats incremental_null_part_key partition(p=NULL);
+show table stats incremental_null_part_key;
 ---- RESULTS
 'NULL',1,1,'2B','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
 '2',1,1,'2B','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
@@ -324,12 +324,12 @@ STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
 # Check that incremental stats queries handle partitions with keyword names
-create table compute_stats_db.incremental_keyword_part_key(col int) partitioned by
+create table incremental_keyword_part_key(col int) partitioned by
 (`date` int);
-insert into compute_stats_db.incremental_keyword_part_key partition(`date`=1) values(2);
-compute incremental stats compute_stats_db.incremental_keyword_part_key
+insert into incremental_keyword_part_key partition(`date`=1) values(2);
+compute incremental stats incremental_keyword_part_key
 partition(`date`=1);
-show table stats compute_stats_db.incremental_keyword_part_key;
+show table stats incremental_keyword_part_key;
 ---- RESULTS
 '1',1,1,'2B','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
 'Total',1,1,'2B','0B','','','',''
@@ -337,9 +337,9 @@ show table stats compute_stats_db.incremental_keyword_part_key;
 STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-drop stats compute_stats_db.incremental_keyword_part_key;
-compute incremental stats compute_stats_db.incremental_keyword_part_key;
-show table stats compute_stats_db.incremental_keyword_part_key;
+drop stats incremental_keyword_part_key;
+compute incremental stats incremental_keyword_part_key;
+show table stats incremental_keyword_part_key;
 ---- RESULTS
 '1',1,1,'2B','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
 'Total',1,1,'2B','0B','','','',''
@@ -347,13 +347,13 @@ show table stats compute_stats_db.incremental_keyword_part_key;
 STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-create table compute_stats_db.incremental_string_part_value(col int) partitioned by
+create table incremental_string_part_value(col int) partitioned by
 (p string);
-insert into compute_stats_db.incremental_string_part_value partition(p="test_string")
+insert into incremental_string_part_value partition(p="test_string")
 values(2);
-compute incremental stats compute_stats_db.incremental_string_part_value
+compute incremental stats incremental_string_part_value
 partition(p="test_string");
-show table stats compute_stats_db.incremental_string_part_value;
+show table stats incremental_string_part_value;
 ---- RESULTS
 'test_string',1,1,'2B','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
 'Total',1,1,'2B','0B','','','',''
@@ -362,15 +362,15 @@ STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
 # Check that dropping a column still allows stats to be computed
-create table compute_stats_db.incremental_drop_column (a int, b int, c int)
+create table incremental_drop_column (a int, b int, c int)
 partitioned by (d int);
-alter table compute_stats_db.incremental_drop_column add partition (d=1);
-insert into compute_stats_db.incremental_drop_column partition(d=1) values (4,4,4);
-compute incremental stats compute_stats_db.incremental_drop_column;
-alter table compute_stats_db.incremental_drop_column drop column c;
-alter table compute_stats_db.incremental_drop_column drop column b;
-compute incremental stats compute_stats_db.incremental_drop_column;
-show table stats compute_stats_db.incremental_drop_column;
+alter table incremental_drop_column add partition (d=1);
+insert into incremental_drop_column partition(d=1) values (4,4,4);
+compute incremental stats incremental_drop_column;
+alter table incremental_drop_column drop column c;
+alter table incremental_drop_column drop column b;
+compute incremental stats incremental_drop_column;
+show table stats incremental_drop_column;
 ---- RESULTS
 '1',1,1,'6B','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
 'Total',1,1,'6B','0B','','','',''
@@ -379,13 +379,13 @@ STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
 # Check that adding a column invalidates all incremental stats
-create table compute_stats_db.incremental_add_column (col int) partitioned by (p int);
-insert into compute_stats_db.incremental_add_column partition(p) values(1,1),(2,2);
-compute incremental stats compute_stats_db.incremental_add_column;
-insert into compute_stats_db.incremental_add_column partition(p) values(1,1);
-alter table compute_stats_db.incremental_add_column add columns (c int);
-compute incremental stats compute_stats_db.incremental_add_column;
-show table stats compute_stats_db.incremental_add_column;
+create table incremental_add_column (col int) partitioned by (p int);
+insert into incremental_add_column partition(p) values(1,1),(2,2);
+compute incremental stats incremental_add_column;
+insert into incremental_add_column partition(p) values(1,1);
+alter table incremental_add_column add columns (c int);
+compute incremental stats incremental_add_column;
+show table stats incremental_add_column;
 ---- RESULTS
 '1',2,2,'4B','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
 '2',1,1,'2B','NOT CACHED','NOT CACHED','TEXT','true',regex:.*
@@ -394,19 +394,19 @@ show table stats compute_stats_db.incremental_add_column;
 STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-create table compute_stats_db.incremental_no_partitions (col int) partitioned by (p int);
-compute incremental stats compute_stats_db.incremental_no_partitions;
-show table stats compute_stats_db.incremental_no_partitions;
+create table incremental_no_partitions (col int) partitioned by (p int);
+compute incremental stats incremental_no_partitions;
+show table stats incremental_no_partitions;
 ---- RESULTS
 'Total',0,0,'0B','0B','','','',''
 ---- TYPES
 STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-create table compute_stats_db.incremental_not_partitioned (col int);
-insert into compute_stats_db.incremental_not_partitioned values(1),(2);
-compute incremental stats compute_stats_db.incremental_not_partitioned;
-show table stats compute_stats_db.incremental_not_partitioned;
+create table incremental_not_partitioned (col int);
+insert into incremental_not_partitioned values(1),(2);
+compute incremental stats incremental_not_partitioned;
+show table stats incremental_not_partitioned;
 ---- RESULTS
 2,1,'4B','NOT CACHED','NOT CACHED','TEXT','false',regex:.*
 ---- TYPES
@@ -418,7 +418,7 @@ BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 # The values of date_string_col always have exactly 8 characters. The CHAR/VARCHAR
 # sizes below are chosen such that they are smaller, equal, and greater than the
 # source data values, in particular, to test the CHAR padding behavior.
-create table compute_stats_db.chars_tbl (
+create table chars_tbl (
   id int,
   ch1 char(1),
   ch2 char(8),
@@ -433,7 +433,7 @@ partitioned by (
   day varchar(13)
 );
 
-insert overwrite compute_stats_db.chars_tbl partition(year, day)
+insert overwrite chars_tbl partition(year, day)
 select
 id,
 cast(date_string_col as char(1)),
@@ -453,14 +453,14 @@ year=2010 /day=3/: 1000
 year=2010 /day=__HIVE_DEFAULT_PARTITION__/: 1000
 ====
 ---- QUERY
-compute incremental stats compute_stats_db.chars_tbl
+compute incremental stats chars_tbl
 ---- RESULTS
 'Updated 3 partition(s) and 8 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.chars_tbl
+show column stats chars_tbl
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -479,18 +479,18 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
 # Populate a new partition to verify the incremental stats update
-insert into compute_stats_db.chars_tbl partition(year, day)
+insert into chars_tbl partition(year, day)
 select null, cast('x' as char(1)), cast('x' as char(8)), cast('x' as char(20)),
 null, cast('x' as varchar(1)), cast('x' as varchar(8)), cast('x' as varchar(20)),
 cast('abc' as char(5)), cast('xyz' as varchar(13));
-compute incremental stats compute_stats_db.chars_tbl;
+compute incremental stats chars_tbl;
 ---- RESULTS
 'Updated 1 partition(s) and 8 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.chars_tbl
+show column stats chars_tbl
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a41710a0/testdata/workloads/functional-query/queries/QueryTest/compute-stats-keywords.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/compute-stats-keywords.test b/testdata/workloads/functional-query/queries/QueryTest/compute-stats-keywords.test
new file mode 100644
index 0000000..a58e7c5
--- /dev/null
+++ b/testdata/workloads/functional-query/queries/QueryTest/compute-stats-keywords.test
@@ -0,0 +1,15 @@
+====
+---- QUERY
+COMPUTE STATS `parquet`.impala_1055
+---- RESULTS
+'Updated 1 partition(s) and 1 column(s).'
+---- TYPES
+STRING
+====
+---- QUERY
+COMPUTE STATS `parquet`.`parquet`
+---- RESULTS
+'Updated 1 partition(s) and 1 column(s).'
+---- TYPES
+STRING
+====

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a41710a0/testdata/workloads/functional-query/queries/QueryTest/compute-stats.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/compute-stats.test b/testdata/workloads/functional-query/queries/QueryTest/compute-stats.test
index e1f302c..dd3fec8 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/compute-stats.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/compute-stats.test
@@ -1,19 +1,19 @@
 ====
 ---- QUERY
 # test computing stats on a partitioned text table with all types
-create table compute_stats_db.alltypes like functional.alltypes;
-insert into compute_stats_db.alltypes partition(year, month)
+create table alltypes like functional.alltypes;
+insert into alltypes partition(year, month)
 select * from functional.alltypes;
 ====
 ---- QUERY
-compute stats compute_stats_db.alltypes
+compute stats alltypes
 ---- RESULTS
 'Updated 24 partition(s) and 11 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.alltypes
+show table stats alltypes
 ---- LABELS
 YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
 ---- RESULTS
@@ -46,7 +46,7 @@ YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCRE
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.alltypes
+show column stats alltypes
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -68,10 +68,10 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
 # drop stats from this table
-drop stats compute_stats_db.alltypes
+drop stats alltypes
 ====
 ---- QUERY
-show table stats compute_stats_db.alltypes
+show table stats alltypes
 ---- LABELS
 YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
 ---- RESULTS
@@ -105,7 +105,7 @@ STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
 # Note - the NDV for partition columns is read from the table metadata.
-show column stats compute_stats_db.alltypes
+show column stats alltypes
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -127,11 +127,11 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
 # Add partitions with NULL values and check for stats.
-alter table compute_stats_db.alltypes add partition (year=NULL, month=NULL)
+alter table alltypes add partition (year=NULL, month=NULL)
 ---- RESULTS
 ====
 ---- QUERY
-show column stats compute_stats_db.alltypes
+show column stats alltypes
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -152,11 +152,11 @@ COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
-alter table compute_stats_db.alltypes add partition (year=2011, month=NULL)
+alter table alltypes add partition (year=2011, month=NULL)
 ---- RESULTS
 ====
 ---- QUERY
-show column stats compute_stats_db.alltypes
+show column stats alltypes
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -178,11 +178,11 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
 # Drop the partitions with NULL values and check for stats.
-alter table compute_stats_db.alltypes drop partition (year=NULL, month=NULL)
+alter table alltypes drop partition (year=NULL, month=NULL)
 ---- RESULTS
 ====
 ---- QUERY
-show column stats compute_stats_db.alltypes
+show column stats alltypes
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -203,11 +203,11 @@ COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
-alter table compute_stats_db.alltypes drop partition (year=2011, month=NULL)
+alter table alltypes drop partition (year=2011, month=NULL)
 ---- RESULTS
 ====
 ---- QUERY
-show column stats compute_stats_db.alltypes
+show column stats alltypes
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -229,25 +229,25 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
 # drop stats from this table a second time, should not throw an error.
-drop stats compute_stats_db.alltypes
+drop stats alltypes
 ====
 ---- QUERY
 # test computing stats on a partitioned text table with all types
-create table compute_stats_db.alltypesnopart like functional.alltypesnopart;
-insert into compute_stats_db.alltypesnopart
+create table alltypesnopart like functional.alltypesnopart;
+insert into alltypesnopart
 select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col,
 double_col, date_string_col, string_col, timestamp_col
 from functional.alltypessmall;
 ====
 ---- QUERY
-compute stats compute_stats_db.alltypesnopart
+compute stats alltypesnopart
 ---- RESULTS
 'Updated 1 partition(s) and 11 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.alltypesnopart
+show table stats alltypesnopart
 ---- LABELS
 #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
 ---- RESULTS
@@ -256,7 +256,7 @@ show table stats compute_stats_db.alltypesnopart
 BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.alltypesnopart
+show column stats alltypesnopart
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -276,20 +276,20 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
 # test computing stats on a partitioned parquet table with all types
-create table compute_stats_db.alltypes_parquet
+create table alltypes_parquet
 like functional_parquet.alltypes;
-insert into compute_stats_db.alltypes_parquet partition(year, month)
+insert into alltypes_parquet partition(year, month)
 select * from functional.alltypes;
 ====
 ---- QUERY
-compute stats compute_stats_db.alltypes_parquet
+compute stats alltypes_parquet
 ---- RESULTS
 'Updated 24 partition(s) and 11 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.alltypes_parquet
+show table stats alltypes_parquet
 ---- LABELS
 YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
 ---- RESULTS
@@ -322,7 +322,7 @@ YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCRE
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.alltypes_parquet
+show column stats alltypes_parquet
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -344,17 +344,17 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
 # test computing stats on an empty table
-create table compute_stats_db.alltypes_empty like functional_rc_snap.alltypes
+create table alltypes_empty like functional_rc_snap.alltypes
 ====
 ---- QUERY
-compute stats compute_stats_db.alltypes_empty
+compute stats alltypes_empty
 ---- RESULTS
 'Updated 0 partition(s) and 11 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.alltypes_empty
+show table stats alltypes_empty
 ---- LABELS
 YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
 ---- RESULTS
@@ -363,7 +363,7 @@ YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCRE
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.alltypes_empty
+show column stats alltypes_empty
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -387,27 +387,27 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 # IMPALA-867: Test computing stats on Avro tables created by Hive with
 # matching/mismatched column definitions and Avro schema.
 # Clone the used tables here.
-create table compute_stats_db.avro_hive_alltypes
+create table avro_hive_alltypes
 like functional_avro_snap.alltypes;
-create table compute_stats_db.avro_hive_alltypes_extra_coldef
+create table avro_hive_alltypes_extra_coldef
 like functional_avro_snap.alltypes_extra_coldef;
-create table compute_stats_db.avro_hive_alltypes_missing_coldef
+create table avro_hive_alltypes_missing_coldef
 like functional_avro_snap.alltypes_missing_coldef;
-create table compute_stats_db.avro_hive_alltypes_type_mismatch
+create table avro_hive_alltypes_type_mismatch
 like functional_avro_snap.alltypes_type_mismatch;
-create table compute_stats_db.avro_hive_no_avro_schema
+create table avro_hive_no_avro_schema
 like functional_avro_snap.no_avro_schema;
 ====
 ---- QUERY
 # Avro table with matching column definitions and Avro schema
-compute stats compute_stats_db.avro_hive_alltypes
+compute stats avro_hive_alltypes
 ---- RESULTS
 'Updated 0 partition(s) and 11 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.avro_hive_alltypes
+show table stats avro_hive_alltypes
 ---- LABELS
 YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
 ---- RESULTS
@@ -416,7 +416,7 @@ YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCRE
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.avro_hive_alltypes
+show column stats avro_hive_alltypes
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -438,14 +438,14 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
 # Avro table with an extra column definition.
-compute stats compute_stats_db.avro_hive_alltypes_extra_coldef
+compute stats avro_hive_alltypes_extra_coldef
 ---- RESULTS
 'Updated 0 partition(s) and 12 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.avro_hive_alltypes_extra_coldef
+show table stats avro_hive_alltypes_extra_coldef
 ---- LABELS
 YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
 ---- RESULTS
@@ -454,7 +454,7 @@ YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCRE
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.avro_hive_alltypes_extra_coldef
+show column stats avro_hive_alltypes_extra_coldef
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -477,14 +477,14 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
 # Avro table with missing two column definitions.
-compute stats compute_stats_db.avro_hive_alltypes_missing_coldef
+compute stats avro_hive_alltypes_missing_coldef
 ---- RESULTS
 'Updated 0 partition(s) and 9 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.avro_hive_alltypes_missing_coldef
+show table stats avro_hive_alltypes_missing_coldef
 ---- LABELS
 YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
 ---- RESULTS
@@ -493,7 +493,7 @@ YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCRE
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.avro_hive_alltypes_missing_coldef
+show column stats avro_hive_alltypes_missing_coldef
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -514,14 +514,14 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ---- QUERY
 # Avro table with one column definition having a different
 # type than the Avro schema (bigint_col is a string).
-compute stats compute_stats_db.avro_hive_alltypes_type_mismatch
+compute stats avro_hive_alltypes_type_mismatch
 ---- RESULTS
 'Updated 0 partition(s) and 11 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.avro_hive_alltypes_type_mismatch
+show table stats avro_hive_alltypes_type_mismatch
 ---- LABELS
 YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
 ---- RESULTS
@@ -530,7 +530,7 @@ YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCRE
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.avro_hive_alltypes_type_mismatch
+show column stats avro_hive_alltypes_type_mismatch
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -553,14 +553,14 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ---- QUERY
 # Avro table without an Avro schema created by Hive.
 # The Avro schema is inferred from the column definitions,
-compute stats compute_stats_db.avro_hive_no_avro_schema
+compute stats avro_hive_no_avro_schema
 ---- RESULTS
 'Updated 0 partition(s) and 11 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.avro_hive_no_avro_schema
+show table stats avro_hive_no_avro_schema
 ---- LABELS
 YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
 ---- RESULTS
@@ -569,7 +569,7 @@ YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCRE
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.avro_hive_no_avro_schema
+show column stats avro_hive_no_avro_schema
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -591,21 +591,21 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
 # Test Avro table created without any column definitions.
-create table compute_stats_db.avro_impala_alltypes_no_coldefs
+create table avro_impala_alltypes_no_coldefs
 partitioned by (year int, month int)
 with serdeproperties
 ('avro.schema.url'='$FILESYSTEM_PREFIX/test-warehouse/avro_schemas/functional/alltypes.json')
 stored as avro;
 ====
 ---- QUERY
-compute stats compute_stats_db.avro_impala_alltypes_no_coldefs
+compute stats avro_impala_alltypes_no_coldefs
 ---- RESULTS
 'Updated 0 partition(s) and 11 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.avro_impala_alltypes_no_coldefs
+show table stats avro_impala_alltypes_no_coldefs
 ---- LABELS
 YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
 ---- RESULTS
@@ -614,7 +614,7 @@ YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCRE
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.avro_impala_alltypes_no_coldefs
+show column stats avro_impala_alltypes_no_coldefs
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -637,7 +637,7 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ---- QUERY
 # IMPALA-1104: Test computing stats on Avro tables created by Impala
 # with mismatched column definitions and Avro schema. Mismatched column name.
-create table compute_stats_db.avro_impala_alltypes_bad_colname
+create table avro_impala_alltypes_bad_colname
 (id int, bool_col boolean, tinyint_col int, smallint_col int, bad_int_col int,
 bigint_col bigint, float_col float, double_col double, date_string_col string,
 string_col string, timestamp_col timestamp)
@@ -647,14 +647,14 @@ with serdeproperties
 stored as avro;
 ====
 ---- QUERY
-compute stats compute_stats_db.avro_impala_alltypes_bad_colname
+compute stats avro_impala_alltypes_bad_colname
 ---- RESULTS
 'Updated 0 partition(s) and 11 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.avro_impala_alltypes_bad_colname
+show table stats avro_impala_alltypes_bad_colname
 ---- LABELS
 YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
 ---- RESULTS
@@ -663,7 +663,7 @@ YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCRE
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.avro_impala_alltypes_bad_colname
+show column stats avro_impala_alltypes_bad_colname
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -686,7 +686,7 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ---- QUERY
 # IMPALA-1104: Test computing stats on Avro tables created by Impala
 # with mismatched column definitions and Avro schema. Mismatched column type.
-create table compute_stats_db.avro_impala_alltypes_bad_coltype
+create table avro_impala_alltypes_bad_coltype
 (id int, bool_col boolean, tinyint_col int, smallint_col int, int_col int,
 bigint_col bigint, float_col float, double_col bigint, date_string_col string,
 string_col string, timestamp_col timestamp)
@@ -696,14 +696,14 @@ with serdeproperties
 stored as avro;
 ====
 ---- QUERY
-compute stats compute_stats_db.avro_impala_alltypes_bad_coltype
+compute stats avro_impala_alltypes_bad_coltype
 ---- RESULTS
 'Updated 0 partition(s) and 11 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.avro_impala_alltypes_bad_coltype
+show table stats avro_impala_alltypes_bad_coltype
 ---- LABELS
 YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
 ---- RESULTS
@@ -712,7 +712,7 @@ YEAR, MONTH, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCRE
 STRING, STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.avro_impala_alltypes_bad_coltype
+show column stats avro_impala_alltypes_bad_coltype
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -733,41 +733,19 @@ COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ====
 ---- QUERY
-# For IMPALA-1055, using a database called `parquet` to test cases where the name of
-# the database is a keyword.
-CREATE TABLE `parquet`.billion_parquet(id INT);
-====
----- QUERY
-COMPUTE STATS `parquet`.billion_parquet
----- RESULTS
-'Updated 1 partition(s) and 1 column(s).'
----- TYPES
-STRING
-====
----- QUERY
-CREATE TABLE `parquet`.`parquet`(id INT)
-====
----- QUERY
-COMPUTE STATS `parquet`.`parquet`
----- RESULTS
-'Updated 1 partition(s) and 1 column(s).'
----- TYPES
-STRING
-====
----- QUERY
 # IMPALA-883: Compute table stats for an empty partition.
-create table compute_stats_db.empty_partitioned (i int) partitioned by (j int);
-alter table compute_stats_db.empty_partitioned add partition (j=1);
+create table empty_partitioned (i int) partitioned by (j int);
+alter table empty_partitioned add partition (j=1);
 ====
 ---- QUERY
-compute stats compute_stats_db.empty_partitioned
+compute stats empty_partitioned
 ---- RESULTS
 'Updated 1 partition(s) and 1 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.empty_partitioned
+show table stats empty_partitioned
 ---- RESULTS
 '1',0,0,'0B','NOT CACHED','NOT CACHED','TEXT','false',regex:.*
 'Total',0,0,'0B','0B','','','',''
@@ -776,19 +754,19 @@ STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
 # Insert non empty partition to the table with empty partition.
-insert into table compute_stats_db.empty_partitioned partition (j=2) select 1;
+insert into table empty_partitioned partition (j=2) select 1;
 ====
 ---- QUERY
 # Verify partition stats work with empty and non-empty partition.
-drop stats compute_stats_db.empty_partitioned;
-compute stats compute_stats_db.empty_partitioned;
+drop stats empty_partitioned;
+compute stats empty_partitioned;
 ---- RESULTS
 'Updated 2 partition(s) and 1 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.empty_partitioned
+show table stats empty_partitioned
 ---- RESULTS
 '1',0,0,'0B','NOT CACHED','NOT CACHED','TEXT','false',regex:.*
 '2',1,1,'2B','NOT CACHED','NOT CACHED','TEXT','false',regex:.*
@@ -798,15 +776,15 @@ STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
 # Verify partition stats work with empty and non-empty partition.
-drop stats compute_stats_db.empty_partitioned;
-compute stats compute_stats_db.empty_partitioned;
+drop stats empty_partitioned;
+compute stats empty_partitioned;
 ---- RESULTS
 'Updated 2 partition(s) and 1 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.empty_partitioned
+show table stats empty_partitioned
 ---- RESULTS
 '1',0,0,'0B','NOT CACHED','NOT CACHED','TEXT','false',regex:.*
 '2',1,1,'2B','NOT CACHED','NOT CACHED','TEXT','false',regex:.*
@@ -816,18 +794,18 @@ STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 ====
 ---- QUERY
 # IMPALA-1614 Verify that COMPUTE STATS works on a table whose name starts with numbers.
-create table compute_stats_db.`123_table` (i int, 1p int) partitioned by (2j int);
-alter table compute_stats_db.`123_table` add partition (2j=1);
+create table `123_table` (i int, 1p int) partitioned by (2j int);
+alter table `123_table` add partition (2j=1);
 ====
 ---- QUERY
-compute stats compute_stats_db.`123_table`
+compute stats `123_table`
 ---- RESULTS
 'Updated 1 partition(s) and 2 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show table stats compute_stats_db.`123_table`
+show table stats `123_table`
 ---- RESULTS
 '1',0,0,'0B','NOT CACHED','NOT CACHED','TEXT','false',regex:.*
 'Total',0,0,'0B','0B','','','',''
@@ -840,7 +818,7 @@ STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
 # The values of date_string_col always have exactly 8 characters. The CHAR/VARCHAR
 # sizes below are chosen such that they are smaller, equal, and greater than the
 # source data values, in particular, to test the CHAR padding behavior.
-create table compute_stats_db.chars_tbl (
+create table chars_tbl (
   id int,
   ch1 char(1),
   ch2 char(8),
@@ -855,7 +833,7 @@ partitioned by (
   day varchar(13)
 );
 
-insert overwrite compute_stats_db.chars_tbl partition(year, day)
+insert overwrite chars_tbl partition(year, day)
 select
 id,
 cast(date_string_col as char(1)),
@@ -875,14 +853,14 @@ year=2010 /day=3/: 1000
 year=2010 /day=__HIVE_DEFAULT_PARTITION__/: 1000
 ====
 ---- QUERY
-compute stats compute_stats_db.chars_tbl
+compute stats chars_tbl
 ---- RESULTS
 'Updated 3 partition(s) and 8 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.chars_tbl
+show column stats chars_tbl
 ---- LABELS
 COLUMN, TYPE, #DISTINCT VALUES, #NULLS, MAX SIZE, AVG SIZE
 ---- RESULTS
@@ -902,15 +880,15 @@ STRING, STRING, BIGINT, BIGINT, INT, DOUBLE
 ---- QUERY
 # Test that compute stats on a Hive-created Avro table without column defs
 # works (HIVE-6308, IMPALA-867).
-create table compute_stats_db.alltypes_no_coldef like functional_avro_snap.alltypes_no_coldef;
-compute stats compute_stats_db.alltypes_no_coldef
+create table alltypes_no_coldef like functional_avro_snap.alltypes_no_coldef;
+compute stats alltypes_no_coldef
 ---- RESULTS
 'Updated 1 partition(s) and 11 column(s).'
 ---- TYPES
 STRING
 ====
 ---- QUERY
-show column stats compute_stats_db.alltypes_no_coldef
+show column stats alltypes_no_coldef
 ---- RESULTS
 'id','INT',0,-1,4,4
 'bool_col','BOOLEAN',2,-1,1,1

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a41710a0/testdata/workloads/functional-query/queries/QueryTest/corrupt-stats.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/corrupt-stats.test b/testdata/workloads/functional-query/queries/QueryTest/corrupt-stats.test
new file mode 100644
index 0000000..7c5b561
--- /dev/null
+++ b/testdata/workloads/functional-query/queries/QueryTest/corrupt-stats.test
@@ -0,0 +1,181 @@
+====
+---- QUERY
+create table corrupted (id int, name string) partitioned by (org int);
+====
+---- QUERY
+insert into corrupted partition (org=1) values (1, "Martin"), (2, "Hans"), (3, "Peter");
+====
+---- QUERY
+insert into corrupted partition (org=2) values (4, "Martin"), (5, "Hans"), (6, "Peter");
+====
+---- QUERY
+show table stats corrupted;
+---- LABELS
+ORG, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
+---- RESULTS
+'1',-1,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/$DATABASE.db/corrupted/org=1'
+'2',-1,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/$DATABASE.db/corrupted/org=2'
+'Total',-1,2,'48B','0B','','','',''
+---- TYPES
+STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
+====
+---- QUERY
+compute stats corrupted;
+====
+---- QUERY
+show table stats corrupted;
+---- LABELS
+ORG, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
+---- RESULTS
+'1',3,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/$DATABASE.db/corrupted/org=1'
+'2',3,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/$DATABASE.db/corrupted/org=2'
+'Total',6,2,'48B','0B','','','',''
+---- TYPES
+STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
+====
+---- QUERY
+alter table corrupted partition(org=1) set tblproperties('numRows'='0');
+====
+---- QUERY
+invalidate metadata corrupted;
+====
+---- QUERY
+show table stats corrupted;
+---- LABELS
+ORG, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
+---- RESULTS
+'1',0,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/$DATABASE.db/corrupted/org=1'
+'2',3,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/$DATABASE.db/corrupted/org=2'
+'Total',6,2,'48B','0B','','','',''
+---- TYPES
+STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
+====
+---- QUERY
+explain select count(*) from corrupted where org = 1;
+---- RESULTS: VERIFY_IS_SUBSET
+'WARNING: The following tables have potentially corrupt table'
+'statistics. Drop and re-compute statistics to resolve this problem.'
+'$DATABASE.corrupted'
+''
+'03:AGGREGATE [FINALIZE]'
+'|  output: count:merge(*)'
+'|'
+'02:EXCHANGE [UNPARTITIONED]'
+'|'
+'01:AGGREGATE'
+'|  output: count(*)'
+'|'
+'00:SCAN HDFS [$DATABASE.corrupted]'
+'   partitions=1/2 files=1 size=24B'
+---- TYPES
+STRING
+====
+---- QUERY
+alter table corrupted partition(org=1) set tblproperties('numRows'='3');
+alter table corrupted set tblproperties('numRows'='0');
+====
+---- QUERY
+show table stats corrupted;
+---- LABELS
+ORG, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
+---- RESULTS
+'1',3,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/$DATABASE.db/corrupted/org=1'
+'2',3,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/$DATABASE.db/corrupted/org=2'
+'Total',0,2,'48B','0B','','','',''
+---- TYPES
+STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
+====
+---- QUERY
+explain select count(*) from corrupted;
+---- RESULTS: VERIFY_IS_SUBSET
+'01:AGGREGATE [FINALIZE]'
+'|  output: count(*)'
+'|'
+'00:SCAN HDFS [$DATABASE.corrupted]'
+'   partitions=2/2 files=2 size=48B'
+---- TYPES
+STRING
+====
+---- QUERY
+alter table corrupted set tblproperties('numRows'='6');
+====
+---- QUERY
+show table stats corrupted;
+---- LABELS
+ORG, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
+---- RESULTS
+'1',3,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/$DATABASE.db/corrupted/org=1'
+'2',3,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/$DATABASE.db/corrupted/org=2'
+'Total',6,2,'48B','0B','','','',''
+---- TYPES
+STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
+====
+---- QUERY
+explain select count(*) from corrupted;
+---- RESULTS: VERIFY_IS_SUBSET
+'01:AGGREGATE [FINALIZE]'
+'|  output: count(*)'
+'|'
+'00:SCAN HDFS [$DATABASE.corrupted]'
+'   partitions=2/2 files=2 size=48B'
+---- TYPES
+STRING
+====
+---- QUERY
+create table corrupted_no_part (id int);
+insert into corrupted_no_part values (1),(2),(3);
+compute stats corrupted_no_part;
+====
+---- QUERY
+show table stats corrupted_no_part;
+---- LABELS
+#ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
+---- RESULTS
+3,1,'6B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/$DATABASE.db/corrupted_no_part'
+---- TYPES
+BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
+====
+---- QUERY
+-- Check that small query optimization is executed.
+explain select count(*) from corrupted_no_part;
+---- RESULTS: VERIFY_IS_SUBSET
+'01:AGGREGATE [FINALIZE]'
+'|  output: count(*)'
+'|'
+'00:SCAN HDFS [$DATABASE.corrupted_no_part]'
+'   partitions=1/1 files=1 size=6B'
+---- TYPES
+STRING
+====
+---- QUERY
+alter table corrupted_no_part set tblproperties('numRows'='0');
+====
+---- QUERY
+show table stats corrupted_no_part;
+---- LABELS
+#ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
+---- RESULTS
+-1,1,'6B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/$DATABASE.db/corrupted_no_part'
+---- TYPES
+BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
+====
+---- QUERY
+-- After setting num rows to 0, the HMS will set it to -1 and avoids bad behavior.
+explain select count(*) from corrupted_no_part;
+---- RESULTS: VERIFY_IS_SUBSET
+'WARNING: The following tables are missing relevant table and/or column statistics.'
+'$DATABASE.corrupted_no_part'
+''
+'03:AGGREGATE [FINALIZE]'
+'|  output: count:merge(*)'
+'|'
+'02:EXCHANGE [UNPARTITIONED]'
+'|'
+'01:AGGREGATE'
+'|  output: count(*)'
+'|'
+'00:SCAN HDFS [$DATABASE.corrupted_no_part]'
+'   partitions=1/1 files=1 size=6B'
+---- TYPES
+STRING
+====

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a41710a0/testdata/workloads/functional-query/queries/QueryTest/corrupt_stats.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/corrupt_stats.test b/testdata/workloads/functional-query/queries/QueryTest/corrupt_stats.test
deleted file mode 100644
index fbb4fa1..0000000
--- a/testdata/workloads/functional-query/queries/QueryTest/corrupt_stats.test
+++ /dev/null
@@ -1,184 +0,0 @@
-====
----- QUERY
-use compute_stats_db;
-====
----- QUERY
-create table corrupted (id int, name string) partitioned by (org int);
-====
----- QUERY
-insert into corrupted partition (org=1) values (1, "Martin"), (2, "Hans"), (3, "Peter");
-====
----- QUERY
-insert into corrupted partition (org=2) values (4, "Martin"), (5, "Hans"), (6, "Peter");
-====
----- QUERY
-show table stats corrupted;
----- LABELS
-ORG, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
----- RESULTS
-'1',-1,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/compute_stats_db.db/corrupted/org=1'
-'2',-1,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/compute_stats_db.db/corrupted/org=2'
-'Total',-1,2,'48B','0B','','','',''
----- TYPES
-STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
-====
----- QUERY
-compute stats corrupted;
-====
----- QUERY
-show table stats corrupted;
----- LABELS
-ORG, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
----- RESULTS
-'1',3,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/compute_stats_db.db/corrupted/org=1'
-'2',3,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/compute_stats_db.db/corrupted/org=2'
-'Total',6,2,'48B','0B','','','',''
----- TYPES
-STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
-====
----- QUERY
-alter table corrupted partition(org=1) set tblproperties('numRows'='0');
-====
----- QUERY
-invalidate metadata corrupted;
-====
----- QUERY
-show table stats corrupted;
----- LABELS
-ORG, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
----- RESULTS
-'1',0,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/compute_stats_db.db/corrupted/org=1'
-'2',3,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/compute_stats_db.db/corrupted/org=2'
-'Total',6,2,'48B','0B','','','',''
----- TYPES
-STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
-====
----- QUERY
-explain select count(*) from corrupted where org = 1;
----- RESULTS: VERIFY_IS_SUBSET
-'WARNING: The following tables have potentially corrupt table'
-'statistics. Drop and re-compute statistics to resolve this problem.'
-'compute_stats_db.corrupted'
-''
-'03:AGGREGATE [FINALIZE]'
-'|  output: count:merge(*)'
-'|'
-'02:EXCHANGE [UNPARTITIONED]'
-'|'
-'01:AGGREGATE'
-'|  output: count(*)'
-'|'
-'00:SCAN HDFS [compute_stats_db.corrupted]'
-'   partitions=1/2 files=1 size=24B'
----- TYPES
-STRING
-====
----- QUERY
-alter table corrupted partition(org=1) set tblproperties('numRows'='3');
-alter table corrupted set tblproperties('numRows'='0');
-====
----- QUERY
-show table stats corrupted;
----- LABELS
-ORG, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
----- RESULTS
-'1',3,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/compute_stats_db.db/corrupted/org=1'
-'2',3,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/compute_stats_db.db/corrupted/org=2'
-'Total',0,2,'48B','0B','','','',''
----- TYPES
-STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
-====
----- QUERY
-explain select count(*) from corrupted;
----- RESULTS: VERIFY_IS_SUBSET
-'01:AGGREGATE [FINALIZE]'
-'|  output: count(*)'
-'|'
-'00:SCAN HDFS [compute_stats_db.corrupted]'
-'   partitions=2/2 files=2 size=48B'
----- TYPES
-STRING
-====
----- QUERY
-alter table corrupted set tblproperties('numRows'='6');
-====
----- QUERY
-show table stats corrupted;
----- LABELS
-ORG, #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
----- RESULTS
-'1',3,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/compute_stats_db.db/corrupted/org=1'
-'2',3,1,'24B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/compute_stats_db.db/corrupted/org=2'
-'Total',6,2,'48B','0B','','','',''
----- TYPES
-STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
-====
----- QUERY
-explain select count(*) from corrupted;
----- RESULTS: VERIFY_IS_SUBSET
-'01:AGGREGATE [FINALIZE]'
-'|  output: count(*)'
-'|'
-'00:SCAN HDFS [compute_stats_db.corrupted]'
-'   partitions=2/2 files=2 size=48B'
----- TYPES
-STRING
-====
----- QUERY
-create table corrupted_no_part (id int);
-insert into corrupted_no_part values (1),(2),(3);
-compute stats corrupted_no_part;
-====
----- QUERY
-show table stats corrupted_no_part;
----- LABELS
-#ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
----- RESULTS
-3,1,'6B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/compute_stats_db.db/corrupted_no_part'
----- TYPES
-BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
-====
----- QUERY
--- Check that small query optimization is executed.
-explain select count(*) from corrupted_no_part;
----- RESULTS: VERIFY_IS_SUBSET
-'01:AGGREGATE [FINALIZE]'
-'|  output: count(*)'
-'|'
-'00:SCAN HDFS [compute_stats_db.corrupted_no_part]'
-'   partitions=1/1 files=1 size=6B'
----- TYPES
-STRING
-====
----- QUERY
-alter table corrupted_no_part set tblproperties('numRows'='0');
-====
----- QUERY
-show table stats corrupted_no_part;
----- LABELS
-#ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
----- RESULTS
--1,1,'6B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/compute_stats_db.db/corrupted_no_part'
----- TYPES
-BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
-====
----- QUERY
--- After setting num rows to 0, the HMS will set it to -1 and avoids bad behavior.
-explain select count(*) from corrupted_no_part;
----- RESULTS: VERIFY_IS_SUBSET
-'WARNING: The following tables are missing relevant table and/or column statistics.'
-'compute_stats_db.corrupted_no_part'
-''
-'03:AGGREGATE [FINALIZE]'
-'|  output: count:merge(*)'
-'|'
-'02:EXCHANGE [UNPARTITIONED]'
-'|'
-'01:AGGREGATE'
-'|  output: count(*)'
-'|'
-'00:SCAN HDFS [compute_stats_db.corrupted_no_part]'
-'   partitions=1/1 files=1 size=6B'
----- TYPES
-STRING
-====
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a41710a0/tests/common/impala_test_suite.py
----------------------------------------------------------------------
diff --git a/tests/common/impala_test_suite.py b/tests/common/impala_test_suite.py
index 0ed587b..d6e70c8 100644
--- a/tests/common/impala_test_suite.py
+++ b/tests/common/impala_test_suite.py
@@ -304,6 +304,8 @@ class ImpalaTestSuite(BaseTestSuite):
         test_section['RESULTS'] = test_section['RESULTS'] \
             .replace('$NAMENODE', NAMENODE) \
             .replace('$IMPALA_HOME', IMPALA_HOME)
+        if use_db:
+          test_section['RESULTS'] = test_section['RESULTS'].replace('$DATABASE', use_db)
         verify_raw_results(test_section, result,
                          vector.get_value('table_format').file_format,
                          pytest.config.option.update_results)

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a41710a0/tests/metadata/test_compute_stats.py
----------------------------------------------------------------------
diff --git a/tests/metadata/test_compute_stats.py b/tests/metadata/test_compute_stats.py
index b2f6653..f4ae2ff 100644
--- a/tests/metadata/test_compute_stats.py
+++ b/tests/metadata/test_compute_stats.py
@@ -24,9 +24,6 @@ from tests.util.filesystem_utils import WAREHOUSE
 # TODO: Merge this test file with test_col_stats.py
 @SkipIf.not_default_fs # Isilon: Missing coverage: compute stats
 class TestComputeStats(ImpalaTestSuite):
-  TEST_DB_NAME = "compute_stats_db"
-  TEST_ALIASING_DB_NAME = "parquet"
-
   @classmethod
   def get_workload(self):
     return 'functional-query'
@@ -39,40 +36,38 @@ class TestComputeStats(ImpalaTestSuite):
     # are different for different file formats.
     cls.TestMatrix.add_dimension(create_uncompressed_text_dimension(cls.get_workload()))
 
-  def setup_method(self, method):
-    # cleanup and create a fresh test database
-    self.cleanup_db(self.TEST_DB_NAME)
-    self.execute_query("create database {0} location '{1}/{0}.db'"
-        .format(self.TEST_DB_NAME, WAREHOUSE))
-    # cleanup and create a fresh test database whose name is a keyword
-    self.cleanup_db(self.TEST_ALIASING_DB_NAME)
-    self.execute_query("create database `{0}` location '{1}/{0}.db'"
-        .format(self.TEST_ALIASING_DB_NAME, WAREHOUSE))
+  @SkipIfLocal.hdfs_blocks
+  def test_compute_stats(self, vector, unique_database):
+    self.run_test_case('QueryTest/compute-stats', vector, unique_database)
+    # Test compute stats on decimal columns separately so we can vary between CDH4/5
+    self.run_test_case('QueryTest/compute-stats-decimal', vector, unique_database)
 
-  def teardown_method(self, method):
-    self.cleanup_db(self.TEST_DB_NAME)
-    self.cleanup_db(self.TEST_ALIASING_DB_NAME)
+  def test_compute_stats_incremental(self, vector, unique_database):
+    self.run_test_case('QueryTest/compute-stats-incremental', vector, unique_database)
 
-  @SkipIfLocal.hdfs_blocks
   @pytest.mark.execute_serially
-  def test_compute_stats(self, vector):
-    self.run_test_case('QueryTest/compute-stats', vector)
-    # Test compute stats on decimal columns separately so we can vary between CDH4/5
-    self.run_test_case('QueryTest/compute-stats-decimal', vector)
+  def test_compute_stats_many_partitions(self, vector):
     # To cut down on test execution time, only run the compute stats test against many
     # partitions if performing an exhaustive test run.
     if self.exploration_strategy() != 'exhaustive': return
     self.run_test_case('QueryTest/compute-stats-many-partitions', vector)
 
   @pytest.mark.execute_serially
-  def test_compute_stats_incremental(self, vector):
-    self.run_test_case('QueryTest/compute-stats-incremental', vector)
+  def test_compute_stats_keywords(self, vector):
+    """IMPALA-1055: Tests compute stats with a db/table name that are keywords."""
+    self.execute_query("drop database if exists `parquet` cascade")
+    self.execute_query("create database `parquet`")
+    self.execute_query("create table `parquet`.impala_1055 (id INT)")
+    self.execute_query("create table `parquet`.`parquet` (id INT)")
+    try:
+      self.run_test_case('QueryTest/compute-stats-keywords', vector)
+    finally:
+      self.cleanup_db("parquet")
 
-  @pytest.mark.execute_serially
   @SkipIfS3.hive
   @SkipIfIsilon.hive
   @SkipIfLocal.hive
-  def test_compute_stats_impala_2201(self, vector):
+  def test_compute_stats_impala_2201(self, vector, unique_database):
     """IMPALA-2201: Tests that the results of compute incremental stats are properly
     persisted when the data was loaded from Hive with hive.stats.autogather=true.
     """
@@ -89,31 +84,31 @@ class TestComputeStats(ImpalaTestSuite):
       create table {0}.{1} (c int) partitioned by (p1 int, p2 string);
       insert overwrite table {0}.{1} partition (p1=1, p2="pval")
       select id from functional.alltypestiny;
-    """.format(self.TEST_DB_NAME, table_name)
+    """.format(unique_database, table_name)
     check_call(["hive", "-e", create_load_data_stmts])
 
     # Make the table visible in Impala.
-    self.execute_query("invalidate metadata %s.%s" % (self.TEST_DB_NAME, table_name))
+    self.execute_query("invalidate metadata %s.%s" % (unique_database, table_name))
 
     # Check that the row count was populated during the insert. We expect 8 rows
     # because functional.alltypestiny has 8 rows, but Hive's auto stats gathering
     # is known to be flaky and sometimes sets the row count to 0. So we check that
     # the row count is not -1 instead of checking for 8 directly.
     show_result = \
-      self.execute_query("show table stats %s.%s" % (self.TEST_DB_NAME, table_name))
+      self.execute_query("show table stats %s.%s" % (unique_database, table_name))
     assert(len(show_result.data) == 2)
     assert("1\tpval\t-1" not in show_result.data[0])
 
     # Compute incremental stats on the single test partition.
     self.execute_query("compute incremental stats %s.%s partition (p1=1, p2='pval')"
-      % (self.TEST_DB_NAME, table_name))
+      % (unique_database, table_name))
 
     # Invalidate metadata to force reloading the stats from the Hive Metastore.
-    self.execute_query("invalidate metadata %s.%s" % (self.TEST_DB_NAME, table_name))
+    self.execute_query("invalidate metadata %s.%s" % (unique_database, table_name))
 
     # Check that the row count is still 8.
     show_result = \
-      self.execute_query("show table stats %s.%s" % (self.TEST_DB_NAME, table_name))
+      self.execute_query("show table stats %s.%s" % (unique_database, table_name))
     assert(len(show_result.data) == 2)
     assert("1\tpval\t8" in show_result.data[0])
 
@@ -130,9 +125,8 @@ class TestCorruptTableStats(TestComputeStats):
     # are different for different file formats.
     cls.TestMatrix.add_dimension(create_uncompressed_text_dimension(cls.get_workload()))
 
-  @pytest.mark.execute_serially
-  def test_corrupted_stats(self, vector):
+  def test_corrupt_stats(self, vector, unique_database):
     """IMPALA-1983: Test that in the presence of corrupt table statistics a warning is
     issued and the small query optimization is disabled."""
     if self.exploration_strategy() != 'exhaustive': pytest.skip("Only run in exhaustive")
-    self.run_test_case('QueryTest/corrupt_stats', vector)
+    self.run_test_case('QueryTest/corrupt-stats', vector, unique_database)


Mime
View raw message