hawq-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From paul...@apache.org
Subject [2/2] incubator-hawq git commit: HAWQ-1361. Remove some installcheck-good cases since they are in the feature test suite now.
Date Fri, 03 Mar 2017 10:39:59 GMT
HAWQ-1361. Remove some installcheck-good cases since they are in the feature test suite now.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/bacfb0f1
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/bacfb0f1
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/bacfb0f1

Branch: refs/heads/master
Commit: bacfb0f1c9a38ed2a9407d7add49453ebd3056d6
Parents: 5975764
Author: Paul Guo <paulguo@gmail.com>
Authored: Fri Feb 24 08:03:08 2017 +0800
Committer: Paul Guo <paulguo@gmail.com>
Committed: Fri Mar 3 18:39:29 2017 +0800

----------------------------------------------------------------------
 .../expected/goh_create_type_composite.out      | 170 --------
 src/test/regress/expected/goh_database.out      | 121 ------
 .../regress/expected/goh_gp_dist_random.out     |  15 -
 src/test/regress/expected/goh_toast.out         |  39 --
 .../regress/expected/information_schema.out     | 101 -----
 src/test/regress/expected/rowtypes.out          | 264 ------------
 src/test/regress/expected/temp.out              | 201 ---------
 src/test/regress/expected/transactions.out      | 429 -------------------
 src/test/regress/input/errortbl.source          |  98 -----
 src/test/regress/known_good_schedule            |   9 -
 src/test/regress/output/errortbl.source         | 334 ---------------
 .../regress/sql/goh_create_type_composite.sql   |  96 -----
 src/test/regress/sql/goh_database.sql           |  41 --
 src/test/regress/sql/goh_gp_dist_random.sql     |   6 -
 src/test/regress/sql/goh_toast.sql              |  32 --
 src/test/regress/sql/information_schema.sql     |  57 ---
 src/test/regress/sql/rowtypes.sql               | 111 -----
 src/test/regress/sql/temp.sql                   | 163 -------
 src/test/regress/sql/transactions.sql           | 335 ---------------
 19 files changed, 2622 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bacfb0f1/src/test/regress/expected/goh_create_type_composite.out
----------------------------------------------------------------------
diff --git a/src/test/regress/expected/goh_create_type_composite.out b/src/test/regress/expected/goh_create_type_composite.out
deleted file mode 100644
index f22734f..0000000
--- a/src/test/regress/expected/goh_create_type_composite.out
+++ /dev/null
@@ -1,170 +0,0 @@
---
--- CREATE_TYPE
---
---
--- Note: widget_in/out were created in create_function_1, without any
--- prior shell-type creation.  These commands therefore complete a test
--- of the "old style" approach of making the functions first.
---
--- start_ignore
-drop database hdfs;
--- end_ignore
-create database hdfs;
-\c hdfs
--- Test stand-alone composite type
-create type temp_type_1 as (a int, b int);
-create type temp_type_2 as (a int, b int);
-create table temp_table (id int, a temp_type_1, b temp_type_2) distributed randomly;
-insert into temp_table values (1, (1,2), (3,4));
-insert into temp_table values (2, (5,6), (7,8));
-insert into temp_table values (3, (9,10), (11,12));
-\d temp_table
-Append-Only Table "public.temp_table"
- Column |    Type     | Modifiers 
---------+-------------+-----------
- id     | integer     | 
- a      | temp_type_1 | 
- b      | temp_type_2 | 
-Compression Type: None
-Compression Level: 0
-Block Size: 32768
-Checksum: f
-Distributed randomly
-
-select * from temp_table order by 1;
- id |   a    |    b    
-----+--------+---------
-  1 | (1,2)  | (3,4)
-  2 | (5,6)  | (7,8)
-  3 | (9,10) | (11,12)
-(3 rows)
-
-drop table temp_table;
-create type temp_type_3 as (a temp_type_1, b temp_type_2);
-CREATE table temp_table (id int, a temp_type_1, b temp_type_3) distributed randomly;
-insert into temp_table values (1, (9,10), ((11,12),(7,8)));
-insert into temp_table values (2, (1,2), ((3,4),(5,6)));
-select * from temp_table order by 1;
- id |   a    |          b
-----+--------+---------------------
-  1 | (9,10) | ("(11,12)","(7,8)")
-  2 | (1,2)  | ("(3,4)","(5,6)")
-(2 rows)
-
--- check catalog entries for types
-select count(typrelid) from pg_type where typname like 'temp_type_%';
- count
--------
-     3
-(1 row)
-
-comment on type temp_type_1 is 'test composite type';
-\dT temp_type_1
-             List of data types
- Schema |    Name     |     Description     
---------+-------------+---------------------
- public | temp_type_1 | test composite type
-(1 row)
-
-select count(reltype) from pg_class where relname like 'temp_type%';
- count
--------
-     3
-(1 row)
-
-create table test_func (foo temp_type_1);
-insert into test_func values((1,2));
-insert into test_func values((3,4));
-insert into test_func values((5,6));
-insert into test_func values((7,8));
--- Functions with UDTs
-create function test_temp_func(temp_type_1, temp_type_2) RETURNS temp_type_1 AS '
-  select foo from test_func where (foo).a = 3;
-' LANGUAGE SQL; 
-SELECT * FROM test_temp_func((7,8), (5,6));
- a | b 
----+---
- 3 | 4
-(1 row)
-
-drop function test_temp_func(temp_type_1, temp_type_2);
--- UDT and UDA
-create or replace function test_temp_func_2(temp_type_1, temp_type_1) RETURNS temp_type_1
AS '
-  select ($1.a + $2.a, $1.b + $2.b)::temp_type_1;
-' LANGUAGE SQL;
-CREATE AGGREGATE agg_comp_type (temp_type_1) (
-   sfunc = test_temp_func_2, stype = temp_type_1,
-   initcond = '(0,0)'
-);
-select agg_comp_type(foo) from test_func;
- agg_comp_type
----------------
- (16,20)
-(1 row)
-
--- Check alter schema
-create schema type_test;
-alter type temp_type_1 set schema type_test;
-\dT temp_type_1
-     List of data types
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dT type_test.temp_type_1
-                   List of data types
-  Schema   |         Name          |     Description     
------------+-----------------------+---------------------
- type_test | type_test.temp_type_1 | test composite type
-(1 row)
-
-\d test_func
-    Append-Only Table "public.test_func"
- Column |         Type          | Modifiers 
---------+-----------------------+-----------
- foo    | type_test.temp_type_1 | 
-Compression Type: None
-Compression Level: 0
-Block Size: 32768
-Checksum: f
-Distributed randomly
-
-select foo from test_func where (foo).a = 3;
-  foo  
--------
- (3,4)
-(1 row)
-
--- type name with truncation
-create type abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890 as (a
int, b int);
-NOTICE:  identifier "abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890"
will be truncated to "abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1"
-create table huge_type_table (a abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890);
-NOTICE:  identifier "abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890"
will be truncated to "abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1"
-NOTICE:  Table doesn't have 'distributed by' clause, and no column type is suitable for a
distribution key. Creating a NULL policy entry.
-insert into huge_type_table values ((1,2));
-insert into huge_type_table values ((3,4));
-select * from huge_type_table;
-   a   
--------
- (1,2)
- (3,4)
-(2 rows)
-
-\d huge_type_table;
-                      Append-Only Table "public.huge_type_table"
- Column |                              Type                               | Modifiers 
---------+-----------------------------------------------------------------+-----------
- a      | abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1 | 
-Compression Type: None
-Compression Level: 0
-Block Size: 32768
-Checksum: f
-Distributed randomly
-
-drop table huge_type_table;
-drop type abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890;
-NOTICE:  identifier "abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890"
will be truncated to "abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1"
--- composite type array tests ..negative test
-create table type_array_table (col_one type_test.temp_type_1[]);
-ERROR:  type "type_test.temp_type_1[]" does not exist
-\c regression

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bacfb0f1/src/test/regress/expected/goh_database.out
----------------------------------------------------------------------
diff --git a/src/test/regress/expected/goh_database.out b/src/test/regress/expected/goh_database.out
deleted file mode 100644
index f0d1319..0000000
--- a/src/test/regress/expected/goh_database.out
+++ /dev/null
@@ -1,121 +0,0 @@
-CREATE DATABASE goh_database;
-DROP DATABASE goh_database;
--- should be a clean database
-CREATE DATABASE goh_database1;
-\c goh_database1
-CREATE TABLE x(c int);
-NOTICE:  Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'c' as the Greenplum
Database data distribution key for this table.
-HINT:  The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s)
chosen are the optimal data distribution key to minimize skew.
-INSERT INTO x VALUES(generate_series(1, 10));
-SELECT * FROM x ORDER BY c;
- c  
-----
-  1
-  2
-  3
-  4
-  5
-  6
-  7
-  8
-  9
- 10
-(10 rows)
-
-DROP TABLE x;
-\c regression
--- table should be removed
-CREATE DATABASE goh_database2;
-\c goh_database2
-CREATE TABLE x(c int);
-NOTICE:  Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'c' as the Greenplum
Database data distribution key for this table.
-HINT:  The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s)
chosen are the optimal data distribution key to minimize skew.
-INSERT INTO x VALUES(generate_series(1, 10));
-SELECT * FROM x ORDER BY c;
- c  
-----
-  1
-  2
-  3
-  4
-  5
-  6
-  7
-  8
-  9
- 10
-(10 rows)
-
-\c regression
-CREATE TABLESPACE goh_regression_tablespace1 FILESPACE dfs_system;
-CREATE DATABASE goh_database3 TABLESPACE goh_regression_tablespace1;
-\c goh_database3
-CREATE TABLE x(c int);
-NOTICE:  Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'c' as the Greenplum
Database data distribution key for this table.
-HINT:  The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s)
chosen are the optimal data distribution key to minimize skew.
-INSERT INTO x VALUES(generate_series(1, 10));
-SELECT * FROM x ORDER BY c;
- c  
-----
-  1
-  2
-  3
-  4
-  5
-  6
-  7
-  8
-  9
- 10
-(10 rows)
-
-\d x
- Append-Only Table "public.x"
- Column |  Type   | Modifiers 
---------+---------+-----------
- c      | integer | 
-Compression Type: None
-Compression Level: 0
-Block Size: 32768
-Checksum: f
-Distributed randomly
-
-\c regression
-BEGIN;
-CREATE TABLESPACE goh_regression_tablespace2 FILESPACE dfs_system;
-CREATE TABLE x(c int) TABLESPACE goh_regression_tablespace2;
-NOTICE:  Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'c' as the Greenplum
Database data distribution key for this table.
-HINT:  The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s)
chosen are the optimal data distribution key to minimize skew.
-INSERT INTO x VALUES(generate_series(1, 10));
-SELECT * FROM x ORDER BY c;
- c  
-----
-  1
-  2
-  3
-  4
-  5
-  6
-  7
-  8
-  9
- 10
-(10 rows)
-
-\d x
- Append-Only Table "public.x"
- Column |  Type   | Modifiers 
---------+---------+-----------
- c      | integer | 
-Compression Type: None
-Compression Level: 0
-Block Size: 32768
-Checksum: f
-Distributed randomly
-Tablespace: "goh_regression_tablespace2"
-
-ROLLBACK;
-DROP DATABASE goh_database1;
-DROP DATABASE goh_database2;
-DROP DATABASE goh_database3;
-DROP TABLESPACE goh_regression_tablespace1;

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bacfb0f1/src/test/regress/expected/goh_gp_dist_random.out
----------------------------------------------------------------------
diff --git a/src/test/regress/expected/goh_gp_dist_random.out b/src/test/regress/expected/goh_gp_dist_random.out
deleted file mode 100644
index 7c3adec..0000000
--- a/src/test/regress/expected/goh_gp_dist_random.out
+++ /dev/null
@@ -1,15 +0,0 @@
-select count(*) > 0 as c from gp_dist_random('pg_class');
- c 
----
- t
-(1 row)
-
-select relname from gp_dist_random('pg_class') c
-  inner join gp_dist_random('pg_namespace') n
-    on c.gp_segment_id = n.gp_segment_id
-      and c.relnamespace = n.oid
-  where n.nspname = 'nonexistent';
- relname 
----------
-(0 rows)
-

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bacfb0f1/src/test/regress/expected/goh_toast.out
----------------------------------------------------------------------
diff --git a/src/test/regress/expected/goh_toast.out b/src/test/regress/expected/goh_toast.out
deleted file mode 100644
index 4a65330..0000000
--- a/src/test/regress/expected/goh_toast.out
+++ /dev/null
@@ -1,39 +0,0 @@
-CREATE TABLE toastable_ao(a text, b varchar, c int) with(appendonly=true, compresslevel=1)
distributed randomly;
--- INSERT 
--- uses the toast call to store the large tuples
-INSERT INTO toastable_ao VALUES(repeat('a',100000), repeat('b',100001), 1);
-INSERT INTO toastable_ao VALUES(repeat('A',100000), repeat('B',100001), 2);
--- Check that tuples were toasted and are detoasted correctly. we use
--- char_length() because it guarantees a detoast without showing the whole result
-SELECT char_length(a), char_length(b), c FROM toastable_ao ORDER BY c;
- char_length | char_length | c 
--------------+-------------+---
-      100000 |      100001 | 1
-      100000 |      100001 | 2
-(2 rows)
-
--- ALTER
--- this will cause a full table rewrite. we make sure the toasted values and references
--- stay intact after all the oid switching business going on.
--- ALTER TABLE toastable_ao ADD COLUMN d int DEFAULT 10;
--- SELECT char_length(a), char_length(b), c, d FROM toastable_ao ORDER BY c;
-SELECT char_length(a), char_length(b), c FROM toastable_ao ORDER BY c;
- char_length | char_length | c 
--------------+-------------+---
-      100000 |      100001 | 1
-      100000 |      100001 | 2
-(2 rows)
-
--- TRUNCATE
--- remove reference to toast table and create a new one with different values
-TRUNCATE toastable_ao;
-INSERT INTO toastable_ao VALUES(repeat('a',100002), repeat('b',100003), 2);
-SELECT char_length(a), char_length(b), c FROM toastable_ao;
- char_length | char_length | c 
--------------+-------------+---
-      100002 |      100003 | 2
-(1 row)
-
--- TODO: figure out a way to verify that toasted data is removed after the truncate.
-DROP TABLE toastable_ao;
--- TODO: figure out a way to verify that the toast tables are dropped

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bacfb0f1/src/test/regress/expected/information_schema.out
----------------------------------------------------------------------
diff --git a/src/test/regress/expected/information_schema.out b/src/test/regress/expected/information_schema.out
deleted file mode 100644
index 3084620..0000000
--- a/src/test/regress/expected/information_schema.out
+++ /dev/null
@@ -1,101 +0,0 @@
-drop table if exists r;
-NOTICE:  table "r" does not exist, skipping
-create table r(a int, b int);
- 
-SELECT attnum::information_schema.cardinal_number 
-from pg_attribute 
-where attnum > 0 and attrelid = 'r'::regclass;
- attnum 
---------
-      1
-      2
-(2 rows)
-
--- this one should fail
-SELECT attnum::information_schema.cardinal_number 
-from pg_attribute 
-where attrelid = 'r'::regclass;
-ERROR:  value for domain information_schema.cardinal_number violates check constraint "cardinal_number_domain_check"
-SELECT *
-from (SELECT attnum::information_schema.cardinal_number 
-      from pg_attribute 
-      where attnum > 0 and attrelid = 'r'::regclass) q
-where attnum=2;
- attnum 
---------
-      2
-(1 row)
-
-select table_schema, table_name,column_name,ordinal_position
-from information_schema.columns
-where table_name ='r';
- table_schema | table_name | column_name | ordinal_position 
---------------+------------+-------------+------------------
- public       | r          | b           |                2
- public       | r          | a           |                1
-(2 rows)
-
-select table_schema, table_name,column_name,ordinal_position
-from information_schema.columns
-where table_name ='r'
-and ordinal_position =1;
- table_schema | table_name | column_name | ordinal_position 
---------------+------------+-------------+------------------
- public       | r          | a           |                1
-(1 row)
-
-select table_schema, table_name,column_name,ordinal_position
-from information_schema.columns
-where ordinal_position = 20;
-    table_schema    |  table_name   |    column_name     | ordinal_position 
---------------------+---------------+--------------------+------------------
- information_schema | element_types | interval_precision |               20
- pg_catalog         | pg_filesystem | fsysacl            |               20
- pg_catalog         | pg_statistic  | stavalues3         |               20
- pg_catalog         | pg_partitions | parenttablespace   |               20
- information_schema | attributes    | datetime_precision |               20
- information_schema | columns       | collation_catalog  |               20
- pg_catalog         | pg_type       | typtypmod          |               20
- pg_catalog         | pg_proc       | prodataaccess      |               20
- pg_catalog         | pg_class      | reltriggers        |               20
- pg_catalog         | pg_am         | ambuild            |               20
- information_schema | domains       | udt_catalog        |               20
- information_schema | parameters    | numeric_scale      |               20
- information_schema | routines      | collation_catalog  |               20
-(13 rows)
-
--- MPP-25724
-select a.column_name
-from information_schema.columns a
-where a.table_name
-in
-(select b.table_name from information_schema.tables b where
-	a.column_name like 'b') and a.table_name = 'r';
- column_name
--------------
- b
-(1 row)
-
-select c.relname
-from pg_class c
-where c.relname
-in
-(select b.table_name from information_schema.tables b where
-	c.relname like 'r');
- relname
----------
- r
-(1 row)
-
-select a.table_name
-from information_schema.tables a
-where a.table_name
-in
-(select b.relname from pg_class b where
-	a.table_name like 'r');
- table_name
-------------
- r
-(1 row)
-
-drop table r;

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bacfb0f1/src/test/regress/expected/rowtypes.out
----------------------------------------------------------------------
diff --git a/src/test/regress/expected/rowtypes.out b/src/test/regress/expected/rowtypes.out
deleted file mode 100755
index 249ab8b..0000000
--- a/src/test/regress/expected/rowtypes.out
+++ /dev/null
@@ -1,264 +0,0 @@
---
--- ROWTYPES
---
--- Make both a standalone composite type and a table rowtype
-create type complex as (r float8, i float8);
-create temp table fullname (first text, last text);
--- Nested composite
-create type quad as (c1 complex, c2 complex);
--- Some simple tests of I/O conversions and row construction
-select (1.1,2.2)::complex, row((3.3,4.4),(5.5,null))::quad;
-    row    |          row           
------------+------------------------
- (1.1,2.2) | ("(3.3,4.4)","(5.5,)")
-(1 row)
-
-select row('Joe', 'Blow')::fullname, '(Joe,Blow)'::fullname;
-    row     |  fullname  
-------------+------------
- (Joe,Blow) | (Joe,Blow)
-(1 row)
-
-select '(Joe,von Blow)'::fullname, '(Joe,d''Blow)'::fullname;
-     fullname     |   fullname   
-------------------+--------------
- (Joe,"von Blow") | (Joe,d'Blow)
-(1 row)
-
-select '(Joe,"von""Blow")'::fullname, E'(Joe,d\\\\Blow)'::fullname;
-     fullname      |    fullname     
--------------------+-----------------
- (Joe,"von""Blow") | (Joe,"d\\Blow")
-(1 row)
-
-select '(Joe,"Blow,Jr")'::fullname;
-    fullname     
------------------
- (Joe,"Blow,Jr")
-(1 row)
-
-select '(Joe,)'::fullname;	-- ok, null 2nd column
- fullname 
-----------
- (Joe,)
-(1 row)
-
-select '(Joe)'::fullname;	-- bad
-ERROR:  malformed record literal: "(Joe)"
-LINE 1: select '(Joe)'::fullname;
-               ^
-DETAIL:  Too few columns.
-select '(Joe,,)'::fullname;	-- bad
-ERROR:  malformed record literal: "(Joe,,)"
-LINE 1: select '(Joe,,)'::fullname;
-               ^
-DETAIL:  Too many columns.
-create temp table quadtable(f1 int, q quad);
-insert into quadtable values (1, ((3.3,4.4),(5.5,6.6)));
-insert into quadtable values (2, ((null,4.4),(5.5,6.6)));
-select * from quadtable;
- f1 |             q             
-----+---------------------------
-  1 | ("(3.3,4.4)","(5.5,6.6)")
-  2 | ("(,4.4)","(5.5,6.6)")
-(2 rows)
-
-begin;
-set local add_missing_from = false;
-select f1, q.c1 from quadtable;		-- fails, q is a table reference
-ERROR:  missing FROM-clause entry for table "q"
-LINE 1: select f1, q.c1 from quadtable;
-                   ^
-rollback;
-select f1, (q).c1, (qq.q).c1.i from quadtable qq;
- f1 |    c1     |  i  
-----+-----------+-----
-  1 | (3.3,4.4) | 4.4
-  2 | (,4.4)    | 4.4
-(2 rows)
-
-create temp table people (fn fullname, bd date);
-insert into people values ('(Joe,Blow)', '1984-01-10');
-select * from people;
-     fn     |     bd     
-------------+------------
- (Joe,Blow) | 01-10-1984
-(1 row)
-
--- at the moment this will not work due to ALTER TABLE inadequacy:
-alter table fullname add column suffix text default '';
-ERROR:  cannot alter table "fullname" because column "people"."fn" uses its rowtype
--- Not supported in HAWQ
-alter table fullname add column suffix text default null;
-ERROR:  cannot alter table "fullname" because column "people"."fn" uses its rowtype
-select * from people;
-     fn     |     bd
-------------+------------
- (Joe,Blow) | 01-10-1984
-(1 row)
-
--- test insertion/updating of subfields, not supported in HAWQ
-update people set fn.suffix = 'Jr';
-ERROR:  cannot assign to field "suffix" of column "fn" because there is no such column in
data type fullname
-LINE 1: update people set fn.suffix = 'Jr';
-                          ^
-select * from people;
-     fn     |     bd
-------------+------------
- (Joe,Blow) | 01-10-1984
-(1 row)
-
-insert into quadtable (f1, q.c1.r, q.c2.i) values(44,55,66);
-select * from quadtable;
- f1 |             q             
-----+---------------------------
-  1 | ("(3.3,4.4)","(5.5,6.6)")
-  2 | ("(,4.4)","(5.5,6.6)")
- 44 | ("(55,)","(,66)")
-(3 rows)
-
--- The object here is to ensure that toasted references inside
--- composite values don't cause problems.  The large f1 value will
--- be toasted inside pp, it must still work after being copied to people.
-create temp table pp (f1 text);
-insert into pp values (repeat('abcdefghijkl', 100000));
--- HAWQ does not support alter, so remove third column null
-insert into people select ('Jim', f1)::fullname, current_date from pp;
-select (fn).first, substr((fn).last, 1, 20), length((fn).last) from people;
- first |        substr        | length  
--------+----------------------+---------
- Joe   | Blow                 |       4
- Jim   | abcdefghijklabcdefgh | 1200000
-(2 rows)
-
--- Test row comparison semantics.  Prior to PG 8.2 we did this in a totally
--- non-spec-compliant way.
-select ROW(1,2) < ROW(1,3) as true;
- true 
-------
- t
-(1 row)
-
-select ROW(1,2) < ROW(1,1) as false;
- false 
--------
- f
-(1 row)
-
-select ROW(1,2) < ROW(1,NULL) as null;
- null 
-------
- 
-(1 row)
-
-select ROW(1,2,3) < ROW(1,3,NULL) as true; -- the NULL is not examined
- true 
-------
- t
-(1 row)
-
-select ROW(11,'ABC') < ROW(11,'DEF') as true;
- true 
-------
- t
-(1 row)
-
-select ROW(11,'ABC') > ROW(11,'DEF') as false;
- false 
--------
- f
-(1 row)
-
-select ROW(12,'ABC') > ROW(11,'DEF') as true;
- true 
-------
- t
-(1 row)
-
--- = and <> have different NULL-behavior than < etc
-select ROW(1,2,3) < ROW(1,NULL,4) as null;
- null 
-------
- 
-(1 row)
-
-select ROW(1,2,3) = ROW(1,NULL,4) as false;
- false 
--------
- f
-(1 row)
-
-select ROW(1,2,3) <> ROW(1,NULL,4) as true;
- true 
-------
- t
-(1 row)
-
--- We allow operators beyond the six standard ones, if they have btree
--- operator classes.
-select ROW('ABC','DEF') ~<=~ ROW('DEF','ABC') as true;
- true 
-------
- t
-(1 row)
-
-select ROW('ABC','DEF') ~>=~ ROW('DEF','ABC') as false;
- false 
--------
- f
-(1 row)
-
-select ROW('ABC','DEF') ~~ ROW('DEF','ABC') as fail;
-ERROR:  could not determine interpretation of row comparison operator ~~
-LINE 1: select ROW('ABC','DEF') ~~ ROW('DEF','ABC') as fail;
-                                ^
-HINT:  Row comparison operators must be associated with btree operator classes.
--- Check row comparison with a subselect
-select unique1, unique2 from tenk1
-where (unique1, unique2) < any (select ten, ten from tenk1 where hundred < 3);
- unique1 | unique2 
----------+---------
-       1 |    2838
-       0 |    9998
-(2 rows)
-
--- Also check row comparison with an indexable condition
-select thousand, tenthous from tenk1
-where (thousand, tenthous) >= (997, 5000)
-order by thousand, tenthous;
- thousand | tenthous 
-----------+----------
-      997 |     5997
-      997 |     6997
-      997 |     7997
-      997 |     8997
-      997 |     9997
-      998 |      998
-      998 |     1998
-      998 |     2998
-      998 |     3998
-      998 |     4998
-      998 |     5998
-      998 |     6998
-      998 |     7998
-      998 |     8998
-      998 |     9998
-      999 |      999
-      999 |     1999
-      999 |     2999
-      999 |     3999
-      999 |     4999
-      999 |     5999
-      999 |     6999
-      999 |     7999
-      999 |     8999
-      999 |     9999
-(25 rows)
-
--- empty row constructor is valid
-select ROW();
- row 
------
- ()
-(1 row)
-

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bacfb0f1/src/test/regress/expected/temp.out
----------------------------------------------------------------------
diff --git a/src/test/regress/expected/temp.out b/src/test/regress/expected/temp.out
deleted file mode 100755
index 64ed4d1..0000000
--- a/src/test/regress/expected/temp.out
+++ /dev/null
@@ -1,201 +0,0 @@
---
--- TEMP
--- Test temp relations and indexes
---
--- test temp table/index masking
-CREATE TABLE temptest(col int);
--- CREATE INDEX i_temptest ON temptest(col);
-CREATE TEMP TABLE temptest(tcol int);
--- CREATE INDEX i_temptest ON temptest(tcol);
-SELECT * FROM temptest;
- tcol 
-------
-(0 rows)
-
--- DROP INDEX i_temptest;
-DROP TABLE temptest;
-SELECT * FROM temptest;
- col 
------
-(0 rows)
-
--- DROP INDEX i_temptest;
-DROP TABLE temptest;
--- test temp table selects
-CREATE TABLE temptest(col int);
-INSERT INTO temptest VALUES (1);
-CREATE TEMP TABLE temptest(tcol float);
-INSERT INTO temptest VALUES (2.1);
-SELECT * FROM temptest;
- tcol 
-------
-  2.1
-(1 row)
-
-DROP TABLE temptest;
-SELECT * FROM temptest;
- col 
------
-   1
-(1 row)
-
-DROP TABLE temptest;
--- test temp table deletion
-CREATE TEMP TABLE temptest(col int);
-\c regression
-SELECT * FROM temptest;
-ERROR:  relation "temptest" does not exist
-LINE 1: SELECT * FROM temptest;
-                      ^
--- Test ON COMMIT DELETE ROWS
-CREATE TEMP TABLE temptest(col int) ON COMMIT DELETE ROWS;
-BEGIN;
-INSERT INTO temptest VALUES (1);
-INSERT INTO temptest VALUES (2);
-SELECT * FROM temptest;
- col 
------
-   1
-   2
-(2 rows)
-
-COMMIT;
-SELECT * FROM temptest;
- col 
------
-(0 rows)
-
-DROP TABLE temptest;
-BEGIN;
-CREATE TEMP TABLE temptest(col) ON COMMIT DELETE ROWS AS SELECT 1;
-SELECT * FROM temptest;
- col 
------
-   1
-(1 row)
-
-COMMIT;
-SELECT * FROM temptest;
- col 
------
-(0 rows)
-
-DROP TABLE temptest;
--- Test ON COMMIT DROP
-BEGIN;
-CREATE TEMP TABLE temptest(col int) ON COMMIT DROP;
-INSERT INTO temptest VALUES (1);
-INSERT INTO temptest VALUES (2);
-SELECT * FROM temptest;
- col 
------
-   1
-   2
-(2 rows)
-
-COMMIT;
-SELECT * FROM temptest;
-ERROR:  relation "temptest" does not exist
-LINE 1: SELECT * FROM temptest;
-                      ^
-BEGIN;
-CREATE TEMP TABLE temptest(col) ON COMMIT DROP AS SELECT 1;
-SELECT * FROM temptest;
- col 
------
-   1
-(1 row)
-
-COMMIT;
-SELECT * FROM temptest;
-ERROR:  relation "temptest" does not exist
-LINE 1: SELECT * FROM temptest;
-                      ^
--- ON COMMIT is only allowed for TEMP
-CREATE TABLE temptest(col int) ON COMMIT DELETE ROWS;
-ERROR:  ON COMMIT can only be used on temporary tables
-CREATE TABLE temptest(col) ON COMMIT DELETE ROWS AS SELECT 1;
-ERROR:  ON COMMIT can only be used on temporary tables
--- Test foreign keys
--- BEGIN;
--- CREATE TEMP TABLE temptest1(col int PRIMARY KEY) DISTRIBUTED BY (col);
--- CREATE TEMP TABLE temptest2(col int REFERENCES temptest1)
---   ON COMMIT DELETE ROWS;
--- INSERT INTO temptest1 VALUES (1);
--- INSERT INTO temptest2 VALUES (1);
--- COMMIT;
--- SELECT * FROM temptest1;
--- SELECT * FROM temptest2;
--- BEGIN;
--- CREATE TEMP TABLE temptest3(col int PRIMARY KEY) ON COMMIT DELETE ROWS DISTRIBUTED BY
(col);
--- CREATE TEMP TABLE temptest4(col int REFERENCES temptest3);
--- COMMIT;
--- Test manipulation of temp schema's placement in search path
-create table public.whereami (f1 text);
-NOTICE:  Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'f1' as the Greenplum
Database data distribution key for this table.
-HINT:  The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s)
chosen are the optimal data distribution key to minimize skew.
-insert into public.whereami values ('public');
-create temp table whereami (f1 text);
-insert into whereami values ('temp');
--- create function public.whoami() returns text
---   as $$select 'public'::text$$ language sql;
--- create function pg_temp.whoami() returns text
---   as $$select 'temp'::text$$ language sql;
--- default should have pg_temp implicitly first, but only for tables
-select * from whereami;
-  f1  
-------
- temp
-(1 row)
-
--- select whoami();
--- can list temp first explicitly, but it still doesn't affect functions
-set search_path = pg_temp, public;
-select * from whereami;
-  f1  
-------
- temp
-(1 row)
-
--- select whoami();
--- or put it last for security
-set search_path = public, pg_temp;
-select * from whereami;
-   f1   
---------
- public
-(1 row)
-
--- select whoami();
--- you can invoke a temp function explicitly, though
--- select pg_temp.whoami();
-drop table public.whereami;
--- Test querying column using pg_temp schema
-create table pg_temp.test (row integer, count integer);
-insert into pg_temp.test values (1, 10), (2, 20), (3, 30);
-select avg(pg_temp.test.count) from pg_temp.test;
- avg
------
-  20
-(1 row)
-
-select avg(test.count) from pg_temp.test;
- avg
------
-  20
-(1 row)
-
-select avg(count) from pg_temp.test;
- avg
------
-  20
-(1 row)
-
-select case when pg_temp.test.count = 30 then 30 when pg_temp.test.count = 20 then 20 else
10 end from pg_temp.test;
- case
-------
-   10
-   20
-   30
-(3 rows)
-

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bacfb0f1/src/test/regress/expected/transactions.out
----------------------------------------------------------------------
diff --git a/src/test/regress/expected/transactions.out b/src/test/regress/expected/transactions.out
deleted file mode 100755
index a5b5ba2..0000000
--- a/src/test/regress/expected/transactions.out
+++ /dev/null
@@ -1,429 +0,0 @@
---
--- TRANSACTIONS
---
-BEGIN;
-SELECT * 
-   INTO TABLE xacttest
-   FROM aggtest;
-INSERT INTO xacttest (a, b) VALUES (777, 777.777);
-END;
--- should retrieve one value--
-SELECT a FROM xacttest WHERE a > 100;
-  a  
------
- 777
-(1 row)
-
-BEGIN;
-CREATE TABLE disappear (a int4);
-TRUNCATE aggtest;
--- should be empty
-SELECT * FROM aggtest;
- a | b 
----+---
-(0 rows)
-
-ABORT;
--- should not exist 
-SELECT oid FROM pg_class WHERE relname = 'disappear';
- oid 
------
-(0 rows)
-
--- should have members again 
-SELECT * FROM aggtest;
-  a  |    b    
------+---------
-  56 |     7.8
- 100 |  99.097
-   0 | 0.09561
-  42 |  324.78
-(4 rows)
-
-drop table aggtest;
--- Read-only tests
-CREATE TABLE writetest (a int);
-CREATE TEMPORARY TABLE temptest (a int);
-SET SESSION CHARACTERISTICS AS TRANSACTION READ ONLY;
-DROP TABLE writetest; -- fail
-ERROR:  transaction is read-only
-INSERT INTO writetest VALUES (1); -- fail
-ERROR:  transaction is read-only
-SELECT * FROM writetest; -- ok
- a 
----
-(0 rows)
-
--- DELETE FROM temptest; -- ok
--- UPDATE temptest SET a = 0 FROM writetest WHERE temptest.a = 1 AND writetest.a = temptest.a; -- ok
--- PREPARE test AS INSERT INTO writetest VALUES (1); -- ok
--- EXECUTE test; -- fail
-SELECT * FROM writetest, temptest; -- ok
- a | a 
----+---
-(0 rows)
-
-CREATE TABLE test AS SELECT * FROM writetest; -- fail
-ERROR:  transaction is read-only
-START TRANSACTION READ WRITE;
-DROP TABLE writetest; -- ok
-COMMIT;
--- Subtransactions, basic tests
--- create & drop tables
-SET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE;
-CREATE TABLE foobar (a int);
-BEGIN;
-	CREATE TABLE foo (a int);
-	SAVEPOINT one;
-		DROP TABLE foo;
-		CREATE TABLE bar (a int);
-	ROLLBACK TO SAVEPOINT one;
-	RELEASE SAVEPOINT one;
-	SAVEPOINT two;
-		CREATE TABLE baz (a int);
-	RELEASE SAVEPOINT two;
-	drop TABLE foobar;
-	CREATE TABLE barbaz (a int);
-COMMIT;
--- should exist: barbaz, baz, foo
-SELECT * FROM foo;		-- should be empty
- a 
----
-(0 rows)
-
-SELECT * FROM bar;		-- shouldn't exist
-ERROR:  relation "bar" does not exist
-LINE 1: SELECT * FROM bar;
-                      ^
-SELECT * FROM barbaz;	-- should be empty
- a 
----
-(0 rows)
-
-SELECT * FROM baz;		-- should be empty
- a 
----
-(0 rows)
-
--- inserts
-BEGIN;
-	INSERT INTO foo VALUES (1);
-	SAVEPOINT one;
-		INSERT into bar VALUES (1);
-ERROR:  relation "bar" does not exist
-	ROLLBACK TO one;
-	RELEASE SAVEPOINT one;
-	SAVEPOINT two;
-		INSERT into barbaz VALUES (1);
-	RELEASE two;
-	SAVEPOINT three;
-		SAVEPOINT four;
-			INSERT INTO foo VALUES (2);
-		RELEASE SAVEPOINT four;
-	ROLLBACK TO SAVEPOINT three;
-	RELEASE SAVEPOINT three;
-	INSERT INTO foo VALUES (3);
-COMMIT;
-SELECT * FROM foo;		-- should have 1 and 3
- a 
----
- 1
- 3
-(2 rows)
-
-SELECT * FROM barbaz;	-- should have 1
- a 
----
- 1
-(1 row)
-
--- test whole-tree commit
-BEGIN;
-	SAVEPOINT one;
-		SELECT foo;
-ERROR:  column "foo" does not exist
-LINE 1: SELECT foo;
-               ^
-	ROLLBACK TO SAVEPOINT one;
-	RELEASE SAVEPOINT one;
-	SAVEPOINT two;
-		CREATE TABLE savepoints (a int);
-		SAVEPOINT three;
-			INSERT INTO savepoints VALUES (1);
-			SAVEPOINT four;
-				INSERT INTO savepoints VALUES (2);
-				SAVEPOINT five;
-					INSERT INTO savepoints VALUES (3);
-				ROLLBACK TO SAVEPOINT five;
-COMMIT;
-COMMIT;		-- should not be in a transaction block
-WARNING:  there is no transaction in progress
-SELECT * FROM savepoints;
- a 
----
- 1
- 2
-(2 rows)
-
--- test whole-tree rollback
-BEGIN;
-	SAVEPOINT one;
-		INSERT INTO savepoints VALUES (23);
-	RELEASE SAVEPOINT one;
-	SAVEPOINT two;
-		INSERT INTO savepoints VALUES (24);
-		SAVEPOINT three;
-			INSERT INTO savepoints VALUES (25);
-ROLLBACK;
-COMMIT;		-- should not be in a transaction block
-WARNING:  there is no transaction in progress
-		
-SELECT * FROM savepoints ORDER BY 1;
- a 
----
- 1
- 2
-(2 rows)
-
--- test whole-tree commit on an aborted subtransaction
-BEGIN;
-	INSERT INTO savepoints VALUES (4);
-	SAVEPOINT one;
-		INSERT INTO savepoints VALUES (5);
-		SELECT foo;
-ERROR:  column "foo" does not exist
-LINE 1: SELECT foo;
-               ^
-COMMIT;
-SELECT * FROM savepoints ORDER BY 1;
- a 
----
- 1
- 2
-(2 rows)
-
-BEGIN;
-	INSERT INTO savepoints VALUES (6);
-	SAVEPOINT one;
-		INSERT INTO savepoints VALUES (7);
-	RELEASE SAVEPOINT one;
-	INSERT INTO savepoints VALUES (8);
-COMMIT;
--- rows 6 and 8 should have been created by the same xact
--- SELECT a.xmin = b.xmin FROM savepoints a, savepoints b WHERE a.a=6 AND b.a=8;
--- rows 6 and 7 should have been created by different xacts
--- SELECT a.xmin = b.xmin FROM savepoints a, savepoints b WHERE a.a=6 AND b.a=7;
-BEGIN;
-	INSERT INTO savepoints VALUES (9);
-	SAVEPOINT one;
-		INSERT INTO savepoints VALUES (10);
-	ROLLBACK TO SAVEPOINT one;
-		INSERT INTO savepoints VALUES (11);
-COMMIT;
-SELECT a FROM savepoints WHERE a in (9, 10, 11) ORDER BY 1;
- a  
-----
-  9
- 11
-(2 rows)
-
--- rows 9 and 11 should have been created by different xacts
--- SELECT a.xmin = b.xmin FROM savepoints a, savepoints b WHERE a.a=9 AND b.a=11;
-BEGIN;
-	INSERT INTO savepoints VALUES (12);
-	SAVEPOINT one;
-		INSERT INTO savepoints VALUES (13);
-		SAVEPOINT two;
-			INSERT INTO savepoints VALUES (14);
-	ROLLBACK TO SAVEPOINT one;
-		INSERT INTO savepoints VALUES (15);
-		SAVEPOINT two;
-			INSERT INTO savepoints VALUES (16);
-			SAVEPOINT three;
-				INSERT INTO savepoints VALUES (17);
-COMMIT;
-SELECT a FROM savepoints WHERE a BETWEEN 12 AND 17 ORDER BY 1;
- a  
-----
- 12
- 15
- 16
- 17
-(4 rows)
-
-BEGIN;
-	INSERT INTO savepoints VALUES (18);
-	SAVEPOINT one;
-		INSERT INTO savepoints VALUES (19);
-		SAVEPOINT two;
-			INSERT INTO savepoints VALUES (20);
-	ROLLBACK TO SAVEPOINT one;
-		INSERT INTO savepoints VALUES (21);
-	ROLLBACK TO SAVEPOINT one;
-		INSERT INTO savepoints VALUES (22);
-COMMIT;
-SELECT a FROM savepoints WHERE a BETWEEN 18 AND 22 ORDER BY 1;
- a  
-----
- 18
- 22
-(2 rows)
-
-DROP TABLE savepoints;
--- only in a transaction block:
-SAVEPOINT one;
-ERROR:  SAVEPOINT may only be used in transaction blocks
-ROLLBACK TO SAVEPOINT one;
-ERROR:  ROLLBACK TO SAVEPOINT may only be used in transaction blocks
-RELEASE SAVEPOINT one;
-ERROR:  RELEASE SAVEPOINT may only be used in transaction blocks
--- Only "rollback to" allowed in aborted state
-BEGIN;
-  SAVEPOINT one;
-  SELECT 0/0;
-ERROR:  division by zero
-  SAVEPOINT two;    -- ignored till the end of ...
-ERROR:  current transaction is aborted, commands ignored until end of transaction block
-  RELEASE SAVEPOINT one;      -- ignored till the end of ...
-ERROR:  current transaction is aborted, commands ignored until end of transaction block
-  ROLLBACK TO SAVEPOINT one;
-  SELECT 1;
- ?column? 
-----------
-        1
-(1 row)
-
-COMMIT;
-SELECT 1;			-- this should work
- ?column? 
-----------
-        1
-(1 row)
-
--- check non-transactional behavior of cursors
--- BEGIN;
--- 	DECLARE c CURSOR FOR SELECT unique2 FROM tenk1 ORDER BY 1;
--- 	SAVEPOINT one;
--- 		FETCH 10 FROM c;
--- 	ROLLBACK TO SAVEPOINT one;
--- 		FETCH 10 FROM c;
--- 	RELEASE SAVEPOINT one;
--- 	FETCH 10 FROM c;
--- 	CLOSE c;
--- 	DECLARE c CURSOR FOR SELECT unique2/0 FROM tenk1 ORDER BY 1;
--- 	SAVEPOINT two;
--- 		FETCH 10 FROM c;
--- 	ROLLBACK TO SAVEPOINT two;
--- 	-- c is now dead to the world ...
--- 		FETCH 10 FROM c;
--- 	ROLLBACK TO SAVEPOINT two;
--- 	RELEASE SAVEPOINT two;
--- 	FETCH 10 FROM c;
--- COMMIT;
---
--- Check that "stable" functions are really stable.  They should not be
--- able to see the partial results of the calling query.  (Ideally we would
--- also check that they don't see commits of concurrent transactions, but
--- that's a mite hard to do within the limitations of pg_regress.)
---
-select * from xacttest;
-  a  |    b    
------+---------
-  56 |     7.8
- 100 |  99.097
-   0 | 0.09561
-  42 |  324.78
- 777 | 777.777
-(5 rows)
-
--- create or replace function max_xacttest() returns smallint language sql as
--- 'select max(a) from xacttest' stable;
--- begin;
--- update xacttest set a = max_xacttest() + 10 where a > 0;
--- select * from xacttest;
--- rollback;
--- But a volatile function can see the partial results of the calling query
--- create or replace function max_xacttest() returns smallint language sql as
--- 'select max(a) from xacttest' volatile;
--- begin;
--- update xacttest set a = max_xacttest() + 10 where a > 0;
--- select * from xacttest;
--- rollback;
--- Now the same test with plpgsql (since it depends on SPI which is different)
--- create or replace function max_xacttest() returns smallint language plpgsql as
--- 'begin return max(a) from xacttest; end' stable;
--- begin;
--- update xacttest set a = max_xacttest() + 10 where a > 0;
--- select * from xacttest;
--- rollback;
--- create or replace function max_xacttest() returns smallint language plpgsql as
--- 'begin return max(a) from xacttest; end' volatile;
--- begin;
--- update xacttest set a = max_xacttest() + 10 where a > 0;
--- select * from xacttest;
--- rollback;
--- test case for problems with dropping an open relation during abort
--- BEGIN;
--- 	savepoint x;
--- 		CREATE TABLE koju (a INT UNIQUE);
--- 		INSERT INTO koju VALUES (1);
--- 		INSERT INTO koju VALUES (1);
--- 	rollback to x;
--- 	CREATE TABLE koju (a INT UNIQUE);
--- 	INSERT INTO koju VALUES (1);
--- 	INSERT INTO koju VALUES (1);
--- ROLLBACK;
-DROP TABLE foo;
-DROP TABLE baz;
-DROP TABLE barbaz;
--- verify that cursors created during an aborted subtransaction are
--- closed, but that we do not rollback the effect of any FETCHs
--- performed in the aborted subtransaction
--- begin;
--- savepoint x;
--- create table abc (a int);
--- insert into abc values (5);
--- insert into abc values (10);
--- declare foo cursor for select * from abc;
--- fetch from foo;
--- rollback to x;
--- should fail
--- fetch from foo;
--- commit;
--- begin;
--- create table abc (a int);
--- insert into abc values (5);
--- insert into abc values (10);
--- insert into abc values (15);
--- declare foo cursor for select * from abc;
--- fetch from foo;
--- savepoint x;
--- fetch from foo;
--- rollback to x;
--- fetch from foo;
--- abort;
--- tests for the "tid" type
-SELECT '(3, 3)'::tid = '(3, 4)'::tid;
- ?column? 
-----------
- f
-(1 row)
-
-SELECT '(3, 3)'::tid = '(3, 3)'::tid;
- ?column? 
-----------
- t
-(1 row)
-
-SELECT '(3, 3)'::tid <> '(3, 3)'::tid;
- ?column? 
-----------
- f
-(1 row)
-
-SELECT '(3, 3)'::tid <> '(3, 4)'::tid;
- ?column? 
-----------
- t
-(1 row)
-

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bacfb0f1/src/test/regress/input/errortbl.source
----------------------------------------------------------------------
diff --git a/src/test/regress/input/errortbl.source b/src/test/regress/input/errortbl.source
deleted file mode 100644
index bf83da7..0000000
--- a/src/test/regress/input/errortbl.source
+++ /dev/null
@@ -1,98 +0,0 @@
--- --------------------------------------
--- 'gpfdist' protocol
--- --------------------------------------
-
-CREATE EXTERNAL WEB TABLE gpfdist_status (x text)
-execute E'( python $GPHOME/bin/lib/gppinggpfdist.py @hostname@:7070 2>&1 || echo) '
-on SEGMENT 0
-FORMAT 'text' (delimiter '|');
-
-CREATE EXTERNAL WEB TABLE gpfdist_start (x text)
-execute E'((@gpwhich_gpfdist@ -p 7070 -d @abs_srcdir@/data  </dev/null >/dev/null 2>&1 &); sleep 2; echo "starting...") '
-on SEGMENT 0
-FORMAT 'text' (delimiter '|');
-
-CREATE EXTERNAL WEB TABLE gpfdist_stop (x text)
-execute E'(/bin/pkill gpfdist || killall gpfdist) > /dev/null 2>&1; echo "stopping..."'
-on SEGMENT 0
-FORMAT 'text' (delimiter '|');
--- start_ignore
-select * from gpfdist_stop;
-select * from gpfdist_status;
-select * from gpfdist_start;
-select * from gpfdist_status;
--- end_ignore
-
--- readable external table with error table
-
-CREATE EXTERNAL TABLE EXT_NATION1 ( N_NATIONKEY  INTEGER ,
-                            N_NAME       CHAR(25) ,
-                            N_REGIONKEY  INTEGER ,
-                            N_COMMENT    VARCHAR(152))
-location ('gpfdist://@hostname@:7070/nation_error50.tbl')
-FORMAT 'text' (delimiter '|')
-LOG ERRORS INTO EXT_NATION_ERROR1 SEGMENT REJECT LIMIT 51;
-
-CREATE EXTERNAL TABLE EXT_NATION2 ( N_NATIONKEY  INTEGER ,
-                            N_NAME       CHAR(25) ,
-                            N_REGIONKEY  INTEGER ,
-                            N_COMMENT    VARCHAR(152))
-location ('gpfdist://@hostname@:7070/nation_error50.tbl')
-FORMAT 'text' (delimiter '|')
-LOG ERRORS INTO EXT_NATION_ERROR2 SEGMENT REJECT LIMIT 50;
-
-CREATE EXTERNAL TABLE EXT_NATION3 ( N_NATIONKEY  INTEGER ,
-                            N_NAME       CHAR(25) ,
-                            N_REGIONKEY  INTEGER ,
-                            N_COMMENT    VARCHAR(152))
-location ('gpfdist://@hostname@:7070/nation.tbl')
-FORMAT 'text' (delimiter '|')
-LOG ERRORS INTO EXT_NATION_ERROR3 SEGMENT REJECT LIMIT 50;
-
--- use existing error table
-CREATE EXTERNAL TABLE EXT_NATION_WITH_EXIST_ERROR_TABLE ( N_NATIONKEY  INTEGER ,
-                            N_NAME       CHAR(25) ,
-                            N_REGIONKEY  INTEGER ,
-                            N_COMMENT    VARCHAR(152))
-location ('gpfdist://@hostname@:7070/nation_error50.tbl')
-FORMAT 'text' (delimiter '|')
-LOG ERRORS INTO EXT_NATION_ERROR1 SEGMENT REJECT LIMIT 51;
-
-select * from EXT_NATION1;
-select count(*) from EXT_NATION_ERROR1; -- should be 50
-select * from EXT_NATION_WITH_EXIST_ERROR_TABLE;
-select count(*) from EXT_NATION_ERROR1; -- should be 100
-select * from EXT_NATION2; -- should fail
-select count(*) from EXT_NATION_ERROR2; -- should be empty
-select * from EXT_NATION3;
-select count(*) from EXT_NATION_ERROR3; -- should be empty
-
-truncate EXT_NATION_ERROR1;
-select * from EXT_NATION1 as x, EXT_NATION3 as y where x.n_nationkey = y.n_nationkey;
-select count(*) from EXT_NATION_ERROR1; -- should be 50
-
-truncate EXT_NATION_ERROR1;
-select * from EXT_NATION1 as x, EXT_NATION1 as y where x.n_nationkey = y.n_nationkey; --should not fail on self join
-select count(*) from EXT_NATION_ERROR1; -- should be 100
-
-truncate EXT_NATION_ERROR1;
-select * from EXT_NATION1 as x, EXT_NATION_WITH_EXIST_ERROR_TABLE as y where x.n_nationkey = y.n_nationkey; --should not fail with the same error table
-select count(*) from EXT_NATION_ERROR1; -- should be 100
-
--- should fail on writable external table
-CREATE WRITABLE EXTERNAL TABLE EXT_NATION_WRITABLE ( N_NATIONKEY  INTEGER ,
-                            N_NAME       CHAR(25) ,
-                            N_REGIONKEY  INTEGER ,
-                            N_COMMENT    VARCHAR(152))
-LOCATION ('gpfdist://@hostname@:7070/nation_error50.tbl') 
-FORMAT 'text' (delimiter '|')
-LOG ERRORS INTO EXT_NATION_ERROR_WRITABLE SEGMENT REJECT LIMIT 5;
-
--- start_ignore
-select * from gpfdist_stop;
-select * from gpfdist_status;
--- end_ignore
-
-drop external table gpfdist_status;
-drop external table gpfdist_start;
-drop external table gpfdist_stop;

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bacfb0f1/src/test/regress/known_good_schedule
----------------------------------------------------------------------
diff --git a/src/test/regress/known_good_schedule b/src/test/regress/known_good_schedule
index 68e53a4..5dd86a2 100755
--- a/src/test/regress/known_good_schedule
+++ b/src/test/regress/known_good_schedule
@@ -1,7 +1,5 @@
 # $PostgreSQL: pgsql/src/test/regress/serial_schedule,v 1.33 2006/08/30 23:34:22 tgl Exp $
 # This should probably be in an order similar to parallel_schedule.
-test: errortbl
-test: goh_create_type_composite
 ignore: goh_create_table
 ignore: goh_vacuum
 ignore: goh_subselect
@@ -10,11 +8,8 @@ ignore: goh_gphdfs
 ignore: goh_dpe
 ignore: goh_gpic
 ignore: goh_eagerfree
-test: goh_toast
 ignore: goh_alter_distribution_policy
 ignore: goh_column_compression
-test: goh_database
-test: goh_gp_dist_random
 ignore: gpsql_fault_tolerance
 ignore: goh_set_schema
 ignore: bit
@@ -63,7 +58,6 @@ ignore: select_distinct_on
 ignore: select_implicit
 ignore: select_having
 ignore: subselect
-test: information_schema
 ignore: union
 ignore: case
 ignore: join
@@ -73,7 +67,6 @@ ignore: leastsquares
 ignore: naivebayes
 ignore: direct_dispatch
 ignore: partition_pruning_with_fn
-test: transactions
 ignore: distributed_transactions
 ignore: random
 ignore: arrays
@@ -97,7 +90,6 @@ ignore: combocid
 ignore: limit
 ignore: plpgsql
 ignore: copy2
-test: temp
 ignore: domain
 ignore: rangefuncs
 ignore: table_functions
@@ -107,7 +99,6 @@ ignore: without_oid
 ignore: conversion
 ignore: truncate
 ignore: alter_table
-test: rowtypes
 ignore: returning
 ignore: stats
 ignore: tablespace



Mime
View raw message