hive-commits mailing list archives

From gunt...@apache.org
Subject [22/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)
Date Fri, 03 Feb 2017 21:50:35 GMT
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_partitions_filter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_partitions_filter.q.out b/ql/src/test/results/beelinepositive/drop_partitions_filter.q.out
deleted file mode 100644
index f584bbd..0000000
--- a/ql/src/test/results/beelinepositive/drop_partitions_filter.q.out
+++ /dev/null
@@ -1,111 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_partitions_filter.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_partitions_filter.q
->>>  create table ptestfilter (a string, b int) partitioned by (c string, d string);
-No rows affected 
->>>  describe ptestfilter;
-'col_name','data_type','comment'
-'a','string',''
-'b','int',''
-'c','string',''
-'d','string',''
-4 rows selected 
->>>  
->>>  alter table ptestfilter add partition (c='US', d=1);
-No rows affected 
->>>  alter table ptestfilter add partition (c='US', d=2);
-No rows affected 
->>>  alter table ptestFilter add partition (c='Uganda', d=2);
-No rows affected 
->>>  alter table ptestfilter add partition (c='Germany', d=2);
-No rows affected 
->>>  alter table ptestfilter add partition (c='Canada', d=3);
-No rows affected 
->>>  alter table ptestfilter add partition (c='Russia', d=3);
-No rows affected 
->>>  alter table ptestfilter add partition (c='Greece', d=2);
-No rows affected 
->>>  alter table ptestfilter add partition (c='India', d=3);
-No rows affected 
->>>  alter table ptestfilter add partition (c='France', d=4);
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=Canada/d=3'
-'c=France/d=4'
-'c=Germany/d=2'
-'c=Greece/d=2'
-'c=India/d=3'
-'c=Russia/d=3'
-'c=US/d=1'
-'c=US/d=2'
-'c=Uganda/d=2'
-9 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c='US', d<'2');
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=Canada/d=3'
-'c=France/d=4'
-'c=Germany/d=2'
-'c=Greece/d=2'
-'c=India/d=3'
-'c=Russia/d=3'
-'c=US/d=2'
-'c=Uganda/d=2'
-8 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c>='US', d<='2');
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=Canada/d=3'
-'c=France/d=4'
-'c=Germany/d=2'
-'c=Greece/d=2'
-'c=India/d=3'
-'c=Russia/d=3'
-6 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c >'India');
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=Canada/d=3'
-'c=France/d=4'
-'c=Germany/d=2'
-'c=Greece/d=2'
-'c=India/d=3'
-5 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c >='India'), 
-partition (c='Greece', d='2');
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=Canada/d=3'
-'c=France/d=4'
-'c=Germany/d=2'
-3 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c != 'France');
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=France/d=4'
-1 row selected 
->>>  
->>>  set hive.exec.drop.ignorenonexistent=false;
-No rows affected 
->>>  alter table ptestfilter drop if exists partition (c='US');
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=France/d=4'
-1 row selected 
->>>  
->>>  drop table ptestfilter;
-No rows affected 
->>>  
->>>  
->>>  !record
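
For reference, the partition-filter syntax this golden file exercised: DROP PARTITION accepts comparison operators on partition columns, multiple PARTITION clauses in one statement, and IF EXISTS. A minimal recap using the test's own table:

    ALTER TABLE ptestfilter DROP PARTITION (c = 'US', d < '2');
    ALTER TABLE ptestfilter DROP PARTITION (c >= 'US', d <= '2');
    ALTER TABLE ptestfilter DROP PARTITION (c >= 'India'),
                            PARTITION (c = 'Greece', d = '2');
    -- With hive.exec.drop.ignorenonexistent=false, IF EXISTS still
    -- suppresses the error when no partition matches the filter:
    ALTER TABLE ptestfilter DROP IF EXISTS PARTITION (c = 'US');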

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_partitions_filter2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_partitions_filter2.q.out b/ql/src/test/results/beelinepositive/drop_partitions_filter2.q.out
deleted file mode 100644
index 9d31d69..0000000
--- a/ql/src/test/results/beelinepositive/drop_partitions_filter2.q.out
+++ /dev/null
@@ -1,59 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_partitions_filter2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_partitions_filter2.q
->>>  create table ptestfilter (a string, b int) partitioned by (c int, d int);
-No rows affected 
->>>  describe ptestfilter;
-'col_name','data_type','comment'
-'a','string',''
-'b','int',''
-'c','int',''
-'d','int',''
-4 rows selected 
->>>  
->>>  alter table ptestfilter add partition (c=1, d=1);
-No rows affected 
->>>  alter table ptestfilter add partition (c=1, d=2);
-No rows affected 
->>>  alter table ptestFilter add partition (c=2, d=1);
-No rows affected 
->>>  alter table ptestfilter add partition (c=2, d=2);
-No rows affected 
->>>  alter table ptestfilter add partition (c=3, d=1);
-No rows affected 
->>>  alter table ptestfilter add partition (c=3, d=2);
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=1/d=1'
-'c=1/d=2'
-'c=2/d=1'
-'c=2/d=2'
-'c=3/d=1'
-'c=3/d=2'
-6 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c=1, d=1);
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=1/d=2'
-'c=2/d=1'
-'c=2/d=2'
-'c=3/d=1'
-'c=3/d=2'
-5 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c=2);
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=1/d=2'
-'c=3/d=1'
-'c=3/d=2'
-3 rows selected 
->>>  
->>>  drop table ptestfilter;
-No rows affected 
->>>  
->>>  
->>>  !record
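
Note the partial partition spec above: with partition columns (c, d), a filter on c alone drops every matching sub-partition. In one line:

    ALTER TABLE ptestfilter DROP PARTITION (c = 2);  -- drops both c=2/d=1 and c=2/d=2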

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_partitions_filter3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_partitions_filter3.q.out b/ql/src/test/results/beelinepositive/drop_partitions_filter3.q.out
deleted file mode 100644
index 58b9dfe..0000000
--- a/ql/src/test/results/beelinepositive/drop_partitions_filter3.q.out
+++ /dev/null
@@ -1,59 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_partitions_filter3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_partitions_filter3.q
->>>  create table ptestfilter (a string, b int) partitioned by (c string, d int);
-No rows affected 
->>>  describe ptestfilter;
-'col_name','data_type','comment'
-'a','string',''
-'b','int',''
-'c','string',''
-'d','int',''
-4 rows selected 
->>>  
->>>  alter table ptestfilter add partition (c='1', d=1);
-No rows affected 
->>>  alter table ptestfilter add partition (c='1', d=2);
-No rows affected 
->>>  alter table ptestFilter add partition (c='2', d=1);
-No rows affected 
->>>  alter table ptestfilter add partition (c='2', d=2);
-No rows affected 
->>>  alter table ptestfilter add partition (c='3', d=1);
-No rows affected 
->>>  alter table ptestfilter add partition (c='3', d=2);
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=1/d=1'
-'c=1/d=2'
-'c=2/d=1'
-'c=2/d=2'
-'c=3/d=1'
-'c=3/d=2'
-6 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c='1', d=1);
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=1/d=2'
-'c=2/d=1'
-'c=2/d=2'
-'c=3/d=1'
-'c=3/d=2'
-5 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c='2');
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=1/d=2'
-'c=3/d=1'
-'c=3/d=2'
-3 rows selected 
->>>  
->>>  drop table ptestfilter;
-No rows affected 
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_table.q.out b/ql/src/test/results/beelinepositive/drop_table.q.out
deleted file mode 100644
index e487f33..0000000
--- a/ql/src/test/results/beelinepositive/drop_table.q.out
+++ /dev/null
@@ -1,7 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_table.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_table.q
->>>  SET hive.exec.drop.ignorenonexistent=false;
-No rows affected 
->>>  DROP TABLE IF EXISTS UnknownTable;
-No rows affected 
->>>  !record
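
The point of this small test: even with hive.exec.drop.ignorenonexistent=false (so dropping a missing object raises an error), the IF EXISTS form stays silent:

    SET hive.exec.drop.ignorenonexistent=false;
    DROP TABLE IF EXISTS UnknownTable;  -- no error despite the table not existing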

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_table2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_table2.q.out b/ql/src/test/results/beelinepositive/drop_table2.q.out
deleted file mode 100644
index 430411e..0000000
--- a/ql/src/test/results/beelinepositive/drop_table2.q.out
+++ /dev/null
@@ -1,33 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_table2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_table2.q
->>>  SET hive.metastore.batch.retrieve.max=1;
-No rows affected 
->>>  create table if not exists temp(col STRING) partitioned by (p STRING);
-No rows affected 
->>>  alter table temp add if not exists partition (p ='p1');
-No rows affected 
->>>  alter table temp add if not exists partition (p ='p2');
-No rows affected 
->>>  alter table temp add if not exists partition (p ='p3');
-No rows affected 
->>>  
->>>  show partitions temp;
-'partition'
-'p=p1'
-'p=p2'
-'p=p3'
-3 rows selected 
->>>  
->>>  drop table temp;
-No rows affected 
->>>  
->>>  create table if not exists temp(col STRING) partitioned by (p STRING);
-No rows affected 
->>>  
->>>  show partitions temp;
-'partition'
-No rows selected 
->>>  
->>>  drop table temp;
-No rows affected 
->>>  !record
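
This test forces the metastore to fetch partitions one at a time during the drop, so the drop path is exercised across multiple retrieval batches:

    SET hive.metastore.batch.retrieve.max=1;  -- one partition per metastore call
    CREATE TABLE IF NOT EXISTS temp (col STRING) PARTITIONED BY (p STRING);
    ALTER TABLE temp ADD IF NOT EXISTS PARTITION (p = 'p1');
    ALTER TABLE temp ADD IF NOT EXISTS PARTITION (p = 'p2');
    ALTER TABLE temp ADD IF NOT EXISTS PARTITION (p = 'p3');
    DROP TABLE temp;  -- all three partitions are dropped despite the batch size of 1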

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_table_removes_partition_dirs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_table_removes_partition_dirs.q.out b/ql/src/test/results/beelinepositive/drop_table_removes_partition_dirs.q.out
deleted file mode 100644
index 276ee7c..0000000
--- a/ql/src/test/results/beelinepositive/drop_table_removes_partition_dirs.q.out
+++ /dev/null
@@ -1,32 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_table_removes_partition_dirs.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_table_removes_partition_dirs.q
->>>  -- This test verifies that if a partition exists outside the table's current location when the
->>>  -- table is dropped the partition's location is dropped as well.
->>>  
->>>  CREATE TABLE test_table (key STRING, value STRING) 
-PARTITIONED BY (part STRING) 
-STORED AS RCFILE 
-LOCATION 'file:${system:test.tmp.dir}/drop_table_removes_partition_dirs_table';
-No rows affected 
->>>  
->>>  ALTER TABLE test_table ADD PARTITION (part = '1') 
-LOCATION 'file:${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2/part=1';
-No rows affected 
->>>  
->>>  INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
-SELECT * FROM src;
-'key','value'
-No rows selected 
->>>  
->>>  dfs -ls ${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2;
-No rows affected 
->>>  
->>>  DROP TABLE test_table;
-No rows affected 
->>>  
->>>  dfs -ls ${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2;
-No rows affected 
->>>  
->>>  dfs -rmr ${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2;
-No rows affected 
->>>  !record
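
As the in-file comment says, the scenario is a partition whose LOCATION lies outside the table's own directory; DROP TABLE must remove that directory too. The essential steps:

    CREATE TABLE test_table (key STRING, value STRING)
      PARTITIONED BY (part STRING) STORED AS RCFILE
      LOCATION 'file:${system:test.tmp.dir}/drop_table_removes_partition_dirs_table';
    ALTER TABLE test_table ADD PARTITION (part = '1')
      LOCATION 'file:${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2/part=1';
    DROP TABLE test_table;
    -- the second dfs -ls in the test checks that the out-of-tree
    -- partition directory is gone after the drop
    dfs -ls ${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2;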

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_udf.q.out b/ql/src/test/results/beelinepositive/drop_udf.q.out
deleted file mode 100644
index 67ed784..0000000
--- a/ql/src/test/results/beelinepositive/drop_udf.q.out
+++ /dev/null
@@ -1,23 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_udf.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_udf.q
->>>  CREATE TEMPORARY FUNCTION test_translate AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestTranslate';
-No rows affected 
->>>  
->>>  EXPLAIN 
-DROP TEMPORARY FUNCTION test_translate;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_DROPFUNCTION test_translate)'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-''
-''
-10 rows selected 
->>>  
->>>  DROP TEMPORARY FUNCTION test_translate;
-No rows affected 
->>>  !record
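
The temporary-UDF lifecycle being covered, in two statements:

    CREATE TEMPORARY FUNCTION test_translate
      AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestTranslate';
    -- EXPLAIN shows the drop as a single root stage (TOK_DROPFUNCTION)
    DROP TEMPORARY FUNCTION test_translate;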

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_view.q.out b/ql/src/test/results/beelinepositive/drop_view.q.out
deleted file mode 100644
index 6e14e96..0000000
--- a/ql/src/test/results/beelinepositive/drop_view.q.out
+++ /dev/null
@@ -1,7 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_view.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_view.q
->>>  SET hive.exec.drop.ignorenonexistent=false;
-No rows affected 
->>>  DROP VIEW IF EXISTS UnknownView;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/enforce_order.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/enforce_order.q.out b/ql/src/test/results/beelinepositive/enforce_order.q.out
deleted file mode 100644
index 15258a8..0000000
--- a/ql/src/test/results/beelinepositive/enforce_order.q.out
+++ /dev/null
@@ -1,49 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/enforce_order.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/enforce_order.q
->>>  drop table table_asc;
-No rows affected 
->>>  drop table table_desc;
-No rows affected 
->>>  
->>>  set hive.enforce.sorting = true;
-No rows affected 
->>>  
->>>  create table table_asc(key string, value string) clustered by (key) sorted by (key ASC) into 1 BUCKETS;
-No rows affected 
->>>  create table table_desc(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS;
-No rows affected 
->>>  
->>>  insert overwrite table table_asc select key, value from src;
-'key','value'
-No rows selected 
->>>  insert overwrite table table_desc select key, value from src;
-'key','value'
-No rows selected 
->>>  
->>>  select * from table_asc limit 10;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'10','val_10'
-'100','val_100'
-'100','val_100'
-'103','val_103'
-'103','val_103'
-'104','val_104'
-'104','val_104'
-10 rows selected 
->>>  select * from table_desc limit 10;
-'key','value'
-'98','val_98'
-'98','val_98'
-'97','val_97'
-'97','val_97'
-'96','val_96'
-'95','val_95'
-'95','val_95'
-'92','val_92'
-'90','val_90'
-'90','val_90'
-10 rows selected 
->>>  !record
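
What the test asserts: with sorting enforcement on, rows inserted into a SORTED BY table come back in the declared key order (ascending for table_asc, descending for table_desc; note it is string order, so '10' sorts before '100'). The setup, condensed:

    SET hive.enforce.sorting = true;
    CREATE TABLE table_asc (key STRING, value STRING)
      CLUSTERED BY (key) SORTED BY (key ASC) INTO 1 BUCKETS;
    INSERT OVERWRITE TABLE table_asc SELECT key, value FROM src;
    SELECT * FROM table_asc LIMIT 10;  -- keys appear in ascending string order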

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/escape_clusterby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/escape_clusterby1.q.out b/ql/src/test/results/beelinepositive/escape_clusterby1.q.out
deleted file mode 100644
index d351533..0000000
--- a/ql/src/test/results/beelinepositive/escape_clusterby1.q.out
+++ /dev/null
@@ -1,119 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/escape_clusterby1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/escape_clusterby1.q
->>>  -- escaped column names in cluster by are not working jira 3267
->>>  explain 
-select key, value from src cluster by key, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_CLUSTERBY (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL value))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-53 rows selected 
->>>  
->>>  explain 
-select `key`, value from src cluster by `key`, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL `key`)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_CLUSTERBY (TOK_TABLE_OR_COL `key`) (TOK_TABLE_OR_COL value))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-53 rows selected 
->>>  !record
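
The regression being guarded (jira 3267, per the in-file comment): backtick-escaped column names in CLUSTER BY must produce the same plan as the unescaped form. The two EXPLAIN outputs above are identical apart from the escaped tokens in the syntax tree:

    EXPLAIN SELECT `key`, value FROM src CLUSTER BY `key`, value;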

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/escape_distributeby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/escape_distributeby1.q.out b/ql/src/test/results/beelinepositive/escape_distributeby1.q.out
deleted file mode 100644
index 8969611..0000000
--- a/ql/src/test/results/beelinepositive/escape_distributeby1.q.out
+++ /dev/null
@@ -1,109 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/escape_distributeby1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/escape_distributeby1.q
->>>  -- escaped column names in distribute by by are not working jira 3267
->>>  explain 
-select key, value from src distribute by key, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_DISTRIBUTEBY (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL value))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                sort order: '
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-48 rows selected 
->>>  
->>>  explain 
-select `key`, value from src distribute by `key`, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL `key`)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_DISTRIBUTEBY (TOK_TABLE_OR_COL `key`) (TOK_TABLE_OR_COL value))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                sort order: '
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-48 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/escape_orderby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/escape_orderby1.q.out b/ql/src/test/results/beelinepositive/escape_orderby1.q.out
deleted file mode 100644
index 4117c5d..0000000
--- a/ql/src/test/results/beelinepositive/escape_orderby1.q.out
+++ /dev/null
@@ -1,109 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/escape_orderby1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/escape_orderby1.q
->>>  -- escaped column names in order by are not working jira 3267
->>>  explain 
-select key, value from src order by key, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-48 rows selected 
->>>  
->>>  explain 
-select `key`, value from src order by `key`, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL `key`)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL `key`)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-48 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/escape_sortby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/escape_sortby1.q.out b/ql/src/test/results/beelinepositive/escape_sortby1.q.out
deleted file mode 100644
index a90ba4e..0000000
--- a/ql/src/test/results/beelinepositive/escape_sortby1.q.out
+++ /dev/null
@@ -1,109 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/escape_sortby1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/escape_sortby1.q
->>>  -- escaped column names in sort by are not working jira 3267
->>>  explain 
-select key, value from src sort by key, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-48 rows selected 
->>>  
->>>  explain 
-select `key`, value from src sort by `key`, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL `key`)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL `key`)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-48 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/explode_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/explode_null.q.out b/ql/src/test/results/beelinepositive/explode_null.q.out
deleted file mode 100644
index 17ca437..0000000
--- a/ql/src/test/results/beelinepositive/explode_null.q.out
+++ /dev/null
@@ -1,23 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/explode_null.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/explode_null.q
->>>  SELECT explode(col) AS myCol FROM 
-(SELECT array(1,2,3) AS col FROM src LIMIT 1 
-UNION ALL 
-SELECT IF(false, array(1,2,3), NULL) AS col FROM src LIMIT 1) a;
-'mycol'
-'1'
-'2'
-'3'
-3 rows selected 
->>>  
->>>  SELECT explode(col) AS (myCol1,myCol2) FROM 
-(SELECT map(1,'one',2,'two',3,'three') AS col FROM src LIMIT 1 
-UNION ALL 
-SELECT IF(false, map(1,'one',2,'two',3,'three'), NULL) AS col FROM src LIMIT 1) a;
-'mycol1','mycol2'
-'1','one'
-'2','two'
-'3','three'
-3 rows selected 
->>>  
->>>  !record
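
Worth noting in the output above: each UNION ALL branch yields one row, but the NULL collection contributes nothing to explode(), so three rows come out rather than six. The array form takes a single alias, the map form a pair:

    SELECT explode(col) AS myCol FROM
      (SELECT array(1,2,3) AS col FROM src LIMIT 1
       UNION ALL
       SELECT IF(false, array(1,2,3), NULL) AS col FROM src LIMIT 1) a;
    -- map input: explode(col) AS (myCol1, myCol2) emits key/value pairs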

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/fileformat_mix.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/fileformat_mix.q.out b/ql/src/test/results/beelinepositive/fileformat_mix.q.out
deleted file mode 100644
index 98963ce..0000000
--- a/ql/src/test/results/beelinepositive/fileformat_mix.q.out
+++ /dev/null
@@ -1,530 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/fileformat_mix.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/fileformat_mix.q
->>>  
->>>  
->>>  create table fileformat_mix_test (src int, value string) partitioned by (ds string);
-No rows affected 
->>>  alter table fileformat_mix_test set fileformat Sequencefile;
-No rows affected 
->>>  
->>>  insert overwrite table fileformat_mix_test partition (ds='1') 
-select key, value from src;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  alter table fileformat_mix_test add partition (ds='2');
-No rows affected 
->>>  
->>>  alter table fileformat_mix_test set fileformat rcfile;
-No rows affected 
->>>  
->>>  select count(1) from fileformat_mix_test;
-'_c0'
-'500'
-1 row selected 
->>>  
->>>  select src from fileformat_mix_test;
-'src'
-'238'
-'86'
-'311'
-'27'
-'165'
-'409'
-'255'
-'278'
-'98'
-'484'
-'265'
-'193'
-'401'
-'150'
-'273'
-'224'
-'369'
-'66'
-'128'
-'213'
-'146'
-'406'
-'429'
-'374'
-'152'
-'469'
-'145'
-'495'
-'37'
-'327'
-'281'
-'277'
-'209'
-'15'
-'82'
-'403'
-'166'
-'417'
-'430'
-'252'
-'292'
-'219'
-'287'
-'153'
-'193'
-'338'
-'446'
-'459'
-'394'
-'237'
-'482'
-'174'
-'413'
-'494'
-'207'
-'199'
-'466'
-'208'
-'174'
-'399'
-'396'
-'247'
-'417'
-'489'
-'162'
-'377'
-'397'
-'309'
-'365'
-'266'
-'439'
-'342'
-'367'
-'325'
-'167'
-'195'
-'475'
-'17'
-'113'
-'155'
-'203'
-'339'
-'0'
-'455'
-'128'
-'311'
-'316'
-'57'
-'302'
-'205'
-'149'
-'438'
-'345'
-'129'
-'170'
-'20'
-'489'
-'157'
-'378'
-'221'
-'92'
-'111'
-'47'
-'72'
-'4'
-'280'
-'35'
-'427'
-'277'
-'208'
-'356'
-'399'
-'169'
-'382'
-'498'
-'125'
-'386'
-'437'
-'469'
-'192'
-'286'
-'187'
-'176'
-'54'
-'459'
-'51'
-'138'
-'103'
-'239'
-'213'
-'216'
-'430'
-'278'
-'176'
-'289'
-'221'
-'65'
-'318'
-'332'
-'311'
-'275'
-'137'
-'241'
-'83'
-'333'
-'180'
-'284'
-'12'
-'230'
-'181'
-'67'
-'260'
-'404'
-'384'
-'489'
-'353'
-'373'
-'272'
-'138'
-'217'
-'84'
-'348'
-'466'
-'58'
-'8'
-'411'
-'230'
-'208'
-'348'
-'24'
-'463'
-'431'
-'179'
-'172'
-'42'
-'129'
-'158'
-'119'
-'496'
-'0'
-'322'
-'197'
-'468'
-'393'
-'454'
-'100'
-'298'
-'199'
-'191'
-'418'
-'96'
-'26'
-'165'
-'327'
-'230'
-'205'
-'120'
-'131'
-'51'
-'404'
-'43'
-'436'
-'156'
-'469'
-'468'
-'308'
-'95'
-'196'
-'288'
-'481'
-'457'
-'98'
-'282'
-'197'
-'187'
-'318'
-'318'
-'409'
-'470'
-'137'
-'369'
-'316'
-'169'
-'413'
-'85'
-'77'
-'0'
-'490'
-'87'
-'364'
-'179'
-'118'
-'134'
-'395'
-'282'
-'138'
-'238'
-'419'
-'15'
-'118'
-'72'
-'90'
-'307'
-'19'
-'435'
-'10'
-'277'
-'273'
-'306'
-'224'
-'309'
-'389'
-'327'
-'242'
-'369'
-'392'
-'272'
-'331'
-'401'
-'242'
-'452'
-'177'
-'226'
-'5'
-'497'
-'402'
-'396'
-'317'
-'395'
-'58'
-'35'
-'336'
-'95'
-'11'
-'168'
-'34'
-'229'
-'233'
-'143'
-'472'
-'322'
-'498'
-'160'
-'195'
-'42'
-'321'
-'430'
-'119'
-'489'
-'458'
-'78'
-'76'
-'41'
-'223'
-'492'
-'149'
-'449'
-'218'
-'228'
-'138'
-'453'
-'30'
-'209'
-'64'
-'468'
-'76'
-'74'
-'342'
-'69'
-'230'
-'33'
-'368'
-'103'
-'296'
-'113'
-'216'
-'367'
-'344'
-'167'
-'274'
-'219'
-'239'
-'485'
-'116'
-'223'
-'256'
-'263'
-'70'
-'487'
-'480'
-'401'
-'288'
-'191'
-'5'
-'244'
-'438'
-'128'
-'467'
-'432'
-'202'
-'316'
-'229'
-'469'
-'463'
-'280'
-'2'
-'35'
-'283'
-'331'
-'235'
-'80'
-'44'
-'193'
-'321'
-'335'
-'104'
-'466'
-'366'
-'175'
-'403'
-'483'
-'53'
-'105'
-'257'
-'406'
-'409'
-'190'
-'406'
-'401'
-'114'
-'258'
-'90'
-'203'
-'262'
-'348'
-'424'
-'12'
-'396'
-'201'
-'217'
-'164'
-'431'
-'454'
-'478'
-'298'
-'125'
-'431'
-'164'
-'424'
-'187'
-'382'
-'5'
-'70'
-'397'
-'480'
-'291'
-'24'
-'351'
-'255'
-'104'
-'70'
-'163'
-'438'
-'119'
-'414'
-'200'
-'491'
-'237'
-'439'
-'360'
-'248'
-'479'
-'305'
-'417'
-'199'
-'444'
-'120'
-'429'
-'169'
-'443'
-'323'
-'325'
-'277'
-'230'
-'478'
-'178'
-'468'
-'310'
-'317'
-'333'
-'493'
-'460'
-'207'
-'249'
-'265'
-'480'
-'83'
-'136'
-'353'
-'172'
-'214'
-'462'
-'233'
-'406'
-'133'
-'175'
-'189'
-'454'
-'375'
-'401'
-'421'
-'407'
-'384'
-'256'
-'26'
-'134'
-'67'
-'384'
-'379'
-'18'
-'462'
-'492'
-'100'
-'298'
-'9'
-'341'
-'498'
-'146'
-'458'
-'362'
-'186'
-'285'
-'348'
-'167'
-'18'
-'273'
-'183'
-'281'
-'344'
-'97'
-'469'
-'315'
-'84'
-'28'
-'37'
-'448'
-'152'
-'348'
-'307'
-'194'
-'414'
-'477'
-'222'
-'126'
-'90'
-'169'
-'403'
-'400'
-'200'
-'97'
-500 rows selected 
->>>  
->>>  !record
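
The mix scenario, condensed: partition ds='1' is written while the table default is SequenceFile, the default is then switched to RCFile, and the test confirms the old partition stays readable (the count and the full scan both succeed), since a partition keeps the file format it was written with:

    ALTER TABLE fileformat_mix_test SET FILEFORMAT Sequencefile;
    INSERT OVERWRITE TABLE fileformat_mix_test PARTITION (ds='1')
    SELECT key, value FROM src;
    ALTER TABLE fileformat_mix_test SET FILEFORMAT rcfile;
    SELECT count(1) FROM fileformat_mix_test;  -- still 500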

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/fileformat_sequencefile.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/fileformat_sequencefile.q.out b/ql/src/test/results/beelinepositive/fileformat_sequencefile.q.out
deleted file mode 100644
index 0005811..0000000
--- a/ql/src/test/results/beelinepositive/fileformat_sequencefile.q.out
+++ /dev/null
@@ -1,62 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/fileformat_sequencefile.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/fileformat_sequencefile.q
->>>  EXPLAIN 
-CREATE TABLE dest1(key INT, value STRING) STORED AS 
-INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME dest1) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL key TOK_INT) (TOK_TABCOL value TOK_STRING)) (TOK_TABLEFILEFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' 'org.apache.hadoop.mapred.SequenceFileOutputFormat'))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: key int, value string'
-'          if not exists: false'
-'          input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'          # buckets: -1'
-'          output format: org.apache.hadoop.mapred.SequenceFileOutputFormat'
-'          name: dest1'
-'          isExternal: false'
-''
-''
-19 rows selected 
->>>  
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS 
-INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat';
-No rows affected 
->>>  
->>>  DESCRIBE EXTENDED dest1;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:dest1, dbName:fileformat_sequencefile, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/fileformat_sequencefile.db/dest1, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 10;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'0','val_0'
-'4','val_4'
-'8','val_8'
-'0','val_0'
-'0','val_0'
-'5','val_5'
-'5','val_5'
-'2','val_2'
-'5','val_5'
-'9','val_9'
-10 rows selected 
->>>  
->>>  
->>>  !record
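
One detail the DESCRIBE EXTENDED output captures: the table is declared with org.apache.hadoop.mapred.SequenceFileOutputFormat, but the metastore records Hive's wrapper, HiveSequenceFileOutputFormat. The declaration form:

    CREATE TABLE dest1 (key INT, value STRING) STORED AS
    INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat'
    OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat';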

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/fileformat_text.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/fileformat_text.q.out b/ql/src/test/results/beelinepositive/fileformat_text.q.out
deleted file mode 100644
index 3cb1698..0000000
--- a/ql/src/test/results/beelinepositive/fileformat_text.q.out
+++ /dev/null
@@ -1,62 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/fileformat_text.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/fileformat_text.q
->>>  EXPLAIN 
-CREATE TABLE dest1(key INT, value STRING) STORED AS 
-INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME dest1) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL key TOK_INT) (TOK_TABCOL value TOK_STRING)) (TOK_TABLEFILEFORMAT 'org.apache.hadoop.mapred.TextInputFormat' 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat'))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: key int, value string'
-'          if not exists: false'
-'          input format: org.apache.hadoop.mapred.TextInputFormat'
-'          # buckets: -1'
-'          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat'
-'          name: dest1'
-'          isExternal: false'
-''
-''
-19 rows selected 
->>>  
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS 
-INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat';
-No rows affected 
->>>  
->>>  DESCRIBE EXTENDED dest1;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:dest1, dbName:fileformat_text, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/fileformat_text.db/dest1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 10;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'0','val_0'
-'4','val_4'
-'8','val_8'
-'0','val_0'
-'0','val_0'
-'5','val_5'
-'5','val_5'
-'2','val_2'
-'5','val_5'
-'9','val_9'
-10 rows selected 
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/filter_join_breaktask.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/filter_join_breaktask.q.out b/ql/src/test/results/beelinepositive/filter_join_breaktask.q.out
deleted file mode 100644
index f12e5d1..0000000
--- a/ql/src/test/results/beelinepositive/filter_join_breaktask.q.out
+++ /dev/null
@@ -1,320 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/filter_join_breaktask.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/filter_join_breaktask.q
->>>  
->>>  CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string);
-No rows affected 
->>>  
->>>  INSERT OVERWRITE TABLE filter_join_breaktask PARTITION(ds='2008-04-08') 
-SELECT key, value from src1;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  
->>>  EXPLAIN EXTENDED 
-SELECT f.key, g.value 
-FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null) 
-JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='');
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_TABREF (TOK_TABNAME filter_join_breaktask) f) (TOK_TABREF (TOK_TABNAME filter_join_breaktask) m) (AND (AND (AND (= (. (TOK_TABLE_OR_COL f) key) (. (TOK_TABLE_OR_COL m) key)) (= (. (TOK_TABLE_OR_COL f) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL m) ds) '2008-04-08')) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL f) key)))) (TOK_TABREF (TOK_TABNAME filter_join_breaktask) g) (AND (AND (AND (AND (= (. (TOK_TABLE_OR_COL g) value) (. (TOK_TABLE_OR_COL m) value)) (= (. (TOK_TABLE_OR_COL g) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL m) ds) '2008-04-08')) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL m) value))) (!= (. (TOK_TABLE_OR_COL m) value) '')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL f) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL g) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        f '
-'          TableScan'
-'            alias: f'
-'            GatherStats: false'
-'            Filter Operator'
-'              isSamplingPred: false'
-'              predicate:'
-'                  expr: key is not null'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: int'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: int'
-'                tag: 0'
-'                value expressions:'
-'                      expr: key'
-'                      type: int'
-'        m '
-'          TableScan'
-'            alias: m'
-'            GatherStats: false'
-'            Filter Operator'
-'              isSamplingPred: false'
-'              predicate:'
-'                  expr: ((key is not null and value is not null) and (value <> ''))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: int'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: int'
-'                tag: 1'
-'                value expressions:'
-'                      expr: value'
-'                      type: string'
-'                      expr: ds'
-'                      type: string'
-'      Needs Tagging: true'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask/ds=2008-04-08 [f, m]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask/ds=2008-04-08'
-'              name filter_join_breaktask.filter_join_breaktask'
-'              numFiles 1'
-'              numPartitions 1'
-'              numRows 25'
-'              partition_columns ds'
-'              rawDataSize 211'
-'              serialization.ddl struct filter_join_breaktask { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 236'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask'
-'                name filter_join_breaktask.filter_join_breaktask'
-'                numFiles 1'
-'                numPartitions 1'
-'                numRows 25'
-'                partition_columns ds'
-'                rawDataSize 211'
-'                serialization.ddl struct filter_join_breaktask { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 236'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: filter_join_breaktask.filter_join_breaktask'
-'            name: filter_join_breaktask.filter_join_breaktask'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 {VALUE._col1} {VALUE._col2}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col6, _col7'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            directory: file:!!{hive.exec.scratchdir}!!'
-'            NumFilesPerFileSink: 1'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                properties:'
-'                  columns _col0,_col6,_col7'
-'                  columns.types int,string,string'
-'                  escape.delim \'
-'            TotalFiles: 1'
-'            GatherStats: false'
-'            MultiFileSpray: false'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col6'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col6'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: int'
-'        g '
-'          TableScan'
-'            alias: g'
-'            GatherStats: false'
-'            Filter Operator'
-'              isSamplingPred: false'
-'              predicate:'
-'                  expr: (value <> '')'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: value'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: value'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: value'
-'                      type: string'
-'      Needs Tagging: true'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [$INTNAME]'
-'        !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask/ds=2008-04-08 [g]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns _col0,_col6,_col7'
-'              columns.types int,string,string'
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns _col0,_col6,_col7'
-'                columns.types int,string,string'
-'                escape.delim \'
-'        !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask/ds=2008-04-08'
-'              name filter_join_breaktask.filter_join_breaktask'
-'              numFiles 1'
-'              numPartitions 1'
-'              numRows 25'
-'              partition_columns ds'
-'              rawDataSize 211'
-'              serialization.ddl struct filter_join_breaktask { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 236'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask'
-'                name filter_join_breaktask.filter_join_breaktask'
-'                numFiles 1'
-'                numPartitions 1'
-'                numRows 25'
-'                partition_columns ds'
-'                rawDataSize 211'
-'                serialization.ddl struct filter_join_breaktask { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 236'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: filter_join_breaktask.filter_join_breaktask'
-'            name: filter_join_breaktask.filter_join_breaktask'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col11'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: int'
-'                  expr: _col11'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0,_col1'
-'                    columns.types int:string'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-282 rows selected 
->>>  
->>>  SELECT f.key, g.value 
-FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null) 
-JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='');
-'key','value'
-'146','val_146'
-'150','val_150'
-'213','val_213'
-'238','val_238'
-'255','val_255'
-'273','val_273'
-'278','val_278'
-'311','val_311'
-'401','val_401'
-'406','val_406'
-'66','val_66'
-'98','val_98'
-12 rows selected 
->>>  
->>>  !record
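For context, the plan in the file above comes from a three-way self-join that the planner splits across two map-reduce stages: the first joins f to m on key, and the second joins that intermediate result ($INTNAME) to g on value, with the (value <> '') filter pushed into g's table scan. A minimal sketch of the table setup the plan implies, reconstructed from its partition metadata (columns key,value; columns.types int:string; partition column ds) — the actual .q file may create and load the table differently:

    -- Partitioned table inferred from the plan's partition properties.
    CREATE TABLE filter_join_breaktask (key INT, value STRING)
    PARTITIONED BY (ds STRING)
    STORED AS TEXTFILE;

    -- Hypothetical load step (file name is an assumption); per the plan's
    -- stats, the real test populates ds=2008-04-08 with 25 rows.
    LOAD DATA LOCAL INPATH 'filter_join_breaktask.txt'
    OVERWRITE INTO TABLE filter_join_breaktask PARTITION (ds='2008-04-08');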

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby1.q.out b/ql/src/test/results/beelinepositive/groupby1.q.out
deleted file mode 100644
index 915ee8a..0000000
--- a/ql/src/test/results/beelinepositive/groupby1.q.out
+++ /dev/null
@@ -1,453 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby1.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  set fs.default.name=invalidscheme:///;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: rand()'
-'                      type: double'
-'                tag: -1'
-'                value expressions:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: partial1'
-'          outputColumnNames: _col0, _col1'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby1.dest_g1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby1.dest_g1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-115 rows selected 
->>>  
->>>  set fs.default.name=file:///;
-No rows affected 
->>>  
->>>  FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest_g1.* FROM dest_g1;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  !record
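The EXPLAIN output in the file above shows why hive.groupby.skewindata=true turns one aggregation into two map-reduce stages: Stage-1 partitions rows by rand() so a skewed key is spread across all reducers and only partial sums are computed (mode: partial1), while Stage-2 repartitions those partials by key for the final merge (mode: final). A rough hand-written analogue of the same idea, salting the group key — illustrative only; the salt width of 32 is an arbitrary assumption, and Hive's actual rewrite happens in the physical plan, not in SQL:

    -- Stage-1 analogue: partial sums per (key, random salt) bucket,
    -- so one hot key no longer lands on a single reducer.
    -- Stage-2 analogue: merge the partial sums per key.
    SELECT t.key, sum(t.partial_sum) AS value
    FROM (
      SELECT src.key,
             sum(substr(src.value, 5)) AS partial_sum
      FROM src
      GROUP BY src.key, floor(rand() * 32)  -- random salt spreads hot keys
    ) t
    GROUP BY t.key;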

