hive-commits mailing list archives

From gunt...@apache.org
Subject [23/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)
Date Fri, 03 Feb 2017 21:50:36 GMT
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/cross_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/cross_join.q.out b/ql/src/test/results/beelinepositive/cross_join.q.out
deleted file mode 100644
index 125241f..0000000
--- a/ql/src/test/results/beelinepositive/cross_join.q.out
+++ /dev/null
@@ -1,183 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/cross_join.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/cross_join.q
->>>  -- current
->>>  explain select src.key from src join src src2;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src)) (TOK_TABREF (TOK_TABNAME src) src2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: 1'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-52 rows selected 
->>>  -- ansi cross join
->>>  explain select src.key from src cross join src src2;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_CROSSJOIN (TOK_TABREF (TOK_TABNAME src)) (TOK_TABREF (TOK_TABNAME src) src2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: 1'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-52 rows selected 
->>>  -- appending condition is allowed
->>>  explain select src.key from src cross join src src2 on src.key=src2.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_CROSSJOIN (TOK_TABREF (TOK_TABNAME src)) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-64 rows selected 
->>>  !record
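
The removed cross_join.q golden file above recorded identical MapReduce plans for the implicit and ANSI join forms, and showed that a condition may be appended to CROSS JOIN, in which case Hive plans it as an ordinary inner join on that key. A minimal sketch of the three query shapes it covered, assuming the standard src(key string, value string) test fixture:

  EXPLAIN SELECT src.key FROM src JOIN src src2;
  EXPLAIN SELECT src.key FROM src CROSS JOIN src src2;
  EXPLAIN SELECT src.key FROM src CROSS JOIN src src2 ON src.key = src2.key;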

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/ct_case_insensitive.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/ct_case_insensitive.q.out b/ql/src/test/results/beelinepositive/ct_case_insensitive.q.out
deleted file mode 100644
index ed739c3..0000000
--- a/ql/src/test/results/beelinepositive/ct_case_insensitive.q.out
+++ /dev/null
@@ -1,9 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/ct_case_insensitive.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/ct_case_insensitive.q
->>>  CREATE TABLE tmp_pyang_bucket3 (userId INT) CLUSTERED BY (userid) INTO 32 BUCKETS;
-No rows affected 
->>>  DROP TABLE tmp_pyang_bucket3;
-No rows affected 
->>>  CREATE TABLE tmp_pyang_bucket3 (userId INT) CLUSTERED BY (userid) SORTED BY (USERID) INTO 32 BUCKETS;
-No rows affected 
->>>  !record
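
The ct_case_insensitive.q golden file above checked that bucketing and sort column references are matched case-insensitively against the column definitions, so mixing userId, userid, and USERID in one DDL statement is accepted. A condensed sketch of the recorded statement:

  CREATE TABLE tmp_pyang_bucket3 (userId INT)
  CLUSTERED BY (userid) SORTED BY (USERID) INTO 32 BUCKETS;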

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/ctas.q.out b/ql/src/test/results/beelinepositive/ctas.q.out
deleted file mode 100644
index 15e3355..0000000
--- a/ql/src/test/results/beelinepositive/ctas.q.out
+++ /dev/null
@@ -1,924 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/ctas.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/ctas.q
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  create table nzhang_Tmp(a int, b string);
-No rows affected 
->>>  select * from nzhang_Tmp;
-'a','b'
-No rows selected 
->>>  
->>>  explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME nzhang_CTAS1) TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key) k) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-3 depends on stages: Stage-4'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  name: ctas.nzhang_CTAS1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: !!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas1'
-''
-'  Stage: Stage-4'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: k string, value string'
-'          if not exists: false'
-'          input format: org.apache.hadoop.mapred.TextInputFormat'
-'          # buckets: -1'
-'          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat'
-'          name: nzhang_CTAS1'
-'          isExternal: false'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-96 rows selected 
->>>  
->>>  create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10;
-'k','value'
-No rows selected 
->>>  
->>>  select * from nzhang_CTAS1;
-'k','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'10','val_10'
-'100','val_100'
-'100','val_100'
-'103','val_103'
-'103','val_103'
-'104','val_104'
-'104','val_104'
-10 rows selected 
->>>  
->>>  describe formatted nzhang_CTAS1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'k                   ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','ctas                ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','numFiles            ','1                   '
-'','numPartitions       ','0                   '
-'','numRows             ','10                  '
-'','rawDataSize         ','96                  '
-'','totalSize           ','106                 '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-32 rows selected 
->>>  
->>>  
->>>  explain create table nzhang_ctas2 as select * from src sort by key, value limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas2) TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-3 depends on stages: Stage-4'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  name: ctas.nzhang_ctas2'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: !!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas2'
-''
-'  Stage: Stage-4'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: key string, value string'
-'          if not exists: false'
-'          input format: org.apache.hadoop.mapred.TextInputFormat'
-'          # buckets: -1'
-'          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat'
-'          name: nzhang_ctas2'
-'          isExternal: false'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-96 rows selected 
->>>  
->>>  create table nzhang_ctas2 as select * from src sort by key, value limit 10;
-'key','value'
-No rows selected 
->>>  
->>>  select * from nzhang_ctas2;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'10','val_10'
-'100','val_100'
-'100','val_100'
-'103','val_103'
-'103','val_103'
-'104','val_104'
-'104','val_104'
-10 rows selected 
->>>  
->>>  describe formatted nzhang_CTAS2;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','ctas                ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas2',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','numFiles            ','1                   '
-'','numPartitions       ','0                   '
-'','numRows             ','10                  '
-'','rawDataSize         ','96                  '
-'','totalSize           ','106                 '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-32 rows selected 
->>>  
->>>  
->>>  explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas3) TOK_LIKETABLE (TOK_TABLESERIALIZER (TOK_SERDENAME "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe")) TOK_TBLRCFILE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (/ (TOK_TABLE_OR_COL key) 2) half_key) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_TABLE_OR_COL value) "_con") conb)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL half_key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL conb))) (TOK_LIMIT 10))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-3 depends on stages: Stage-4'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: (key / 2)'
-'                    type: double'
-'                    expr: concat(value, '_con')'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: double'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: double'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'                  name: ctas.nzhang_ctas3'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: !!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas3'
-''
-'  Stage: Stage-4'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: half_key double, conb string'
-'          if not exists: false'
-'          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'          # buckets: -1'
-'          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'          serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
-'          name: nzhang_ctas3'
-'          isExternal: false'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-97 rows selected 
->>>  
->>>  create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10;
-'half_key','conb'
-No rows selected 
->>>  
->>>  select * from nzhang_ctas3;
-'half_key','conb'
-'0.0','val_0_con'
-'0.0','val_0_con'
-'0.0','val_0_con'
-'1.0','val_2_con'
-'2.0','val_4_con'
-'2.5','val_5_con'
-'2.5','val_5_con'
-'2.5','val_5_con'
-'4.0','val_8_con'
-'4.5','val_9_con'
-10 rows selected 
->>>  
->>>  describe formatted nzhang_CTAS3;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'half_key            ','double              ','None                '
-'conb                ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','ctas                ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas3',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','numFiles            ','1                   '
-'','numPartitions       ','0                   '
-'','numRows             ','10                  '
-'','rawDataSize         ','120                 '
-'','totalSize           ','199                 '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe',''
-'InputFormat:        ','org.apache.hadoop.hive.ql.io.RCFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.RCFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-32 rows selected 
->>>  
->>>  
->>>  explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas3) TOK_IFNOTEXISTS TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 2))))'
-''
-'STAGE DEPENDENCIES:'
-''
-'STAGE PLANS:'
-'STAGE PLANS:'
-7 rows selected 
->>>  
->>>  create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2;
-No rows affected 
->>>  
->>>  select * from nzhang_ctas3;
-'half_key','conb'
-'0.0','val_0_con'
-'0.0','val_0_con'
-'0.0','val_0_con'
-'1.0','val_2_con'
-'2.0','val_4_con'
-'2.5','val_5_con'
-'2.5','val_5_con'
-'2.5','val_5_con'
-'4.0','val_8_con'
-'4.5','val_9_con'
-10 rows selected 
->>>  
->>>  describe formatted nzhang_CTAS3;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'half_key            ','double              ','None                '
-'conb                ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','ctas                ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas3',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','numFiles            ','1                   '
-'','numPartitions       ','0                   '
-'','numRows             ','10                  '
-'','rawDataSize         ','120                 '
-'','totalSize           ','199                 '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe',''
-'InputFormat:        ','org.apache.hadoop.hive.ql.io.RCFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.RCFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-32 rows selected 
->>>  
->>>  
->>>  explain create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas4) TOK_LIKETABLE (TOK_TABLEROWFORMAT (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD ','))) TOK_TBLTEXTFILE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-3 depends on stages: Stage-4'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  name: ctas.nzhang_ctas4'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: !!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas4'
-''
-'  Stage: Stage-4'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: key string, value string'
-'          field delimiter: ,'
-'          if not exists: false'
-'          input format: org.apache.hadoop.mapred.TextInputFormat'
-'          # buckets: -1'
-'          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat'
-'          name: nzhang_ctas4'
-'          isExternal: false'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-97 rows selected 
->>>  
->>>  create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10;
-'key','value'
-No rows selected 
->>>  
->>>  select * from nzhang_ctas4;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'10','val_10'
-'100','val_100'
-'100','val_100'
-'103','val_103'
-'103','val_103'
-'104','val_104'
-'104','val_104'
-10 rows selected 
->>>  
->>>  describe formatted nzhang_CTAS4;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','ctas                ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas4',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','numFiles            ','1                   '
-'','numPartitions       ','0                   '
-'','numRows             ','10                  '
-'','rawDataSize         ','96                  '
-'','totalSize           ','106                 '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','field.delim         ',',                   '
-'','serialization.format',',                   '
-33 rows selected 
->>>  
->>>  explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas5) TOK_LIKETABLE (TOK_TABLEROWFORMAT (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD ',') (TOK_TABLEROWFORMATLINES '\012'))) TOK_TBLTEXTFILE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-3 depends on stages: Stage-4'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/ctas.db/src [src]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/ctas.db/src '
-'          Partition'
-'            base file name: src'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/ctas.db/src'
-'              name ctas.src'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct src { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/ctas.db/src'
-'                name ctas.src'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct src { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: ctas.src'
-'            name: ctas.src'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                  properties:'
-'                    columns _col0,_col1'
-'                    columns.types string,string'
-'                    escape.delim \'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns _col0,_col1'
-'              columns.types string,string'
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns _col0,_col1'
-'                columns.types string,string'
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0,_col1'
-'                    columns.types string:string'
-'                    field.delim ,'
-'                    line.delim '
-''
-'                    name ctas.nzhang_ctas5'
-'                    serialization.format ,'
-'                  name: ctas.nzhang_ctas5'
-'              TotalFiles: 1'
-'              GatherStats: true'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: !!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas5'
-''
-'  Stage: Stage-4'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: key string, value string'
-'          field delimiter: ,'
-'          if not exists: false'
-'          input format: org.apache.hadoop.mapred.TextInputFormat'
-'          line delimiter: '
-''
-'          # buckets: -1'
-'          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat'
-'          name: nzhang_ctas5'
-'          isExternal: false'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-195 rows selected 
->>>  
->>>  set mapred.job.tracker=does.notexist.com:666;
-No rows affected 
->>>  set hive.exec.mode.local.auto=true;
-No rows affected 
->>>  
->>>  create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10;
-'key','value'
-No rows selected 
->>>  
->>>  create table nzhang_ctas6 (key string, `to` string);
-No rows affected 
->>>  insert overwrite table nzhang_ctas6 select key, value from src limit 10;
-'key','value'
-No rows selected 
->>>  create table nzhang_ctas7 as select key, `to` from nzhang_ctas6;
-'key','to'
-No rows selected 
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  !record
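
The ctas.q golden file above walked through CREATE TABLE AS SELECT variants: plain CTAS, CTAS with an explicit SerDe and RCFile storage, CTAS with a delimited row format, and CREATE TABLE IF NOT EXISTS ... AS SELECT, which is a no-op when the table already exists (the recorded nzhang_ctas3 schema and rows are unchanged by it). A condensed sketch of two of the recorded statements:

  CREATE TABLE nzhang_ctas3
  ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
  STORED AS RCFILE
  AS SELECT key/2 half_key, concat(value, "_con") conb
  FROM src SORT BY half_key, conb LIMIT 10;

  CREATE TABLE IF NOT EXISTS nzhang_ctas3
  AS SELECT key, value FROM src SORT BY key, value LIMIT 2;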

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/default_partition_name.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/default_partition_name.q.out b/ql/src/test/results/beelinepositive/default_partition_name.q.out
deleted file mode 100644
index ce5f504..0000000
--- a/ql/src/test/results/beelinepositive/default_partition_name.q.out
+++ /dev/null
@@ -1,16 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/default_partition_name.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/default_partition_name.q
->>>  create table default_partition_name (key int, value string) partitioned by (ds string);
-No rows affected 
->>>  
->>>  set hive.exec.default.partition.name='some_other_default_partition_name';
-No rows affected 
->>>  
->>>  alter table default_partition_name add partition(ds='__HIVE_DEFAULT_PARTITION__');
-No rows affected 
->>>  
->>>  show partitions default_partition_name;
-'partition'
-'ds=__HIVE_DEFAULT_PARTITION__'
-1 row selected 
->>>  !record
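
The default_partition_name.q golden file above showed that ALTER TABLE ... ADD PARTITION stores the literal default partition value verbatim, even after hive.exec.default.partition.name has been changed for the session. A sketch of the recorded sequence:

  SET hive.exec.default.partition.name='some_other_default_partition_name';
  ALTER TABLE default_partition_name ADD PARTITION (ds='__HIVE_DEFAULT_PARTITION__');
  SHOW PARTITIONS default_partition_name;  -- ds=__HIVE_DEFAULT_PARTITION__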

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/delimiter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/delimiter.q.out b/ql/src/test/results/beelinepositive/delimiter.q.out
deleted file mode 100644
index b0cd333..0000000
--- a/ql/src/test/results/beelinepositive/delimiter.q.out
+++ /dev/null
@@ -1,28 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/delimiter.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/delimiter.q
->>>  create table impressions (imp string, msg string) 
-row format delimited 
-fields terminated by '\t' 
-lines terminated by '\n' 
-stored as textfile;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/in7.txt' INTO TABLE impressions;
-No rows affected 
->>>  
->>>  select * from impressions;
-'imp','msg'
-'','35'
-'48',''
-'100','100'
-3 rows selected 
->>>  
->>>  select imp,msg from impressions;
-'imp','msg'
-'','35'
-'48',''
-'100','100'
-3 rows selected 
->>>  
->>>  drop table impressions;
-No rows affected 
->>>  !record
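
The delimiter.q golden file above covered custom field and line terminators on a text table loaded from a local file; in the recorded output, empty fields come back as empty strings rather than NULLs. A sketch of the recorded DDL and load:

  CREATE TABLE impressions (imp STRING, msg STRING)
  ROW FORMAT DELIMITED
  FIELDS TERMINATED BY '\t'
  LINES TERMINATED BY '\n'
  STORED AS TEXTFILE;
  LOAD DATA LOCAL INPATH '../data/files/in7.txt' INTO TABLE impressions;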

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/desc_non_existent_tbl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/desc_non_existent_tbl.q.out b/ql/src/test/results/beelinepositive/desc_non_existent_tbl.q.out
deleted file mode 100644
index a81b3db..0000000
--- a/ql/src/test/results/beelinepositive/desc_non_existent_tbl.q.out
+++ /dev/null
@@ -1,3 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/desc_non_existent_tbl.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/desc_non_existent_tbl.q
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned.q.out b/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned.q.out
deleted file mode 100644
index f393f58..0000000
--- a/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned.q.out
+++ /dev/null
@@ -1,43 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/describe_formatted_view_partitioned.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/describe_formatted_view_partitioned.q
->>>  DROP VIEW view_partitioned;
-No rows affected 
->>>  
->>>  CREATE VIEW view_partitioned 
-PARTITIONED ON (value) 
-AS 
-SELECT key, value 
-FROM src 
-WHERE key=86;
-'key','value'
-No rows selected 
->>>  
->>>  ALTER VIEW view_partitioned 
-ADD PARTITION (value='val_86');
-No rows affected 
->>>  
->>>  DESCRIBE FORMATTED view_partitioned PARTITION (value='val_86');
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'value               ','string              ','None                '
-'','',''
-'# Detailed Partition Information','',''
-'Partition Value:    ','[val_86]            ',''
-'Database:           ','describe_formatted_view_partitioned',''
-'Table:              ','view_partitioned    ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Location:           ','null                ',''
-'Partition Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-19 rows selected 
->>>  
->>>  DROP VIEW view_partitioned;
-No rows affected 
->>>  !record
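
The describe_formatted_view_partitioned.q golden file above exercised partitioned views: the partition column is declared with PARTITIONED ON, partitions are added with ALTER VIEW, and DESCRIBE FORMATTED ... PARTITION reports a null location since a view partition stores no data. A sketch of the recorded sequence:

  CREATE VIEW view_partitioned PARTITIONED ON (value)
  AS SELECT key, value FROM src WHERE key=86;
  ALTER VIEW view_partitioned ADD PARTITION (value='val_86');
  DESCRIBE FORMATTED view_partitioned PARTITION (value='val_86');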

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned_json.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned_json.q.out b/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned_json.q.out
deleted file mode 100644
index 4686a5f..0000000
--- a/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned_json.q.out
+++ /dev/null
@@ -1,29 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/describe_formatted_view_partitioned_json.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/describe_formatted_view_partitioned_json.q
->>>  set hive.ddl.output.format=json;
-No rows affected 
->>>  
->>>  DROP VIEW view_partitioned;
-No rows affected 
->>>  
->>>  CREATE VIEW view_partitioned 
-PARTITIONED ON (value) 
-AS 
-SELECT key, value 
-FROM src 
-WHERE key=86;
-'key','value'
-No rows selected 
->>>  
->>>  ALTER VIEW view_partitioned 
-ADD PARTITION (value='val_86');
-No rows affected 
->>>  
->>>  DESCRIBE FORMATTED view_partitioned PARTITION (value='val_86');
-'col_name','data_type','comment'
-'{"columns":[{"name":"key","type":"string"}]}','',''
-1 row selected 
->>>  
->>>  DROP VIEW view_partitioned;
-No rows affected 
->>>  !record
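
The _json variant above ran the same view DDL with hive.ddl.output.format=json, which collapses the DESCRIBE output into a single JSON row:

  SET hive.ddl.output.format=json;
  DESCRIBE FORMATTED view_partitioned PARTITION (value='val_86');
  -- '{"columns":[{"name":"key","type":"string"}]}'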

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/describe_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/describe_table.q.out b/ql/src/test/results/beelinepositive/describe_table.q.out
deleted file mode 100644
index 1ad5134..0000000
--- a/ql/src/test/results/beelinepositive/describe_table.q.out
+++ /dev/null
@@ -1,183 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/describe_table.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/describe_table.q
->>>  describe srcpart;
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'ds','string',''
-'hr','string',''
-4 rows selected 
->>>  describe srcpart.key;
-'col_name','data_type','comment'
-'key','string','from deserializer'
-1 row selected 
->>>  describe srcpart PARTITION(ds='2008-04-08', hr='12');
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'ds','string',''
-'hr','string',''
-4 rows selected 
->>>  
->>>  describe extended srcpart;
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'ds','string',''
-'hr','string',''
-'','',''
-'Detailed Table Information','Table(tableName:srcpart, dbName:describe_table, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/describe_table.db/srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=4, numFiles=4, transient_lastDdlTime=!!UNIXTIME!!, totalSize=23248, numRows=0, rawDataSize=0}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-6 rows selected 
->>>  describe extended srcpart.key;
-'col_name','data_type','comment'
-'key','string','from deserializer'
-1 row selected 
->>>  describe extended srcpart PARTITION(ds='2008-04-08', hr='12');
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'ds','string',''
-'hr','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2008-04-08, 12], dbName:describe_table, tableName:srcpart, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/describe_table.db/srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{numFiles=1, transient_lastDdlTime=!!UNIXTIME!!, totalSize=5812, numRows=0, rawDataSize=0})',''
-6 rows selected 
->>>  
->>>  describe formatted srcpart;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'hr                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','describe_table      ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/describe_table.db/srcpart',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','numFiles            ','4                   '
-'','numPartitions       ','4                   '
-'','numRows             ','0                   '
-'','rawDataSize         ','0                   '
-'','totalSize           ','23248               '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-38 rows selected 
->>>  describe formatted srcpart.key;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','from deserializer   '
-3 rows selected 
->>>  describe formatted srcpart PARTITION(ds='2008-04-08', hr='12');
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'hr                  ','string              ','None                '
-'','',''
-'# Detailed Partition Information','',''
-'Partition Value:    ','[2008-04-08, 12]    ',''
-'Database:           ','describe_table      ',''
-'Table:              ','srcpart             ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/describe_table.db/srcpart/ds=2008-04-08/hr=12',''
-'Partition Parameters:','',''
-'','numFiles            ','1                   '
-'','numRows             ','0                   '
-'','rawDataSize         ','0                   '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-36 rows selected 
->>>  
->>>  create table srcpart_serdeprops like srcpart;
-No rows affected 
->>>  alter table srcpart_serdeprops set serdeproperties('xyz'='0');
-No rows affected 
->>>  alter table srcpart_serdeprops set serdeproperties('pqrs'='1');
-No rows affected 
->>>  alter table srcpart_serdeprops set serdeproperties('abcd'='2');
-No rows affected 
->>>  alter table srcpart_serdeprops set serdeproperties('A1234'='3');
-No rows affected 
->>>  describe formatted srcpart_serdeprops;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'hr                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','describe_table      ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/describe_table.db/srcpart_serdeprops',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','A1234               ','3                   '
-'','abcd                ','2                   '
-'','pqrs                ','1                   '
-'','serialization.format','1                   '
-'','xyz                 ','0                   '
-39 rows selected 
->>>  drop table srcpart_serdeprops;
-No rows affected 
->>>  !record
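
A note on the serde-properties portion of this golden file: the four separate ALTER TABLE ... SET SERDEPROPERTIES statements accumulate rather than overwrite, which is why the final DESCRIBE FORMATTED lists all four custom keys (A1234, abcd, pqrs, xyz) alongside the default serialization.format under "Storage Desc Params". A minimal sketch of the same pattern, with a hypothetical table name:

    CREATE TABLE serde_demo (key STRING, value STRING);
    ALTER TABLE serde_demo SET SERDEPROPERTIES ('xyz'='0');
    ALTER TABLE serde_demo SET SERDEPROPERTIES ('pqrs'='1');
    -- DESCRIBE FORMATTED serde_demo now shows xyz, pqrs and the
    -- default serialization.format=1 together under Storage Desc Params.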

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/describe_table_json.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/describe_table_json.q.out b/ql/src/test/results/beelinepositive/describe_table_json.q.out
deleted file mode 100644
index 836b936..0000000
--- a/ql/src/test/results/beelinepositive/describe_table_json.q.out
+++ /dev/null
@@ -1,42 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/describe_table_json.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/describe_table_json.q
->>>  set hive.ddl.output.format=json;
-No rows affected 
->>>  
->>>  CREATE TABLE IF NOT EXISTS jsontable (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  SHOW TABLES;
-'tab_name'
-'{"tables":["jsontable","primitives","src","src1","src_json","src_sequencefile","src_thrift","srcbucket","srcbucket2","srcpart"]}'
-1 row selected 
->>>  
->>>  SHOW TABLES LIKE 'json*';
-'tab_name'
-'{"tables":["jsontable"]}'
-1 row selected 
->>>  
->>>  SHOW TABLE EXTENDED LIKE 'json*';
-'tab_name'
-'{"tables":[]}'
-1 row selected 
->>>  
->>>  ALTER TABLE jsontable SET TBLPROPERTIES ('id' = 'jsontable');
-No rows affected 
->>>  
->>>  DESCRIBE jsontable;
-'col_name','data_type','comment'
-'{"columns":[{"name":"key","type":"int"},{"name":"value","type":"string"}]}','',''
-1 row selected 
->>>  
->>>  DESCRIBE extended jsontable;
-'col_name','data_type','comment'
-'{"columns":[{"name":"key","type":"int"},{"name":"value","type":"string"}],"tableInfo":{"owner":"!!{user.name}!!","parameters":{"id":"jsontable","last_modified_by":"!!{user.name}!!","last_modified_time":"!!UNIXTIME!!","transient_lastDdlTime":"!!UNIXTIME!!","comment":"json table"},"tableName":"jsontable","dbName":"describe_table_json","tableType":"MANAGED_TABLE","sd":{"location":"!!{hive.metastore.warehouse.dir}!!/describe_table_json.db/jsontable","parameters":{},"inputFormat":"org.apache.hadoop.mapred.TextInputFormat","outputFormat":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","cols":[{"name":"key","type":"int","comment":null,"setName":true,"setType":true,"setComment":false},{"name":"value","type":"string","comment":null,"setName":true,"setType":true,"setComment":false}],"skewedInfo":{"skewedColNames":[],"skewedColValues":[],"skewedColValueLocationMaps":{},"skewedColNamesSize":0,"skewedColNamesIterator":[],"setSkewedColNames":true,"skewedColValuesSize":0,"skewedColVa
 luesIterator":[],"setSkewedColValues":true,"skewedColValueLocationMapsSize":0,"setSkewedColValueLocationMaps":true},"serdeInfo":{"name":null,"parameters":{"serialization.format":"1"},"serializationLib":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","setName":false,"parametersSize":1,"setParameters":true,"setSerializationLib":true},"numBuckets":-1,"bucketCols":[],"compressed":false,"sortCols":[],"parametersSize":0,"setParameters":true,"colsSize":2,"colsIterator":[{"name":"key","type":"int","comment":null,"setName":true,"setType":true,"setComment":false},{"name":"value","type":"string","comment":null,"setName":true,"setType":true,"setComment":false}],"setCols":true,"setLocation":true,"setInputFormat":true,"setOutputFormat":true,"setCompressed":true,"setNumBuckets":true,"setSerdeInfo":true,"bucketColsSize":0,"bucketColsIterator":[],"setBucketCols":true,"sortColsSize":0,"sortColsIterator":[],"setSortCols":true,"setSkewedInfo":true},"partitionKeys":[],"createTime":!!UNIXTIME!!,"pri
 vileges":null,"viewOriginalText":null,"lastAccessTime":0,"retention":0,"viewExpandedText":null,"partitionKeysSize":0,"setDbName":true,"setCreateTime":true,"setLastAccessTime":true,"setSd":true,"parametersSize":5,"setParameters":true,"setTableName":true,"setOwner":true,"setRetention":true,"partitionKeysIterator":[],"setPartitionKeys":true,"setViewOriginalText":false,"setViewExpandedText":false,"setTableType":true,"setPrivileges":false}}','',''
-1 row selected 
->>>  
->>>  DROP TABLE jsontable;
-No rows affected 
->>>  
->>>  set hive.ddl.output.format=text;
-No rows affected 
->>>  !record
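
The describe_table_json golden file exercises hive.ddl.output.format=json, under which DDL metadata commands return a single JSON document instead of the usual tabular rows. A minimal sketch of the round trip, assuming a session on the Hive versions these tests target:

    SET hive.ddl.output.format=json;
    SHOW TABLES LIKE 'json*';  -- one row: {"tables":["jsontable"]}
    DESCRIBE jsontable;        -- one row: {"columns":[{"name":"key","type":"int"}, ...]}
    SET hive.ddl.output.format=text;  -- restore the default renderer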

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/describe_xpath.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/describe_xpath.q.out b/ql/src/test/results/beelinepositive/describe_xpath.q.out
deleted file mode 100644
index a55635e..0000000
--- a/ql/src/test/results/beelinepositive/describe_xpath.q.out
+++ /dev/null
@@ -1,40 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/describe_xpath.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/describe_xpath.q
->>>  -- Describe a list structure in a thrift table
->>>  describe src_thrift.lint;
-'col_name','data_type','comment'
-'lint','array<int>','from deserializer'
-1 row selected 
->>>  
->>>  -- Describe the element of a list
->>>  describe src_thrift.lint.$elem$;
-'col_name','data_type','comment'
-'$elem$','int','from deserializer'
-1 row selected 
->>>  
->>>  -- Describe the key of a map
->>>  describe src_thrift.mStringString.$key$;
-'col_name','data_type','comment'
-'$key$','string','from deserializer'
-1 row selected 
->>>  
->>>  -- Describe the value of a map
->>>  describe src_thrift.mStringString.$value$;
-'col_name','data_type','comment'
-'$value$','string','from deserializer'
-1 row selected 
->>>  
->>>  -- Describe a complex element of a list
->>>  describe src_thrift.lintString.$elem$;
-'col_name','data_type','comment'
-'myint','int','from deserializer'
-'mystring','string','from deserializer'
-'underscore_int','int','from deserializer'
-3 rows selected 
->>>  
->>>  -- Describe a member of an element of a list
->>>  describe src_thrift.lintString.$elem$.myint;
-'col_name','data_type','comment'
-'myint','int','from deserializer'
-1 row selected 
->>>  !record
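
The describe_xpath golden file shows DESCRIBE accepting xpath-style accessors on nested types: .$elem$ addresses a list element, .$key$ and .$value$ the two sides of a map, and a trailing field name a single struct member. The test runs against the thrift-backed src_thrift table; the sketch below assumes (unverified here) that the same accessors apply to natively declared complex columns:

    CREATE TABLE nested_demo (
      lint ARRAY<INT>,
      mss  MAP<STRING, STRING>,
      lst  ARRAY<STRUCT<myint:INT, mystring:STRING>>);
    DESCRIBE nested_demo.lint.$elem$;       -- int
    DESCRIBE nested_demo.mss.$key$;         -- string
    DESCRIBE nested_demo.mss.$value$;       -- string
    DESCRIBE nested_demo.lst.$elem$.myint;  -- int, one struct member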

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/diff_part_input_formats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/diff_part_input_formats.q.out b/ql/src/test/results/beelinepositive/diff_part_input_formats.q.out
deleted file mode 100644
index 3f9bb3e..0000000
--- a/ql/src/test/results/beelinepositive/diff_part_input_formats.q.out
+++ /dev/null
@@ -1,19 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/diff_part_input_formats.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/diff_part_input_formats.q
->>>  -- Tests the case where a table is changed from sequence file to a RC file,
->>>  -- resulting in partitions in both file formats. If no valid partitions are
->>>  -- selected, then it should still use RC file for reading the dummy partition.
->>>  CREATE TABLE part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE;
-No rows affected 
->>>  ALTER TABLE part_test ADD PARTITION(ds='1');
-No rows affected 
->>>  ALTER TABLE part_test SET FILEFORMAT RCFILE;
-No rows affected 
->>>  ALTER TABLE part_test ADD PARTITION(ds='2');
-No rows affected 
->>>  SELECT count(1) FROM part_test WHERE ds='3';
-'_c0'
-'0'
-1 row selected 
->>>  
->>>  !record
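
As the leading comment in this test says, ALTER TABLE ... SET FILEFORMAT only changes the table-level storage descriptor; partitions added earlier keep the format they were created with, so one table can hold SEQUENCEFILE and RCFILE partitions side by side. Querying the non-existent partition ds='3' confirms that when no stored partition matches, the reader falls back to the table-level (RCFILE) format. Condensed:

    CREATE TABLE part_test (key STRING, value STRING)
      PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE;
    ALTER TABLE part_test ADD PARTITION (ds='1');  -- remains SEQUENCEFILE
    ALTER TABLE part_test SET FILEFORMAT RCFILE;
    ALTER TABLE part_test ADD PARTITION (ds='2');  -- created as RCFILE
    SELECT count(1) FROM part_test WHERE ds='3';   -- 0, read via RCFILE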

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/disable_file_format_check.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/disable_file_format_check.q.out b/ql/src/test/results/beelinepositive/disable_file_format_check.q.out
deleted file mode 100644
index a9faddc..0000000
--- a/ql/src/test/results/beelinepositive/disable_file_format_check.q.out
+++ /dev/null
@@ -1,17 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/disable_file_format_check.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/disable_file_format_check.q
->>>  set hive.fileformat.check = false;
-No rows affected 
->>>  create table kv_fileformat_check_txt (key string, value string) stored as textfile;
-No rows affected 
->>>  load data local inpath '../data/files/kv1.seq' overwrite into table kv_fileformat_check_txt;
-No rows affected 
->>>  
->>>  create table kv_fileformat_check_seq (key string, value string) stored as sequencefile;
-No rows affected 
->>>  load data local inpath '../data/files/kv1.txt' overwrite into table kv_fileformat_check_seq;
-No rows affected 
->>>  
->>>  
->>>  
->>>  !record
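
Setting hive.fileformat.check=false disables the sanity check that LOAD DATA normally runs on an incoming file, which is why both mismatched loads above (a SequenceFile into a TEXTFILE table, and a text file into a SEQUENCEFILE table) are accepted without error; whether the rows are later readable depends on the actual bytes. A sketch of the first half:

    SET hive.fileformat.check=false;
    CREATE TABLE kv_txt (key STRING, value STRING) STORED AS TEXTFILE;
    -- kv1.seq is a SequenceFile; with the check disabled the load still succeeds
    LOAD DATA LOCAL INPATH '../data/files/kv1.seq' OVERWRITE INTO TABLE kv_txt;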

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/disable_merge_for_bucketing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/disable_merge_for_bucketing.q.out b/ql/src/test/results/beelinepositive/disable_merge_for_bucketing.q.out
deleted file mode 100644
index 90dda6d..0000000
--- a/ql/src/test/results/beelinepositive/disable_merge_for_bucketing.q.out
+++ /dev/null
@@ -1,484 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/disable_merge_for_bucketing.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/disable_merge_for_bucketing.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  set hive.enforce.bucketing = true;
-No rows affected 
->>>  set hive.exec.reducers.max = 1;
-No rows affected 
->>>  set hive.merge.mapredfiles=true;
-No rows affected 
->>>  
->>>  
->>>  CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS;
-No rows affected 
->>>  
->>>  explain extended 
-insert overwrite table bucket2_1 
-select * from src;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucket2_1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                sort order: '
-'                Map-reduce partition columns:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/disable_merge_for_bucketing.db/src [src]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/disable_merge_for_bucketing.db/src '
-'          Partition'
-'            base file name: src'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/disable_merge_for_bucketing.db/src'
-'              name disable_merge_for_bucketing.src'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct src { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/disable_merge_for_bucketing.db/src'
-'                name disable_merge_for_bucketing.src'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct src { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: disable_merge_for_bucketing.src'
-'            name: disable_merge_for_bucketing.src'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: UDFToInteger(_col0)'
-'                  type: int'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 2'
-'              Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count 2'
-'                    bucket_field_name key'
-'                    columns key,value'
-'                    columns.types int:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/disable_merge_for_bucketing.db/bucket2_1'
-'                    name disable_merge_for_bucketing.bucket2_1'
-'                    serialization.ddl struct bucket2_1 { i32 key, string value}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: disable_merge_for_bucketing.bucket2_1'
-'              TotalFiles: 2'
-'              GatherStats: true'
-'              MultiFileSpray: true'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/disable_merge_for_bucketing.db/bucket2_1'
-'                name disable_merge_for_bucketing.bucket2_1'
-'                serialization.ddl struct bucket2_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: disable_merge_for_bucketing.bucket2_1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-150 rows selected 
->>>  
->>>  insert overwrite table bucket2_1 
-select * from src;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  explain 
-select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME bucket2_1) (TOK_TABLEBUCKETSAMPLE 1 2) s)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        s '
-'          TableScan'
-'            alias: s'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((hash(key) & 2147483647) % 2) = 0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: int'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-50 rows selected 
->>>  
->>>  select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'2','val_2'
-'4','val_4'
-'8','val_8'
-'10','val_10'
-'12','val_12'
-'12','val_12'
-'18','val_18'
-'18','val_18'
-'20','val_20'
-'24','val_24'
-'24','val_24'
-'26','val_26'
-'26','val_26'
-'28','val_28'
-'30','val_30'
-'34','val_34'
-'42','val_42'
-'42','val_42'
-'44','val_44'
-'54','val_54'
-'58','val_58'
-'58','val_58'
-'64','val_64'
-'66','val_66'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'72','val_72'
-'72','val_72'
-'74','val_74'
-'76','val_76'
-'76','val_76'
-'78','val_78'
-'80','val_80'
-'82','val_82'
-'84','val_84'
-'84','val_84'
-'86','val_86'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'92','val_92'
-'96','val_96'
-'98','val_98'
-'98','val_98'
-'100','val_100'
-'100','val_100'
-'104','val_104'
-'104','val_104'
-'114','val_114'
-'116','val_116'
-'118','val_118'
-'118','val_118'
-'120','val_120'
-'120','val_120'
-'126','val_126'
-'128','val_128'
-'128','val_128'
-'128','val_128'
-'134','val_134'
-'134','val_134'
-'136','val_136'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'146','val_146'
-'146','val_146'
-'150','val_150'
-'152','val_152'
-'152','val_152'
-'156','val_156'
-'158','val_158'
-'160','val_160'
-'162','val_162'
-'164','val_164'
-'164','val_164'
-'166','val_166'
-'168','val_168'
-'170','val_170'
-'172','val_172'
-'172','val_172'
-'174','val_174'
-'174','val_174'
-'176','val_176'
-'176','val_176'
-'178','val_178'
-'180','val_180'
-'186','val_186'
-'190','val_190'
-'192','val_192'
-'194','val_194'
-'196','val_196'
-'200','val_200'
-'200','val_200'
-'202','val_202'
-'208','val_208'
-'208','val_208'
-'208','val_208'
-'214','val_214'
-'216','val_216'
-'216','val_216'
-'218','val_218'
-'222','val_222'
-'224','val_224'
-'224','val_224'
-'226','val_226'
-'228','val_228'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'238','val_238'
-'238','val_238'
-'242','val_242'
-'242','val_242'
-'244','val_244'
-'248','val_248'
-'252','val_252'
-'256','val_256'
-'256','val_256'
-'258','val_258'
-'260','val_260'
-'262','val_262'
-'266','val_266'
-'272','val_272'
-'272','val_272'
-'274','val_274'
-'278','val_278'
-'278','val_278'
-'280','val_280'
-'280','val_280'
-'282','val_282'
-'282','val_282'
-'284','val_284'
-'286','val_286'
-'288','val_288'
-'288','val_288'
-'292','val_292'
-'296','val_296'
-'298','val_298'
-'298','val_298'
-'298','val_298'
-'302','val_302'
-'306','val_306'
-'308','val_308'
-'310','val_310'
-'316','val_316'
-'316','val_316'
-'316','val_316'
-'318','val_318'
-'318','val_318'
-'318','val_318'
-'322','val_322'
-'322','val_322'
-'332','val_332'
-'336','val_336'
-'338','val_338'
-'342','val_342'
-'342','val_342'
-'344','val_344'
-'344','val_344'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'356','val_356'
-'360','val_360'
-'362','val_362'
-'364','val_364'
-'366','val_366'
-'368','val_368'
-'374','val_374'
-'378','val_378'
-'382','val_382'
-'382','val_382'
-'384','val_384'
-'384','val_384'
-'384','val_384'
-'386','val_386'
-'392','val_392'
-'394','val_394'
-'396','val_396'
-'396','val_396'
-'396','val_396'
-'400','val_400'
-'402','val_402'
-'404','val_404'
-'404','val_404'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'414','val_414'
-'414','val_414'
-'418','val_418'
-'424','val_424'
-'424','val_424'
-'430','val_430'
-'430','val_430'
-'430','val_430'
-'432','val_432'
-'436','val_436'
-'438','val_438'
-'438','val_438'
-'438','val_438'
-'444','val_444'
-'446','val_446'
-'448','val_448'
-'452','val_452'
-'454','val_454'
-'454','val_454'
-'454','val_454'
-'458','val_458'
-'458','val_458'
-'460','val_460'
-'462','val_462'
-'462','val_462'
-'466','val_466'
-'466','val_466'
-'466','val_466'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'470','val_470'
-'472','val_472'
-'478','val_478'
-'478','val_478'
-'480','val_480'
-'480','val_480'
-'480','val_480'
-'482','val_482'
-'484','val_484'
-'490','val_490'
-'492','val_492'
-'492','val_492'
-'494','val_494'
-'496','val_496'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-247 rows selected 
->>>  
->>>  
->>>  !record
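
Two details of these plans are worth spelling out. First, with hive.enforce.bucketing=true the insert runs through a reduce stage partitioned on UDFToInteger(_col0) and writes TotalFiles: 2, one file per bucket; although hive.merge.mapredfiles=true is set, the merge step is suppressed for bucketed output, which is what the test name refers to. Second, TABLESAMPLE (BUCKET 1 OUT OF 2) compiles to the filter (((hash(key) & 2147483647) % 2) = 0): the sign bit is masked off and the remainder picks the bucket, so bucket 1 keeps exactly the keys whose masked hash is even. In these Hive versions hash() of an int appears to be the value itself, e.g. (hash(2) & 2147483647) % 2 = 0, which matches every key in the 247-row sample being even. An equivalent hand-written predicate, for illustration only:

    SELECT * FROM bucket2_1
    WHERE (hash(key) & 2147483647) % 2 = 0
    ORDER BY key;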

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/driverhook.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/driverhook.q.out b/ql/src/test/results/beelinepositive/driverhook.q.out
deleted file mode 100644
index 6319195..0000000
--- a/ql/src/test/results/beelinepositive/driverhook.q.out
+++ /dev/null
@@ -1,13 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/driverhook.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/driverhook.q
->>>  SET hive.exec.driver.run.hooks=org.apache.hadoop.hive.ql.hooks.DriverTestHook;
-No rows affected 
->>>  
->>>  -- This query should appear in the Hive CLI output.
->>>  -- We test DriverTestHook, which does exactly that.
->>>  -- This should not break.
->>>  SELECT * FROM src LIMIT 1;
-'key','value'
-'238','val_238'
-1 row selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_function.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_function.q.out b/ql/src/test/results/beelinepositive/drop_function.q.out
deleted file mode 100644
index a097306..0000000
--- a/ql/src/test/results/beelinepositive/drop_function.q.out
+++ /dev/null
@@ -1,7 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_function.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_function.q
->>>  SET hive.exec.drop.ignorenonexistent=false;
-No rows affected 
->>>  DROP TEMPORARY FUNCTION IF EXISTS UnknownFunction;
-No rows affected 
->>>  !record
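
This short test pins down how hive.exec.drop.ignorenonexistent interacts with IF EXISTS: even with the setting at false, where a bare DROP of a missing object is an error, the IF EXISTS clause still turns the statement into a no-op. Sketch (function name hypothetical):

    SET hive.exec.drop.ignorenonexistent=false;
    DROP TEMPORARY FUNCTION IF EXISTS NoSuchFunction;  -- succeeds, nothing dropped
    -- DROP TEMPORARY FUNCTION NoSuchFunction;  -- would be expected to fail

The drop_index.q.out file that follows checks the same contract for DROP INDEX IF EXISTS.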

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_index.q.out b/ql/src/test/results/beelinepositive/drop_index.q.out
deleted file mode 100644
index 00048ab..0000000
--- a/ql/src/test/results/beelinepositive/drop_index.q.out
+++ /dev/null
@@ -1,7 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_index.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_index.q
->>>  SET hive.exec.drop.ignorenonexistent=false;
-No rows affected 
->>>  DROP INDEX IF EXISTS UnknownIndex ON src;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_index_removes_partition_dirs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_index_removes_partition_dirs.q.out b/ql/src/test/results/beelinepositive/drop_index_removes_partition_dirs.q.out
deleted file mode 100644
index 389794f..0000000
--- a/ql/src/test/results/beelinepositive/drop_index_removes_partition_dirs.q.out
+++ /dev/null
@@ -1,32 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_index_removes_partition_dirs.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_index_removes_partition_dirs.q
->>>  -- This test verifies that if a partition exists outside an index table's current location when the
->>>  -- index is dropped the partition's location is dropped as well.
->>>  
->>>  CREATE TABLE test_table (key STRING, value STRING) 
-PARTITIONED BY (part STRING) 
-STORED AS RCFILE 
-LOCATION 'file:${system:test.tmp.dir}/drop_database_removes_partition_dirs_table';
-No rows affected 
->>>  
->>>  CREATE INDEX test_index ON 
-TABLE test_table(key) AS 'compact' WITH DEFERRED REBUILD 
-IN TABLE test_index_table;
-No rows affected 
->>>  
->>>  ALTER TABLE test_index_table ADD PARTITION (part = '1') 
-LOCATION 'file:${system:test.tmp.dir}/drop_index_removes_partition_dirs_index_table2/part=1';
-No rows affected 
->>>  
->>>  dfs -ls ${system:test.tmp.dir}/drop_index_removes_partition_dirs_index_table2;
-No rows affected 
->>>  
->>>  DROP INDEX test_index ON test_table;
-No rows affected 
->>>  
->>>  dfs -ls ${system:test.tmp.dir}/drop_index_removes_partition_dirs_index_table2;
-No rows affected 
->>>  
->>>  dfs -rmr ${system:test.tmp.dir}/drop_index_removes_partition_dirs_index_table2;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_multi_partitions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_multi_partitions.q.out b/ql/src/test/results/beelinepositive/drop_multi_partitions.q.out
deleted file mode 100644
index d8d9b2e..0000000
--- a/ql/src/test/results/beelinepositive/drop_multi_partitions.q.out
+++ /dev/null
@@ -1,53 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_multi_partitions.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_multi_partitions.q
->>>  create table mp (a string) partitioned by (b string, c string);
-No rows affected 
->>>  
->>>  alter table mp add partition (b='1', c='1');
-No rows affected 
->>>  alter table mp add partition (b='1', c='2');
-No rows affected 
->>>  alter table mp add partition (b='2', c='2');
-No rows affected 
->>>  
->>>  show partitions mp;
-'partition'
-'b=1/c=1'
-'b=1/c=2'
-'b=2/c=2'
-3 rows selected 
->>>  
->>>  explain extended alter table mp drop partition (b='1');
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_ALTERTABLE_DROPPARTS mp (TOK_PARTSPEC (TOK_PARTVAL b = '1')))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'      Drop Table Operator:'
-'        Drop Table'
-'          table: mp'
-''
-''
-13 rows selected 
->>>  alter table mp drop partition (b='1');
-No rows affected 
->>>  
->>>  show partitions mp;
-'partition'
-'b=2/c=2'
-1 row selected 
->>>  
->>>  set hive.exec.drop.ignorenonexistent=false;
-No rows affected 
->>>  alter table mp drop if exists partition (b='3');
-No rows affected 
->>>  
->>>  show partitions mp;
-'partition'
-'b=2/c=2'
-1 row selected 
->>>  !record
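
The point of this test is the partial partition specification: mp is partitioned by (b, c), yet DROP PARTITION (b='1') matches on b alone and removes both b=1/c=1 and b=1/c=2 in a single statement, leaving only b=2/c=2. (The EXPLAIN output renders the work as a Drop Table Operator; that is simply how this Hive version labels partition drops.) Condensed:

    ALTER TABLE mp DROP PARTITION (b='1');            -- drops every partition with b=1
    ALTER TABLE mp DROP IF EXISTS PARTITION (b='3');  -- no match; IF EXISTS makes it a no-op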

