hive-commits mailing list archives

From gunt...@apache.org
Subject [50/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)
Date Fri, 03 Feb 2017 21:51:03 GMT
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_merge_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_merge_stats.q.out b/ql/src/test/results/beelinepositive/alter_merge_stats.q.out
deleted file mode 100644
index 48ab790..0000000
--- a/ql/src/test/results/beelinepositive/alter_merge_stats.q.out
+++ /dev/null
@@ -1,168 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_merge_stats.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_merge_stats.q
->>>  create table src_rc_merge_test_stat(key int, value string) stored as rcfile;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test_stat;
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test_stat;
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test_stat;
-No rows affected 
->>>  
->>>  show table extended like `src_rc_merge_test_stat`;
-'tab_name'
-'tableName:src_rc_merge_test_stat'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_stat'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:false'
-'partitionColumns:'
-'totalNumberFiles:3'
-'totalFileSize:636'
-'maxFileSize:222'
-'minFileSize:206'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  desc extended src_rc_merge_test_stat;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:src_rc_merge_test_stat, dbName:alter_merge_stats, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{numPartitions=0, numFiles=3, transient_lastDdlTime=!!UNIXTIME!!, totalSize=636, numRows=0, rawDataSize=0}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  analyze table src_rc_merge_test_stat compute statistics;
-'key','value'
-No rows selected 
->>>  
->>>  desc extended src_rc_merge_test_stat;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:src_rc_merge_test_stat, dbName:alter_merge_stats, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{numPartitions=0, numFiles=3, transient_lastDdlTime=!!UNIXTIME!!, numRows=15, totalSize=636, rawDataSize=110}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  alter table src_rc_merge_test_stat concatenate;
-No rows affected 
->>>  
->>>  show table extended like `src_rc_merge_test_stat`;
-'tab_name'
-'tableName:src_rc_merge_test_stat'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_stat'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:false'
-'partitionColumns:'
-'totalNumberFiles:1'
-'totalFileSize:239'
-'maxFileSize:239'
-'minFileSize:239'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  desc extended src_rc_merge_test_stat;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:src_rc_merge_test_stat, dbName:alter_merge_stats, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=!!UNIXTIME!!, numRows=15, totalSize=239, rawDataSize=110}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  
->>>  create table src_rc_merge_test_part_stat(key int, value string) partitioned by (ds string) stored as rcfile;
-No rows affected 
->>>  
->>>  alter table src_rc_merge_test_part_stat add partition (ds='2011');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test_part_stat partition (ds='2011');
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test_part_stat partition (ds='2011');
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test_part_stat partition (ds='2011');
-No rows affected 
->>>  
->>>  show table extended like `src_rc_merge_test_part_stat` partition (ds='2011');
-'tab_name'
-'tableName:src_rc_merge_test_part_stat'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_part_stat/ds=2011'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:true'
-'partitionColumns:struct partition_columns { string ds}'
-'totalNumberFiles:3'
-'totalFileSize:636'
-'maxFileSize:222'
-'minFileSize:206'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  desc extended src_rc_merge_test_part_stat;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:src_rc_merge_test_part_stat, dbName:alter_merge_stats, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_part_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=3, transient_lastDdlTime=!!UNIXTIME!!, totalSize=636, numRows=0, rawDataSize=0}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  analyze table src_rc_merge_test_part_stat partition(ds='2011') compute statistics;
-'key','value','ds'
-No rows selected 
->>>  
->>>  desc extended src_rc_merge_test_part_stat;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:src_rc_merge_test_part_stat, dbName:alter_merge_stats, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_part_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=3, transient_lastDdlTime=!!UNIXTIME!!, numRows=15, totalSize=636, rawDataSize=110}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  alter table src_rc_merge_test_part_stat partition (ds='2011') concatenate;
-No rows affected 
->>>  
->>>  show table extended like `src_rc_merge_test_part_stat` partition (ds='2011');
-'tab_name'
-'tableName:src_rc_merge_test_part_stat'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_part_stat/ds=2011'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:true'
-'partitionColumns:struct partition_columns { string ds}'
-'totalNumberFiles:1'
-'totalFileSize:239'
-'maxFileSize:239'
-'minFileSize:239'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  desc extended src_rc_merge_test_part_stat;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:src_rc_merge_test_part_stat, dbName:alter_merge_stats, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_part_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, transient_lastDdlTime=!!UNIXTIME!!, numRows=15, totalSize=239, rawDataSize=110}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  drop table src_rc_merge_test_stat;
-No rows affected 
->>>  drop table src_rc_merge_test_part_stat;
-No rows affected 
->>>  !record
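
For reference, the workflow this golden file exercised distills to a few HiveQL statements (a minimal sketch assembled from the commands above; the point of the test is that CONCATENATE collapses the three loaded RCFiles into one, totalNumberFiles 3 -> 1, while numRows stays 15):

    -- create an RCFile table and load three small files into it
    create table src_rc_merge_test_stat(key int, value string) stored as rcfile;
    load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test_stat;
    -- gather statistics, then merge the table's files in place
    analyze table src_rc_merge_test_stat compute statistics;
    alter table src_rc_merge_test_stat concatenate;
    -- file count and sizes are reported here
    show table extended like `src_rc_merge_test_stat`;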

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_numbuckets_partitioned_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_numbuckets_partitioned_table.q.out b/ql/src/test/results/beelinepositive/alter_numbuckets_partitioned_table.q.out
deleted file mode 100644
index b0ccce5..0000000
--- a/ql/src/test/results/beelinepositive/alter_numbuckets_partitioned_table.q.out
+++ /dev/null
@@ -1,367 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_numbuckets_partitioned_table.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_numbuckets_partitioned_table.q
->>>  
->>>  create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets;
-No rows affected 
->>>  
->>>  alter table tst1 clustered by (key) into 8 buckets;
-No rows affected 
->>>  
->>>  describe formatted tst1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','8                   ',''
-'Bucket Columns:     ','[key]               ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-34 rows selected 
->>>  
->>>  set hive.enforce.bucketing=true;
-No rows affected 
->>>  insert overwrite table tst1 partition (ds='1') select key, value from src;
-'key','value'
-No rows selected 
->>>  
->>>  describe formatted tst1 partition (ds = '1');
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Partition Information','',''
-'Partition Value:    ','[1]                 ',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Table:              ','tst1                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1/ds=1',''
-'Partition Parameters:','',''
-'','numFiles            ','1                   '
-'','numRows             ','500                 '
-'','rawDataSize         ','5312                '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','8                   ',''
-'Bucket Columns:     ','[key]               ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-35 rows selected 
->>>  
->>>  -- Test changing bucket number
->>>  
->>>  alter table tst1 clustered by (key) into 12 buckets;
-No rows affected 
->>>  
->>>  insert overwrite table tst1 partition (ds='1') select key, value from src;
-'key','value'
-No rows selected 
->>>  
->>>  describe formatted tst1 partition (ds = '1');
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Partition Information','',''
-'Partition Value:    ','[1]                 ',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Table:              ','tst1                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1/ds=1',''
-'Partition Parameters:','',''
-'','numFiles            ','1                   '
-'','numRows             ','500                 '
-'','rawDataSize         ','5312                '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','8                   ',''
-'Bucket Columns:     ','[key]               ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-35 rows selected 
->>>  
->>>  describe formatted tst1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','numFiles            ','1                   '
-'','numPartitions       ','1                   '
-'','numRows             ','500                 '
-'','rawDataSize         ','5312                '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','12                  ',''
-'Bucket Columns:     ','[key]               ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-39 rows selected 
->>>  
->>>  -- Test adding sort order
->>>  
->>>  alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets;
-No rows affected 
->>>  
->>>  describe formatted tst1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','numFiles            ','1                   '
-'','numPartitions       ','1                   '
-'','numRows             ','500                 '
-'','rawDataSize         ','5312                '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','12                  ',''
-'Bucket Columns:     ','[key]               ',''
-'Sort Columns:       ','[Order(col:key, order:1)]',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-39 rows selected 
->>>  
->>>  -- Test changing sort order
->>>  
->>>  alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets;
-No rows affected 
->>>  
->>>  describe formatted tst1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','numFiles            ','1                   '
-'','numPartitions       ','1                   '
-'','numRows             ','500                 '
-'','rawDataSize         ','5312                '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','12                  ',''
-'Bucket Columns:     ','[key]               ',''
-'Sort Columns:       ','[Order(col:value, order:0)]',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-39 rows selected 
->>>  
->>>  -- Test removing sort order
->>>  
->>>  alter table tst1 clustered by (value) into 12 buckets;
-No rows affected 
->>>  
->>>  describe formatted tst1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','numFiles            ','1                   '
-'','numPartitions       ','1                   '
-'','numRows             ','500                 '
-'','rawDataSize         ','5312                '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','12                  ',''
-'Bucket Columns:     ','[value]             ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-39 rows selected 
->>>  
->>>  -- Test removing buckets
->>>  
->>>  alter table tst1 not clustered;
-No rows affected 
->>>  
->>>  describe formatted tst1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','numFiles            ','1                   '
-'','numPartitions       ','1                   '
-'','numRows             ','500                 '
-'','rawDataSize         ','5312                '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-39 rows selected 
->>>  !record
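
The bucketing DDL covered above, distilled (a sketch of the statements from the deleted file; note in the output that the existing ds='1' partition still reports Num Buckets: 8 after the table-level change to 12):

    create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets;
    alter table tst1 clustered by (key) into 8 buckets;                          -- change bucket count
    alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets;     -- add a sort order
    alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets;  -- change the sort order
    alter table tst1 not clustered;                                              -- remove bucketing (Num Buckets: -1)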

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_partition_format_loc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_partition_format_loc.q.out b/ql/src/test/results/beelinepositive/alter_partition_format_loc.q.out
deleted file mode 100644
index c330319..0000000
--- a/ql/src/test/results/beelinepositive/alter_partition_format_loc.q.out
+++ /dev/null
@@ -1,106 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_partition_format_loc.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_partition_format_loc.q
->>>  create table alter_partition_format_test (key int, value string);
-No rows affected 
->>>  desc extended alter_partition_format_test;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter_partition_format_test, dbName:alter_partition_format_loc, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_partition_format_loc.db/alter_partition_format_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  alter table alter_partition_format_test set fileformat rcfile;
-No rows affected 
->>>  desc extended alter_partition_format_test;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter_partition_format_test, dbName:alter_partition_format_loc, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_partition_format_loc.db/alter_partition_format_test, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  alter table alter_partition_format_test set location "file:/!!ELIDED!!
-No rows affected 
->>>  desc extended alter_partition_format_test;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter_partition_format_test, dbName:alter_partition_format_loc, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:file:/!!ELIDED!! inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  drop table alter_partition_format_test;
-No rows affected 
->>>  
->>>  --partitioned table
->>>  create table alter_partition_format_test (key int, value string) partitioned by (ds string);
-No rows affected 
->>>  
->>>  alter table alter_partition_format_test add partition(ds='2010');
-No rows affected 
->>>  desc extended alter_partition_format_test partition(ds='2010');
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2010], dbName:alter_partition_format_loc, tableName:alter_partition_format_test, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_partition_format_loc.db/alter_partition_format_test/ds=2010, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{transient_lastDdlTime=!!UNIXTIME!!})',''
-5 rows selected 
->>>  
->>>  alter table alter_partition_format_test partition(ds='2010') set fileformat rcfile;
-No rows affected 
->>>  desc extended alter_partition_format_test partition(ds='2010');
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2010], dbName:alter_partition_format_loc, tableName:alter_partition_format_test, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_partition_format_loc.db/alter_partition_format_test/ds=2010, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!})',''
-5 rows selected 
->>>  
->>>  alter table alter_partition_format_test partition(ds='2010') set location "file:/!!ELIDED!!
-No rows affected 
->>>  desc extended alter_partition_format_test partition(ds='2010');
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2010], dbName:alter_partition_format_loc, tableName:alter_partition_format_test, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:file:/!!ELIDED!! inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!})',''
-5 rows selected 
->>>  
->>>  desc extended alter_partition_format_test;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter_partition_format_test, dbName:alter_partition_format_loc, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_partition_format_loc.db/alter_partition_format_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  alter table alter_partition_format_test set fileformat rcfile;
-No rows affected 
->>>  desc extended alter_partition_format_test;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter_partition_format_test, dbName:alter_partition_format_loc, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_partition_format_loc.db/alter_partition_format_test, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  alter table alter_partition_format_test set location "file:/!!ELIDED!!
-No rows affected 
->>>  desc extended alter_partition_format_test;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter_partition_format_test, dbName:alter_partition_format_loc, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:file:/!!ELIDED!! inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  drop table alter_partition_format_test;
-No rows affected 
->>>  !record
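
The format/location DDL covered above reduces to (a sketch of the statements from the deleted file; the location argument is elided in the golden file, so a placeholder stands in for it here):

    create table alter_partition_format_test (key int, value string) partitioned by (ds string);
    alter table alter_partition_format_test add partition(ds='2010');
    alter table alter_partition_format_test partition(ds='2010') set fileformat rcfile;  -- per-partition format
    alter table alter_partition_format_test set fileformat rcfile;                       -- table-level format
    alter table alter_partition_format_test set location "file:/...";                    -- path elided above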

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_partition_protect_mode.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_partition_protect_mode.q.out b/ql/src/test/results/beelinepositive/alter_partition_protect_mode.q.out
deleted file mode 100644
index 6f173f9..0000000
--- a/ql/src/test/results/beelinepositive/alter_partition_protect_mode.q.out
+++ /dev/null
@@ -1,66 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_partition_protect_mode.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_partition_protect_mode.q
->>>  -- Create table
->>>  create table if not exists alter_part_protect_mode(key string, value string ) partitioned by (year string, month string) stored as textfile ;
-No rows affected 
->>>  
->>>  -- Load data
->>>  load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1996', month='10');
-No rows affected 
->>>  load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1996', month='12');
-No rows affected 
->>>  load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1995', month='09');
-No rows affected 
->>>  load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1994', month='07');
-No rows affected 
->>>  
->>>  -- offline
->>>  alter table alter_part_protect_mode partition (year='1996') disable offline;
-No rows affected 
->>>  select * from alter_part_protect_mode where year = '1996';
-'key','value','year','month'
-'1','11','1996','10'
-'2','12','1996','10'
-'3','13','1996','10'
-'7','17','1996','10'
-'8','18','1996','10'
-'8','28','1996','10'
-'1','11','1996','12'
-'2','12','1996','12'
-'3','13','1996','12'
-'7','17','1996','12'
-'8','18','1996','12'
-'8','28','1996','12'
-12 rows selected 
->>>  alter table alter_part_protect_mode partition (year='1995') enable offline;
-No rows affected 
->>>  alter table alter_part_protect_mode partition (year='1995') disable offline;
-No rows affected 
->>>  select * from alter_part_protect_mode where year = '1995';
-'key','value','year','month'
-'1','11','1995','09'
-'2','12','1995','09'
-'3','13','1995','09'
-'7','17','1995','09'
-'8','18','1995','09'
-'8','28','1995','09'
-6 rows selected 
->>>  
->>>  -- no_drop
->>>  alter table alter_part_protect_mode partition (year='1996') enable no_drop;
-No rows affected 
->>>  alter table alter_part_protect_mode partition (year='1995') disable no_drop;
-No rows affected 
->>>  alter table alter_part_protect_mode drop partition (year='1995');
-No rows affected 
->>>  alter table alter_part_protect_mode partition (year='1994', month='07') disable no_drop;
-No rows affected 
->>>  alter table alter_part_protect_mode drop partition (year='1994');
-No rows affected 
->>>  
->>>  -- Cleanup
->>>  alter table alter_part_protect_mode partition (year='1996') disable no_drop;
-No rows affected 
->>>  drop table alter_part_protect_mode;
-No rows affected 
->>>  !record
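
In short, the protect-mode toggles this file covered (a sketch; a partition guarded by no_drop must have the guard disabled before it can be dropped):

    alter table alter_part_protect_mode partition (year='1996') disable offline;  -- partition readable again
    alter table alter_part_protect_mode partition (year='1996') enable no_drop;   -- guard against drop partition
    alter table alter_part_protect_mode partition (year='1995') disable no_drop;
    alter table alter_part_protect_mode drop partition (year='1995');             -- succeeds once no_drop is off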

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_table_serde.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_table_serde.q.out b/ql/src/test/results/beelinepositive/alter_table_serde.q.out
deleted file mode 100644
index f1a6d8b..0000000
--- a/ql/src/test/results/beelinepositive/alter_table_serde.q.out
+++ /dev/null
@@ -1,108 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_table_serde.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_table_serde.q
->>>  -- test table
->>>  create table test_table (id int, query string, name string);
-No rows affected 
->>>  describe extended test_table;
-'col_name','data_type','comment'
-'id','int',''
-'query','string',''
-'name','string',''
-'','',''
-'Detailed Table Information','Table(tableName:test_table, dbName:alter_table_serde, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null), FieldSchema(name:query, type:string, comment:null), FieldSchema(name:name, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  alter table test_table set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
-No rows affected 
->>>  describe extended test_table;
-'col_name','data_type','comment'
-'id','int','from deserializer'
-'query','string','from deserializer'
-'name','string','from deserializer'
-'','',''
-'Detailed Table Information','Table(tableName:test_table, dbName:alter_table_serde, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:from deserializer), FieldSchema(name:query, type:string, comment:from deserializer), FieldSchema(name:name, type:string, comment:from deserializer)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  alter table test_table set serdeproperties ('field.delim' = ',');
-No rows affected 
->>>  describe extended test_table;
-'col_name','data_type','comment'
-'id','int','from deserializer'
-'query','string','from deserializer'
-'name','string','from deserializer'
-'','',''
-'Detailed Table Information','Table(tableName:test_table, dbName:alter_table_serde, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:from deserializer), FieldSchema(name:query, type:string, comment:from deserializer), FieldSchema(name:name, type:string, comment:from deserializer)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1, field.delim=,}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  drop table test_table;
-No rows affected 
->>>  
->>>  --- test partitioned table
->>>  create table test_table (id int, query string, name string) partitioned by (dt string);
-No rows affected 
->>>  
->>>  alter table test_table add partition (dt = '2011');
-No rows affected 
->>>  describe extended test_table partition (dt='2011');
-'col_name','data_type','comment'
-'id','int',''
-'query','string',''
-'name','string',''
-'dt','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2011], dbName:alter_table_serde, tableName:test_table, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null), FieldSchema(name:query, type:string, comment:null), FieldSchema(name:name, type:string, comment:null), FieldSchema(name:dt, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table/dt=2011, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{transient_lastDdlTime=!!UNIXTIME!!})',''
-6 rows selected 
->>>  
->>>  alter table test_table set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
-No rows affected 
->>>  describe extended test_table partition (dt='2011');
-'col_name','data_type','comment'
-'id','int',''
-'query','string',''
-'name','string',''
-'dt','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2011], dbName:alter_table_serde, tableName:test_table, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null), FieldSchema(name:query, type:string, comment:null), FieldSchema(name:name, type:string, comment:null), FieldSchema(name:dt, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table/dt=2011, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{transient_lastDdlTime=!!UNIXTIME!!})',''
-6 rows selected 
->>>  
->>>  alter table test_table set serdeproperties ('field.delim' = ',');
-No rows affected 
->>>  describe extended test_table partition (dt='2011');
-'col_name','data_type','comment'
-'id','int',''
-'query','string',''
-'name','string',''
-'dt','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2011], dbName:alter_table_serde, tableName:test_table, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null), FieldSchema(name:query, type:string, comment:null), FieldSchema(name:name, type:string, comment:null), FieldSchema(name:dt, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table/dt=2011, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{transient_lastDdlTime=!!UNIXTIME!!})',''
-6 rows selected 
->>>  
->>>  -- test partitions
->>>  
->>>  alter table test_table partition(dt='2011') set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
-No rows affected 
->>>  describe extended test_table partition (dt='2011');
-'col_name','data_type','comment'
-'id','int',''
-'query','string',''
-'name','string',''
-'dt','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2011], dbName:alter_table_serde, tableName:test_table, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null), FieldSchema(name:query, type:string, comment:null), FieldSchema(name:name, type:string, comment:null), FieldSchema(name:dt, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table/dt=2011, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!})',''
-6 rows selected 
->>>  
->>>  alter table test_table partition(dt='2011') set serdeproperties ('field.delim' = ',');
-No rows affected 
->>>  describe extended test_table partition (dt='2011');
-'col_name','data_type','comment'
-'id','int',''
-'query','string',''
-'name','string',''
-'dt','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2011], dbName:alter_table_serde, tableName:test_table, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null), FieldSchema(name:query, type:string, comment:null), FieldSchema(name:name, type:string, comment:null), FieldSchema(name:dt, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table/dt=2011, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1, field.delim=,}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!})',''
-6 rows selected 
->>>  
->>>  drop table test_table;
-No rows affected 
->>>  !record
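
For reference, the deleted alter_table_serde.q.out above exercised partition-level SerDe reconfiguration. A minimal sketch of the commands it verified (table, partition, and property names taken from the deleted test):

  ALTER TABLE test_table PARTITION (dt='2011')
    SET SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
  -- serde properties merge into the partition's SerDeInfo parameters
  ALTER TABLE test_table PARTITION (dt='2011')
    SET SERDEPROPERTIES ('field.delim' = ',');

Only the named partition's storage descriptor changes; the table-level SerDe is left untouched, which is what the two DESCRIBE EXTENDED outputs above pinned down.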

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_view_rename.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_view_rename.q.out b/ql/src/test/results/beelinepositive/alter_view_rename.q.out
deleted file mode 100644
index 8d249aa..0000000
--- a/ql/src/test/results/beelinepositive/alter_view_rename.q.out
+++ /dev/null
@@ -1,35 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_view_rename.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_view_rename.q
->>>  CREATE TABLE invites (foo INT, bar STRING) PARTITIONED BY (ds STRING);
-No rows affected 
->>>  CREATE VIEW view1 as SELECT * FROM invites;
-'foo','bar','ds'
-No rows selected 
->>>  DESCRIBE EXTENDED view1;
-'col_name','data_type','comment'
-'foo','int',''
-'bar','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:view1, dbName:alter_view_rename, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:foo, type:int, comment:null), FieldSchema(name:bar, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT * FROM invites, viewExpandedText:SELECT `invites`.`foo`, `invites`.`bar`, `invites`.`ds` FROM `alter_view_rename`.`invites`, tableType:VIRTUAL_VIEW)',''
-5 rows selected 
->>>  
->>>  ALTER VIEW view1 RENAME TO view2;
-No rows affected 
->>>  DESCRIBE EXTENDED view2;
-'col_name','data_type','comment'
-'foo','int',''
-'bar','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:view2, dbName:alter_view_rename, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:foo, type:int, comment:null), FieldSchema(name:bar, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT * FROM invites, viewExpandedText:SELECT `invites`.`foo`, `invites`.`bar`, `invites`.`ds` FROM `alter_view_rename`.`invites`, tableType:VIRTUAL_VIEW)',''
-5 rows selected 
->>>  SELECT * FROM view2;
-'foo','bar','ds'
-No rows selected 
->>>  
->>>  DROP TABLE invites;
-No rows affected 
->>>  DROP VIEW view2;
-No rows affected 
->>>  !record
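
For reference, alter_view_rename.q.out covered ALTER VIEW ... RENAME TO. A minimal sketch of what it verified (names from the deleted test):

  CREATE VIEW view1 AS SELECT * FROM invites;
  ALTER VIEW view1 RENAME TO view2;
  -- the stored view text survives the rename
  SELECT * FROM view2;

As the DESCRIBE EXTENDED output above shows, the rename changes only the view's name and last-modified metadata; viewOriginalText and viewExpandedText still reference the underlying invites table.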

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/archive_excludeHadoop20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/archive_excludeHadoop20.q.out b/ql/src/test/results/beelinepositive/archive_excludeHadoop20.q.out
deleted file mode 100644
index 2bca2c3..0000000
--- a/ql/src/test/results/beelinepositive/archive_excludeHadoop20.q.out
+++ /dev/null
@@ -1,155 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/archive_excludeHadoop20.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/archive_excludeHadoop20.q
->>>  set hive.archive.enabled = true;
-No rows affected 
->>>  set hive.enforce.bucketing = true;
-No rows affected 
->>>  
->>>  -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20)
->>>  
->>>  drop table tstsrc;
-No rows affected 
->>>  drop table tstsrcpart;
-No rows affected 
->>>  
->>>  create table tstsrc like src;
-No rows affected 
->>>  insert overwrite table tstsrc select key, value from src;
-'key','value'
-No rows selected 
->>>  
->>>  create table tstsrcpart (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 10 buckets;
-No rows affected 
->>>  
->>>  insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11') 
-select key, value from srcpart where ds='2008-04-08' and hr='11';
-'key','value'
-No rows selected 
->>>  
->>>  insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='12') 
-select key, value from srcpart where ds='2008-04-08' and hr='12';
-'key','value'
-No rows selected 
->>>  
->>>  insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='11') 
-select key, value from srcpart where ds='2008-04-09' and hr='11';
-'key','value'
-No rows selected 
->>>  
->>>  insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='12') 
-select key, value from srcpart where ds='2008-04-09' and hr='12';
-'key','value'
-No rows selected 
->>>  
->>>  SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col 
-FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2;
-'_c0'
-'48479881068'
-1 row selected 
->>>  
->>>  ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08', hr='12');
-No rows affected 
->>>  
->>>  SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col 
-FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2;
-'_c0'
-'48479881068'
-1 row selected 
->>>  
->>>  SELECT key, count(1) FROM tstsrcpart WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key;
-'key','_c1'
-'0','3'
-1 row selected 
->>>  
->>>  SELECT * FROM tstsrcpart a JOIN tstsrc b ON a.key=b.key 
-WHERE a.ds='2008-04-08' AND a.hr='12' AND a.key='0';
-'key','value','ds','hr','key','value'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-9 rows selected 
->>>  
->>>  ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr='12');
-No rows affected 
->>>  
->>>  SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col 
-FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2;
-'_c0'
-'48479881068'
-1 row selected 
->>>  
->>>  CREATE TABLE harbucket(key INT) 
-PARTITIONED by (ds STRING) 
-CLUSTERED BY (key) INTO 10 BUCKETS;
-No rows affected 
->>>  
->>>  INSERT OVERWRITE TABLE harbucket PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc WHERE key < 50;
-'a'
-No rows selected 
->>>  
->>>  SELECT key FROM harbucket TABLESAMPLE(BUCKET 1 OUT OF 10) SORT BY key;
-'key'
-'0'
-'0'
-'0'
-'10'
-'20'
-'30'
-6 rows selected 
->>>  ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08', hr='12');
-No rows affected 
->>>  SELECT key FROM harbucket TABLESAMPLE(BUCKET 1 OUT OF 10) SORT BY key;
-'key'
-'0'
-'0'
-'0'
-'10'
-'20'
-'30'
-6 rows selected 
->>>  ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr='12');
-No rows affected 
->>>  SELECT key FROM harbucket TABLESAMPLE(BUCKET 1 OUT OF 10) SORT BY key;
-'key'
-'0'
-'0'
-'0'
-'10'
-'20'
-'30'
-6 rows selected 
->>>  
->>>  
->>>  CREATE TABLE old_name(key INT) 
-PARTITIONED by (ds STRING);
-No rows affected 
->>>  
->>>  INSERT OVERWRITE TABLE old_name PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc WHERE key < 50;
-'a'
-No rows selected 
->>>  ALTER TABLE old_name ARCHIVE PARTITION (ds='1');
-No rows affected 
->>>  SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col 
-FROM (SELECT * FROM old_name WHERE ds='1') subq1) subq2;
-'_c0'
-'48656137'
-1 row selected 
->>>  ALTER TABLE old_name RENAME TO new_name;
-No rows affected 
->>>  SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col 
-FROM (SELECT * FROM new_name WHERE ds='1') subq1) subq2;
-'_c0'
-''
-1 row selected 
->>>  
->>>  drop table tstsrc;
-No rows affected 
->>>  drop table tstsrcpart;
-No rows affected 
->>>  !record
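
For reference, archive_excludeHadoop20.q.out covered partition archiving, which packs a partition's files into a Hadoop archive (HAR) while keeping the partition queryable. A minimal sketch (names from the deleted test; requires hive.archive.enabled):

  set hive.archive.enabled=true;
  ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08', hr='12');
  -- rows stay readable while archived; the identical checksums above confirm this
  SELECT count(1) FROM tstsrcpart WHERE ds='2008-04-08' AND hr='12';
  ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr='12');

Note the empty checksum after the old_name to new_name rename: the archived partition's data is no longer found under the renamed table, a behavior this golden file also captured.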

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/authorization_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/authorization_3.q.out b/ql/src/test/results/beelinepositive/authorization_3.q.out
deleted file mode 100644
index d8c1dcb..0000000
--- a/ql/src/test/results/beelinepositive/authorization_3.q.out
+++ /dev/null
@@ -1,33 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/authorization_3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/authorization_3.q
->>>  create table src_autho_test as select * from src;
-'key','value'
-No rows selected 
->>>  
->>>  grant drop on table src_autho_test to user hive_test_user;
-No rows affected 
->>>  grant select on table src_autho_test to user hive_test_user;
-No rows affected 
->>>  
->>>  show grant user hive_test_user on table src_autho_test;
-No rows affected 
->>>  
->>>  revoke select on table src_autho_test from user hive_test_user;
-No rows affected 
->>>  revoke drop on table src_autho_test from user hive_test_user;
-No rows affected 
->>>  
->>>  grant drop,select on table src_autho_test to user hive_test_user;
-No rows affected 
->>>  show grant user hive_test_user on table src_autho_test;
-No rows affected 
->>>  revoke drop,select on table src_autho_test from user hive_test_user;
-No rows affected 
->>>  
->>>  grant drop,select(key), select(value) on table src_autho_test to user hive_test_user;
-No rows affected 
->>>  show grant user hive_test_user on table src_autho_test;
-No rows affected 
->>>  revoke drop,select(key), select(value) on table src_autho_test from user hive_test_user;
-No rows affected 
->>>  !record
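
For reference, authorization_3.q.out covered the legacy GRANT/REVOKE authorization path, including column-level SELECT. A minimal sketch of the statements it exercised (names from the deleted test):

  GRANT DROP, SELECT ON TABLE src_autho_test TO USER hive_test_user;
  SHOW GRANT USER hive_test_user ON TABLE src_autho_test;
  REVOKE DROP, SELECT ON TABLE src_autho_test FROM USER hive_test_user;
  -- column-level grants name the columns in parentheses
  GRANT SELECT(key), SELECT(value) ON TABLE src_autho_test TO USER hive_test_user;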

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join0.q.out b/ql/src/test/results/beelinepositive/auto_join0.q.out
deleted file mode 100644
index e691113..0000000
--- a/ql/src/test/results/beelinepositive/auto_join0.q.out
+++ /dev/null
@@ -1,369 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join0.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join0.q
->>>  
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  explain 
-select sum(hash(a.k1,a.v1,a.k2, a.v2)) 
-from ( 
-SELECT src1.key as k1, src1.value as v1, 
-src2.key as k2, src2.value as v2 FROM 
-(SELECT * FROM src WHERE src.key < 10) src1 
-JOIN 
-(SELECT * FROM src WHERE src.key < 10) src2 
-SORT BY k1, v1, k2, v2 
-) a;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 10)))) src1) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 10)))) src2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) k1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) v1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) k2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) v2)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k2)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v2))))) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL a) k1) (. (TOK_TABLE_OR_COL a) v1) (. (TOK_TABLE_OR_COL a) k2) (. (TOK_TABLE_OR_COL a) v2)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-7 is a root stage , consists of Stage-8, Stage-9, Stage-1'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-2 depends on stages: Stage-1, Stage-5, Stage-6'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-9 has a backup stage: Stage-1'
-'  Stage-6 depends on stages: Stage-9'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a:src2:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a:src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0} {_col1}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 []'
-'                    1 []'
-'                  Position of Big Table: 0'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Map Join Operator'
-'                  condition map:'
-'                       Inner Join 0 to 1'
-'                  condition expressions:'
-'                    0 {_col0} {_col1}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 []'
-'                    1 []'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  Position of Big Table: 0'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: string'
-'                          expr: _col2'
-'                          type: string'
-'                          expr: _col3'
-'                          type: string'
-'                    outputColumnNames: _col0, _col1, _col2, _col3'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 0'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'              sort order: ++++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col0,_col1,_col2,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a:src1:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a:src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0} {_col1}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 []'
-'                    1 []'
-'                  Position of Big Table: 1'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Map Join Operator'
-'                  condition map:'
-'                       Inner Join 0 to 1'
-'                  condition expressions:'
-'                    0 {_col0} {_col1}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 []'
-'                    1 []'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  Position of Big Table: 1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: string'
-'                          expr: _col2'
-'                          type: string'
-'                          expr: _col3'
-'                          type: string'
-'                    outputColumnNames: _col0, _col1, _col2, _col3'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 0'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: 0'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'        a:src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: 1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-337 rows selected 
->>>  
->>>  select sum(hash(a.k1,a.v1,a.k2, a.v2)) 
-from ( 
-SELECT src1.key as k1, src1.value as v1, 
-src2.key as k2, src2.value as v2 FROM 
-(SELECT * FROM src WHERE src.key < 10) src1 
-JOIN 
-(SELECT * FROM src WHERE src.key < 10) src2 
-SORT BY k1, v1, k2, v2 
-) a;
-'_c0'
-'34441656720'
-1 row selected 
->>>  !record
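
For reference, auto_join0.q.out pinned down the plan shape for automatic map-join conversion. With hive.auto.convert.join=true the optimizer wraps the join in a Conditional Operator (Stage-7 above) that prefers a local hash-table Map Join built from either side (Stage-8/Stage-5 and Stage-9/Stage-6) and keeps the shuffle Join (Stage-1) as a backup stage. A minimal sketch of the knob and a query of the same shape (names from the deleted test):

  set hive.auto.convert.join=true;
  EXPLAIN
  SELECT sum(hash(a.key, a.value, b.key, b.value))
  FROM (SELECT * FROM src WHERE src.key < 10) a
  JOIN (SELECT * FROM src WHERE src.key < 10) b;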

