From: namit@apache.org
To: commits@hive.apache.org
Subject: svn commit: r1422749 - in /hive/trunk/ql/src: java/org/apache/hadoop/hive/ql/ java/org/apache/hadoop/hive/ql/exec/ java/org/apache/hadoop/hive/ql/metadata/ java/org/apache/hadoop/hive/ql/parse/ java/org/apache/hadoop/hive/ql/plan/ test/queries/clientpo...
Date: Mon, 17 Dec 2012 04:09:03 -0000
Message-Id: <20121217040905.845B72388980@eris.apache.org>

Author: namit
Date: Mon Dec 17 04:09:01 2012
New Revision: 1422749

URL: http://svn.apache.org/viewvc?rev=1422749&view=rev
Log:
HIVE-3492 Provide ALTER for partition changing bucket number
(Navis via namit)

Modified:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
    hive/trunk/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
    hive/trunk/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java?rev=1422749&r1=1422748&r2=1422749&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java Mon Dec 17 04:09:01 2012
@@ -245,6 +245,8 @@ public enum ErrorMsg {
   SHOW_CREATETABLE_INDEX(10144, "SHOW CREATE TABLE does not support tables of type INDEX_TABLE."),
 
+  ALTER_BUCKETNUM_NONBUCKETIZED_TBL(10145, "Table is not bucketized"),
+
   LOAD_INTO_STORED_AS_DIR(10195, "A stored-as-directories table cannot be used as target for LOAD"),
   ALTER_TBL_STOREDASDIR_NOT_SKEWED(10196, "This operation is only valid on skewed table."),
   ALTER_TBL_SKEWED_LOC_NO_LOC(10197, "Alter table skewed location doesn't have locations."),
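For orientation before the remaining diffs: per the new Hive.g rule and the .q test further down, the statement form this commit adds looks like the following HiveQL sketch. tst1 is the bucketed test table used below; plain_tbl is a hypothetical table created without CLUSTERED BY, shown only to illustrate the new error:

    -- Legal: tst1 is clustered by (key), so its bucket count can be altered
    alter table tst1 into 4 buckets;
    alter table tst1 partition (ds = '1') into 6 buckets;

    -- Fails with the new error 10145, "Table is not bucketized", because
    -- plain_tbl (hypothetical) has no bucket columns
    alter table plain_tbl into 8 buckets;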
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1422749&r1=1422748&r2=1422749&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Mon Dec 17 04:09:01 2012
@@ -3230,6 +3230,18 @@ public class DDLTask extends Task

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java?rev=1422749&r1=1422748&r2=1422749&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java Mon Dec 17 04:09:01 2012
+  public List<String> getBucketCols() {
+    return tPartition.getSd().getBucketCols();
+  }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1422749&r1=1422748&r2=1422749&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Mon Dec 17 04:09:01 2012
@@ -213,6 +213,8 @@ public class DDLSemanticAnalyzer extends
         analyzeAlterTableRenamePart(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTBLPART_SKEWED_LOCATION) {
         analyzeAlterTableSkewedLocation(ast, tableName, partSpec);
+      } else if (ast.getToken().getType() == HiveParser.TOK_TABLEBUCKETS) {
+        analyzeAlterTableBucketNum(ast, tableName, partSpec);
       }
       break;
     }
@@ -721,14 +723,9 @@ public class DDLSemanticAnalyzer extends
     // configured not to fail silently
     boolean throwException =
         !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
-    try {
-      Table tab = db.getTable(db.getCurrentDatabase(), tableName, throwException);
-      if (tab != null) {
-        inputs.add(new ReadEntity(tab));
-        outputs.add(new WriteEntity(tab));
-      }
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
+    Table tab = getTable(db.getCurrentDatabase(), tableName, throwException);
+    if (tab != null) {
+      outputs.add(new WriteEntity(tab));
     }

     DropTableDesc dropTblDesc = new DropTableDesc(
@@ -1099,15 +1096,7 @@ public class DDLSemanticAnalyzer extends

   private void addInputsOutputsAlterTable(String tableName,
       HashMap<String, String> partSpec, AlterTableDesc desc) throws SemanticException {
-    Table tab = null;
-    try {
-      tab = db.getTable(db.getCurrentDatabase(), tableName, true);
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
-    }
-
-    inputs.add(new ReadEntity(tab));
-
+    Table tab = getTable(db.getCurrentDatabase(), tableName, true);
     if ((partSpec == null) || (partSpec.isEmpty())) {
       outputs.add(new WriteEntity(tab));
     }
@@ -1332,14 +1321,8 @@ public class DDLSemanticAnalyzer extends

   private void analyzeAlterTableClusterSort(ASTNode ast)
       throws SemanticException {
-    String tableName = getUnescapedName((ASTNode) ast.getChild(0));
-    Table tab = null;
-
-    try {
-      tab = db.getTable(db.getCurrentDatabase(), tableName, true);
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
-    }
+    String tableName = getUnescapedName((ASTNode)ast.getChild(0));
+    Table tab = getTable(db.getCurrentDatabase(), tableName, true);

     inputs.add(new ReadEntity(tab));
     outputs.add(new WriteEntity(tab));
@@ -1850,19 +1833,7 @@ public class DDLSemanticAnalyzer extends
       break;
     }

-    try {
-      Table tab = null;
-      if (dbName == null) {
-        tab = db.getTable(tableName, true);
-      }
-      else {
-        tab = db.getTable(dbName, tableName, true);
-      }
-      inputs.add(new ReadEntity(tab));
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
-    }
-
+    Table tab = getTable(dbName, tableName, true);
     showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), dbName, tableName);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
         showColumnsDesc), conf));
@@ -2162,17 +2133,7 @@ public class DDLSemanticAnalyzer extends
     if (newPartSpec == null) {
       throw new SemanticException("RENAME PARTITION Missing Destination" + ast);
     }
-    Table tab = null;
-    try {
-      tab = db.getTable(db.getCurrentDatabase(), tblName, false);
-      if (tab != null) {
-        inputs.add(new ReadEntity(tab));
-      } else {
-        throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
-      }
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
-    }
+    Table tab = getTable(db.getCurrentDatabase(), tblName, true);
     validateAlterTableType(tab, AlterTableTypes.RENAMEPARTITION);

     List<Map<String, String>> partSpecs = new ArrayList<Map<String, String>>();
@@ -2185,6 +2146,21 @@ public class DDLSemanticAnalyzer extends
         renamePartitionDesc), conf));
   }

+  private void analyzeAlterTableBucketNum(ASTNode ast, String tblName,
+      HashMap<String, String> partSpec) throws SemanticException {
+    Table tab = getTable(db.getCurrentDatabase(), tblName, true);
+    if (tab.getBucketCols() == null || tab.getBucketCols().isEmpty()) {
+      throw new SemanticException(ErrorMsg.ALTER_BUCKETNUM_NONBUCKETIZED_TBL.getMsg());
+    }
+    validateAlterTableType(tab, AlterTableTypes.ALTERBUCKETNUM);
+
+    int bucketNum = Integer.parseInt(ast.getChild(0).getText());
+    AlterTableDesc alterBucketNum = new AlterTableDesc(tblName, partSpec, bucketNum);
+
+    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
+        alterBucketNum), conf));
+  }
+
   private void analyzeAlterTableModifyCols(ASTNode ast,
       AlterTableTypes alterType) throws SemanticException {
     String tblName = getUnescapedName((ASTNode) ast.getChild(0));
@@ -2203,16 +2179,7 @@ public class DDLSemanticAnalyzer extends
     String tblName = getUnescapedName((ASTNode) ast.getChild(0));
     // get table metadata
     List<PartitionSpec> partSpecs = getFullPartitionSpecs(ast);
-    Table tab = null;
-
-    try {
-      tab = db.getTable(db.getCurrentDatabase(), tblName, false);
-      if (tab != null) {
-        inputs.add(new ReadEntity(tab));
-      }
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
-    }
+    Table tab = getTable(db.getCurrentDatabase(), tblName, true);
    validateAlterTableType(tab, AlterTableTypes.DROPPARTITION, expectView);

     // Find out if all partition columns are strings. This is needed for JDO
@@ -2268,18 +2235,9 @@ public class DDLSemanticAnalyzer extends

   private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView)
       throws SemanticException {
-    String tblName = getUnescapedName((ASTNode) ast.getChild(0));
-    boolean isView = false;
-    Table tab = null;
-    try {
-      tab = db.getTable(db.getCurrentDatabase(), tblName, false);
-      if (tab != null) {
-        inputs.add(new ReadEntity(tab));
-        isView = tab.isView();
-      }
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
-    }
+    String tblName = getUnescapedName((ASTNode)ast.getChild(0));
+    Table tab = getTable(db.getCurrentDatabase(), tblName, true);
+    boolean isView = tab.isView();
     validateAlterTableType(tab, AlterTableTypes.ADDPARTITION, expectView);

     // partition name to value
@@ -2400,17 +2358,8 @@ public class DDLSemanticAnalyzer extends

   private void analyzeAlterTableTouch(CommonTree ast)
       throws SemanticException {
-    String tblName = getUnescapedName((ASTNode) ast.getChild(0));
-    Table tab;
-
-    try {
-      tab = db.getTable(db.getCurrentDatabase(), tblName, false);
-      if (tab != null) {
-        inputs.add(new ReadEntity(tab));
-      }
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
-    }
+    String tblName = getUnescapedName((ASTNode)ast.getChild(0));
+    Table tab = getTable(db.getCurrentDatabase(), tblName, true);
     validateAlterTableType(tab, AlterTableTypes.TOUCH);

     // partition name to value
@@ -2446,15 +2395,7 @@ public class DDLSemanticAnalyzer extends

     // partition name to value
     List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
-    Table tab = null;
-    try {
-      tab = db.getTable(db.getCurrentDatabase(), tblName, false);
-      if (tab != null) {
-        inputs.add(new ReadEntity(tab));
-      }
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
-    }
+    Table tab = getTable(db.getCurrentDatabase(), tblName, true);
     addTablePartsOutputs(tblName, partSpecs, true);
     validateAlterTableType(tab, AlterTableTypes.ARCHIVE);

@@ -2976,4 +2917,20 @@ public class DDLSemanticAnalyzer extends
     }
   }

+  private Table getTable(String database, String tblName, boolean throwException)
+      throws SemanticException {
+    try {
+      Table tab = database == null ? db.getTable(tblName, false)
+          : db.getTable(database, tblName, false);
+      if (tab == null && throwException) {
+        throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
+      }
+      if (tab != null) {
+        inputs.add(new ReadEntity(tab));
+      }
+      return tab;
+    } catch (HiveException e) {
+      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
+    }
+  }
 }
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g?rev=1422749&r1=1422748&r2=1422749&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g Mon Dec 17 04:09:01 2012
@@ -767,6 +767,7 @@ alterTblPartitionStatementSuffix
   | alterStatementSuffixMergeFiles
   | alterStatementSuffixSerdeProperties
   | alterStatementSuffixRenamePart
+  | alterStatementSuffixBucketNum
   | alterTblPartitionStatementSuffixSkewedLocation
   ;

@@ -862,6 +863,12 @@ alterProtectModeMode
   | KW_READONLY -> ^(TOK_READONLY)
   ;

+alterStatementSuffixBucketNum
+@init { msgs.push(""); }
+@after { msgs.pop(); }
+  : KW_INTO num=Number KW_BUCKETS
+  -> ^(TOK_TABLEBUCKETS $num)
+  ;

 alterStatementSuffixClusterbySortby
 @init {msgs.push("alter cluster by sort by statement");}

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java?rev=1422749&r1=1422748&r2=1422749&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java Mon Dec 17 04:09:01 2012
@@ -120,6 +120,9 @@ public final class SemanticAnalyzerFacto
     tablePartitionCommandType.put(HiveParser.TOK_ALTERTBLPART_SKEWED_LOCATION,
         new HiveOperation[] {HiveOperation.ALTERTBLPART_SKEWED_LOCATION,
             HiveOperation.ALTERTBLPART_SKEWED_LOCATION });
+    tablePartitionCommandType.put(HiveParser.TOK_TABLEBUCKETS,
+        new HiveOperation[] {HiveOperation.ALTERTABLE_BUCKETNUM,
+            HiveOperation.ALTERPARTITION_BUCKETNUM});
   }

   public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree)
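The two-element HiveOperation arrays registered above are indexed by whether the ALTER carries a partition spec, so the same TOK_TABLEBUCKETS token resolves to either operation. A sketch of the mapping, matching the PREHOOK/POSTHOOK types in the .q.out below:

    -- No PARTITION clause: HiveOperation.ALTERTABLE_BUCKETNUM
    alter table tst1 into 4 buckets;

    -- With a PARTITION clause: HiveOperation.ALTERPARTITION_BUCKETNUM
    alter table tst1 partition (ds = '1') into 6 buckets;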
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java?rev=1422749&r1=1422748&r2=1422749&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java Mon Dec 17 04:09:01 2012
@@ -47,12 +47,13 @@ public class AlterTableDesc extends DDLD
     RENAME, ADDCOLS, REPLACECOLS, ADDPROPS, ADDSERDE, ADDSERDEPROPS,
     ADDFILEFORMAT, ADDCLUSTERSORTCOLUMN, RENAMECOLUMN, ADDPARTITION,
     TOUCH, ARCHIVE, UNARCHIVE, ALTERPROTECTMODE, ALTERPARTITIONPROTECTMODE,
-    ALTERLOCATION, DROPPARTITION, RENAMEPARTITION, ADDSKEWEDBY, ALTERSKEWEDLOCATION
-  };
+    ALTERLOCATION, DROPPARTITION, RENAMEPARTITION, ADDSKEWEDBY, ALTERSKEWEDLOCATION,
+    ALTERBUCKETNUM
+  }

   public static enum ProtectModeType {
     NO_DROP, OFFLINE, READ_ONLY, NO_DROP_CASCADE
-  };
+  }

   AlterTableTypes op;

@@ -211,6 +212,13 @@ public class AlterTableDesc extends DDLD
     this.skewedColValues = new ArrayList<List<String>>(skewedColValues);
   }

+  public AlterTableDesc(String tableName, HashMap<String, String> partSpec, int numBuckets) {
+    op = AlterTableTypes.ALTERBUCKETNUM;
+    this.oldName = tableName;
+    this.partSpec = partSpec;
+    this.numberBuckets = numBuckets;
+  }
+
   @Explain(displayName = "new columns")
   public List getNewColsString() {
     return Utilities.getFieldSchemaString(getNewCols());

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java?rev=1422749&r1=1422748&r2=1422749&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java Mon Dec 17 04:09:01 2012
@@ -50,6 +50,10 @@ public enum HiveOperation {
   ALTERPARTITION_SERDEPROPERTIES("ALTERPARTITION_SERDEPROPERTIES", new Privilege[]{Privilege.ALTER_METADATA}, null),
   ALTERTABLE_CLUSTER_SORT("ALTERTABLE_CLUSTER_SORT", new Privilege[]{Privilege.ALTER_METADATA}, null),
   ANALYZE_TABLE("ANALYZE_TABLE", null, null),
+  ALTERTABLE_BUCKETNUM("ALTERTABLE_BUCKETNUM",
+      new Privilege[]{Privilege.ALTER_METADATA}, null),
+  ALTERPARTITION_BUCKETNUM("ALTERPARTITION_BUCKETNUM",
+      new Privilege[]{Privilege.ALTER_METADATA}, null),
   SHOWDATABASES("SHOWDATABASES", new Privilege[]{Privilege.SHOW_DATABASE}, null),
   SHOWTABLES("SHOWTABLES", null, null),
   SHOWCOLUMNS("SHOWCOLUMNS", null, null),

Modified: hive/trunk/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q?rev=1422749&r1=1422748&r2=1422749&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q Mon Dec 17 04:09:01 2012
@@ -20,6 +20,20 @@ describe formatted tst1 partition (ds =
 describe formatted tst1;

+-- Test changing bucket number of (table/partition)
+
+alter table tst1 into 4 buckets;
+
+describe formatted tst1;
+
+describe formatted tst1 partition (ds = '1');
+
+alter table tst1 partition (ds = '1') into 6 buckets;
+
+describe formatted tst1;
+
+describe formatted tst1 partition (ds = '1');
+
 -- Test adding sort order
 alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets;
Modified: hive/trunk/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out?rev=1422749&r1=1422748&r2=1422749&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out Mon Dec 17 04:09:01 2012
@@ -207,6 +207,206 @@ Bucket Columns: [key]
 Sort Columns:          []
 Storage Desc Params:
   serialization.format  1
+PREHOOK: query: -- Test changing bucket number of (table/partition)
+
+alter table tst1 into 4 buckets
+PREHOOK: type: ALTERTABLE_BUCKETNUM
+PREHOOK: Input: default@tst1
+POSTHOOK: query: -- Test changing bucket number of (table/partition)
+
+alter table tst1 into 4 buckets
+POSTHOOK: type: ALTERTABLE_BUCKETNUM
+POSTHOOK: Input: default@tst1
+POSTHOOK: Output: default@tst1
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted tst1
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe formatted tst1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+# col_name             data_type            comment
+
+key                    string               None
+value                  string               None
+
+# Partition Information
+# col_name             data_type            comment
+
+ds                     string               None
+
+# Detailed Table Information
+Database:              default
+#### A masked pattern was here ####
+Protect Mode:          None
+Retention:             0
+#### A masked pattern was here ####
+Table Type:            MANAGED_TABLE
+Table Parameters:
+#### A masked pattern was here ####
+  numFiles             1
+  numPartitions        1
+  numRows              500
+  rawDataSize          5312
+  totalSize            5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:         org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:           org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:          org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:            No
+Num Buckets:           4
+Bucket Columns:        [key]
+Sort Columns:          []
+Storage Desc Params:
+  serialization.format  1
+PREHOOK: query: describe formatted tst1 partition (ds = '1')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe formatted tst1 partition (ds = '1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+# col_name             data_type            comment
+
+key                    string               None
+value                  string               None
+
+# Partition Information
+# col_name             data_type            comment
+
+ds                     string               None
+
+# Detailed Partition Information
+Partition Value:       [1]
+Database:              default
+Table:                 tst1
+#### A masked pattern was here ####
+Protect Mode:          None
+#### A masked pattern was here ####
+Partition Parameters:
+  numFiles             1
+  numRows              500
+  rawDataSize          5312
+  totalSize            5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:         org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:           org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:          org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:            No
+Num Buckets:           12
+Bucket Columns:        [key]
+Sort Columns:          []
+Storage Desc Params:
+  serialization.format  1
+PREHOOK: query: alter table tst1 partition (ds = '1') into 6 buckets
+PREHOOK: type: ALTERPARTITION_BUCKETNUM
+PREHOOK: Input: default@tst1
+POSTHOOK: query: alter table tst1 partition (ds = '1') into 6 buckets
+POSTHOOK: type: ALTERPARTITION_BUCKETNUM
+POSTHOOK: Input: default@tst1
+POSTHOOK: Input: default@tst1@ds=1
+POSTHOOK: Output: default@tst1@ds=1
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted tst1
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe formatted tst1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+# col_name             data_type            comment
+
+key                    string               None
+value                  string               None
+
+# Partition Information
+# col_name             data_type            comment
+
+ds                     string               None
+
+# Detailed Table Information
+Database:              default
+#### A masked pattern was here ####
+Protect Mode:          None
+Retention:             0
+#### A masked pattern was here ####
+Table Type:            MANAGED_TABLE
+Table Parameters:
+#### A masked pattern was here ####
+  numFiles             1
+  numPartitions        1
+  numRows              500
+  rawDataSize          5312
+  totalSize            5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:         org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:           org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:          org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:            No
+Num Buckets:           4
+Bucket Columns:        [key]
+Sort Columns:          []
+Storage Desc Params:
+  serialization.format  1
+PREHOOK: query: describe formatted tst1 partition (ds = '1')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe formatted tst1 partition (ds = '1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+# col_name             data_type            comment
+
+key                    string               None
+value                  string               None
+
+# Partition Information
+# col_name             data_type            comment
+
+ds                     string               None
+
+# Detailed Partition Information
+Partition Value:       [1]
+Database:              default
+Table:                 tst1
+#### A masked pattern was here ####
+Protect Mode:          None
+#### A masked pattern was here ####
+Partition Parameters:
+#### A masked pattern was here ####
+  numFiles             1
+  numRows              500
+  rawDataSize          5312
+  totalSize            5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:         org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:           org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:          org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:            No
+Num Buckets:           6
+Bucket Columns:        [key]
+Sort Columns:          []
+Storage Desc Params:
+  serialization.format  1
 PREHOOK: query: -- Test adding sort order
 alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets
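A recap of the Num Buckets values reported in the q.out above, since they are easy to lose in the diff: altering the table's bucket count rewrites table metadata only, and an existing partition keeps its own count until altered directly (the partition's initial count of 12 comes from test setup outside this excerpt):

    alter table tst1 into 4 buckets;
    -- describe formatted tst1                       => Num Buckets: 4
    -- describe formatted tst1 partition (ds = '1')  => Num Buckets: 12 (unchanged)

    alter table tst1 partition (ds = '1') into 6 buckets;
    -- describe formatted tst1                       => Num Buckets: 4 (unchanged)
    -- describe formatted tst1 partition (ds = '1')  => Num Buckets: 6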