carbondata-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jack...@apache.org
Subject [1/2] carbondata git commit: create table with partition
Date Thu, 11 May 2017 09:51:30 GMT
Repository: carbondata
Updated Branches:
  refs/heads/12-dev a161db4e2 -> 18329275f


create table with partition

fix comment

fix comments


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/ad5c8a0c
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/ad5c8a0c
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/ad5c8a0c

Branch: refs/heads/12-dev
Commit: ad5c8a0ca77492f8d9dec84541bd7201df9b3da4
Parents: a161db4
Author: QiangCai <qiangcai@qq.com>
Authored: Mon May 8 23:20:13 2017 +0800
Committer: jackylk <jacky.likun@huawei.com>
Committed: Thu May 11 17:50:50 2017 +0800

----------------------------------------------------------------------
 .../ThriftWrapperSchemaConverterImpl.java       |  79 ++++++++++++
 .../core/metadata/schema/PartitionInfo.java     |   4 +
 .../core/metadata/schema/table/CarbonTable.java |   3 +
 .../core/metadata/schema/table/TableSchema.java |   2 +-
 .../partition/TestDDLForPartitionTable.scala    | 127 +++++++++++++++++++
 .../execution/command/carbonTableSchema.scala   |   9 ++
 .../org/apache/spark/sql/CarbonSqlParser.scala  |  38 +++++-
 .../spark/sql/parser/CarbonSparkSqlParser.scala |  17 +--
 8 files changed, 269 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/ad5c8a0c/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
b/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
index 3d114ec..2d5f395 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
@@ -25,8 +25,10 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.metadata.datatype.DataType;
 import org.apache.carbondata.core.metadata.encoder.Encoding;
 import org.apache.carbondata.core.metadata.schema.BucketingInfo;
+import org.apache.carbondata.core.metadata.schema.PartitionInfo;
 import org.apache.carbondata.core.metadata.schema.SchemaEvolution;
 import org.apache.carbondata.core.metadata.schema.SchemaEvolutionEntry;
+import org.apache.carbondata.core.metadata.schema.partition.PartitionType;
 import org.apache.carbondata.core.metadata.schema.table.TableInfo;
 import org.apache.carbondata.core.metadata.schema.table.TableSchema;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
@@ -185,6 +187,41 @@ public class ThriftWrapperSchemaConverterImpl implements SchemaConverter {
     return thriftColumnSchema;
   }
 
+  private org.apache.carbondata.format.PartitionType fromWrapperToExternalPartitionType(
+      PartitionType wrapperPartitionType) {
+    if (null == wrapperPartitionType) {
+      return null;
+    }
+    switch (wrapperPartitionType) {
+      case HASH:
+        return org.apache.carbondata.format.PartitionType.HASH;
+      case LIST:
+        return org.apache.carbondata.format.PartitionType.LIST;
+      case RANGE:
+        return org.apache.carbondata.format.PartitionType.RANGE;
+      case RANGE_INTERVAL:
+        return org.apache.carbondata.format.PartitionType.RANGE_INTERVAL;
+      default:
+        return org.apache.carbondata.format.PartitionType.HASH;
+    }
+  }
+
+  private org.apache.carbondata.format.PartitionInfo fromWrapperToExternalPartitionInfo(
+      PartitionInfo wrapperPartitionInfo) {
+    List<org.apache.carbondata.format.ColumnSchema> thriftColumnSchema =
+        new ArrayList<org.apache.carbondata.format.ColumnSchema>();
+    for (ColumnSchema wrapperColumnSchema : wrapperPartitionInfo.getColumnSchemaList()) {
+      thriftColumnSchema.add(fromWrapperToExternalColumnSchema(wrapperColumnSchema));
+    }
+    org.apache.carbondata.format.PartitionInfo externalPartitionInfo =
+        new org.apache.carbondata.format.PartitionInfo(thriftColumnSchema,
+            fromWrapperToExternalPartitionType(wrapperPartitionInfo.getPartitionType()));
+    externalPartitionInfo.setList_info(wrapperPartitionInfo.getListInfo());
+    externalPartitionInfo.setRange_info(wrapperPartitionInfo.getRangeInfo());
+    externalPartitionInfo.setNum_partitions(wrapperPartitionInfo.getNumPartitions());
+    return externalPartitionInfo;
+  }
+
   /* (non-Javadoc)
    * convert from wrapper to external tableschema
    */
@@ -206,6 +243,10 @@ public class ThriftWrapperSchemaConverterImpl implements SchemaConverter {
       externalTableSchema.setBucketingInfo(
           fromWrapperToExternalBucketingInfo(wrapperTableSchema.getBucketingInfo()));
     }
+    if (wrapperTableSchema.getPartitionInfo() != null) {
+      externalTableSchema.setPartitionInfo(
+          fromWrapperToExternalPartitionInfo(wrapperTableSchema.getPartitionInfo()));
+    }
     return externalTableSchema;
   }
 
@@ -381,6 +422,40 @@ public class ThriftWrapperSchemaConverterImpl implements SchemaConverter {
     return wrapperColumnSchema;
   }
 
+  private PartitionType fromExternalToWrapperPartitionType(
+      org.apache.carbondata.format.PartitionType externalPartitionType) {
+    if (null == externalPartitionType) {
+      return null;
+    }
+    switch (externalPartitionType) {
+      case HASH:
+        return PartitionType.HASH;
+      case LIST:
+        return PartitionType.LIST;
+      case RANGE:
+        return PartitionType.RANGE;
+      case RANGE_INTERVAL:
+        return PartitionType.RANGE_INTERVAL;
+      default:
+        return PartitionType.HASH;
+    }
+  }
+
+  private PartitionInfo fromExternalToWrapperPartitionInfo(
+      org.apache.carbondata.format.PartitionInfo externalPartitionInfo) {
+    List<ColumnSchema> wrapperColumnSchema = new ArrayList<ColumnSchema>();
+    for (org.apache.carbondata.format.ColumnSchema columnSchema :
+        externalPartitionInfo.getPartition_columns()) {
+      wrapperColumnSchema.add(fromExternalToWrapperColumnSchema(columnSchema));
+    }
+    PartitionInfo wrapperPartitionInfo = new PartitionInfo(wrapperColumnSchema,
+        fromExternalToWrapperPartitionType(externalPartitionInfo.getPartition_type()));
+    wrapperPartitionInfo.setListInfo(externalPartitionInfo.getList_info());
+    wrapperPartitionInfo.setRangeInfo(externalPartitionInfo.getRange_info());
+    wrapperPartitionInfo.setNumPartitions(externalPartitionInfo.getNum_partitions());
+    return wrapperPartitionInfo;
+  }
+
   /* (non-Javadoc)
    * convert from external to wrapper tableschema
    */
@@ -402,6 +477,10 @@ public class ThriftWrapperSchemaConverterImpl implements SchemaConverter {
       wrapperTableSchema.setBucketingInfo(
           fromExternalToWarpperBucketingInfo(externalTableSchema.bucketingInfo));
     }
+    if (externalTableSchema.getPartitionInfo() != null) {
+      wrapperTableSchema.setPartitionInfo(
+          fromExternalToWrapperPartitionInfo(externalTableSchema.getPartitionInfo()));
+    }
     return wrapperTableSchema;
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ad5c8a0c/core/src/main/java/org/apache/carbondata/core/metadata/schema/PartitionInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/PartitionInfo.java
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/PartitionInfo.java
index cd4ac0e..2b08536 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/PartitionInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/PartitionInfo.java
@@ -56,6 +56,10 @@ public class PartitionInfo implements Serializable {
     return columnSchemaList;
   }
 
+  public void setColumnSchemaList(List<ColumnSchema> columnSchemaList) {
+    this.columnSchemaList = columnSchemaList;
+  }
+
   public PartitionType getPartitionType() {
     return partitionType;
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ad5c8a0c/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
index ce5cbe4..9f8beb0 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
@@ -170,9 +170,12 @@ public class CarbonTable implements Serializable {
       this.aggregateTablesName.add(aggTable.getTableName());
       fillDimensionsAndMeasuresForTables(aggTable);
       tableBucketMap.put(aggTable.getTableName(), aggTable.getBucketingInfo());
+      tablePartitionMap.put(aggTable.getTableName(), aggTable.getPartitionInfo());
     }
     tableBucketMap.put(tableInfo.getFactTable().getTableName(),
         tableInfo.getFactTable().getBucketingInfo());
+    tablePartitionMap.put(tableInfo.getFactTable().getTableName(),
+        tableInfo.getFactTable().getPartitionInfo());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ad5c8a0c/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchema.java
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchema.java
index ad2730b..f9d848e 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchema.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchema.java
@@ -195,7 +195,7 @@ public class TableSchema implements Serializable {
     return partitionInfo;
   }
 
-  public void setpartitionInfo(PartitionInfo partitionInfo) {
+  public void setPartitionInfo(PartitionInfo partitionInfo) {
     this.partitionInfo = partitionInfo;
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ad5c8a0c/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala
new file mode 100644
index 0000000..00c4df8
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.partition
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.metadata.CarbonMetadata
+import org.apache.carbondata.core.metadata.datatype.DataType
+import org.apache.carbondata.core.metadata.encoder.Encoding
+import org.apache.carbondata.core.metadata.schema.partition.PartitionType
+
+class TestDDLForPartitionTable  extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll = {
+    dropTable
+  }
+
+  test("create partition table: hash partition") {
+    sql(
+      """
+        | CREATE TABLE default.hashTable (empname String, designation String, doj Timestamp,
+        |  workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
+        |  projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
+        |  utilization int,salary int)
+        | PARTITIONED BY (empno int)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('PARTITION_TYPE'='HASH','NUM_PARTITIONS'='3')
+      """.stripMargin)
+
+    val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_hashTable")
+    val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
+    assert(partitionInfo != null)
+    assert(partitionInfo.getColumnSchemaList.get(0).getColumnName.equalsIgnoreCase("empno"))
+    assert(partitionInfo.getColumnSchemaList.get(0).getDataType == DataType.INT)
+    assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.size == 0)
+    assert(partitionInfo.getPartitionType ==  PartitionType.HASH)
+    assert(partitionInfo.getNumPartitions == 3)
+  }
+
+  test("create partition table: range partition") {
+    sql(
+      """
+        | CREATE TABLE default.rangeTable (empno int, empname String, designation String,
+        |  workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
+        |  projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
+        |  utilization int,salary int)
+        | PARTITIONED BY (doj Timestamp)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('PARTITION_TYPE'='RANGE',
+        |  'RANGE_INFO'='01-01-2010, 01-01-2015, 01-04-2015, 01-07-2015')
+      """.stripMargin)
+
+    val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_rangeTable")
+    val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
+    assert(partitionInfo != null)
+    assert(partitionInfo.getColumnSchemaList.get(0).getColumnName.equalsIgnoreCase("doj"))
+    assert(partitionInfo.getColumnSchemaList.get(0).getDataType == DataType.TIMESTAMP)
+    assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.size == 3)
+    assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(0) == Encoding.DICTIONARY)
+    assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(1) == Encoding.DIRECT_DICTIONARY)
+    assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(2) == Encoding.INVERTED_INDEX)
+    assert(partitionInfo.getPartitionType == PartitionType.RANGE)
+    assert(partitionInfo.getRangeInfo.size == 4)
+    assert(partitionInfo.getRangeInfo.get(0).equals("01-01-2010"))
+    assert(partitionInfo.getRangeInfo.get(1).equals("01-01-2015"))
+    assert(partitionInfo.getRangeInfo.get(2).equals("01-04-2015"))
+    assert(partitionInfo.getRangeInfo.get(3).equals("01-07-2015"))
+  }
+
+  test("create partition table: list partition") {
+    sql(
+      """
+        | CREATE TABLE default.listTable (empno int, empname String, designation String, doj Timestamp,
+        |  workgroupcategoryname String, deptno int, deptname String,
+        |  projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
+        |  utilization int,salary int)
+        | PARTITIONED BY (workgroupcategory string)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('PARTITION_TYPE'='LIST',
+        |  'LIST_INFO'='0, 1, (2, 3)')
+      """.stripMargin)
+    val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_listTable")
+    val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
+    assert(partitionInfo != null)
+    assert(partitionInfo.getColumnSchemaList.get(0).getColumnName.equalsIgnoreCase("workgroupcategory"))
+    assert(partitionInfo.getColumnSchemaList.get(0).getDataType == DataType.STRING)
+    assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.size == 2)
+    assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(0) == Encoding.DICTIONARY)
+    assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(1) == Encoding.INVERTED_INDEX)
+    assert(partitionInfo.getPartitionType == PartitionType.LIST)
+    assert(partitionInfo.getListInfo.size == 3)
+    assert(partitionInfo.getListInfo.get(0).size == 1)
+    assert(partitionInfo.getListInfo.get(0).get(0).equals("0"))
+    assert(partitionInfo.getListInfo.get(1).size == 1)
+    assert(partitionInfo.getListInfo.get(1).get(0).equals("1"))
+    assert(partitionInfo.getListInfo.get(2).size == 2)
+    assert(partitionInfo.getListInfo.get(2).get(0).equals("2"))
+    assert(partitionInfo.getListInfo.get(2).get(1).equals("3"))
+  }
+
+  override def afterAll = {
+    dropTable
+  }
+
+  def dropTable = {
+    sql("drop table if exists hashTable")
+    sql("drop table if exists rangeTable")
+    sql("drop table if exists listTable")
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ad5c8a0c/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 6f50ecc..a3892af 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -506,6 +506,15 @@ class TableNewProcessor(cm: TableModel) {
       tableSchema.setBucketingInfo(
         new BucketingInfo(bucketCols.asJava, cm.bucketFields.get.numberOfBuckets))
     }
+    if (cm.partitionInfo.isDefined) {
+      val partitionInfo = cm.partitionInfo.get
+      val PartitionColumnSchema = partitionInfo.getColumnSchemaList.asScala
+      val partitionCols = allColumns.filter { column =>
+        PartitionColumnSchema.exists(_.getColumnName.equalsIgnoreCase(column.getColumnName))
+      }.asJava
+      partitionInfo.setColumnSchemaList(partitionCols)
+      tableSchema.setPartitionInfo(partitionInfo)
+    }
     tableSchema.setTableName(cm.tableName)
     tableSchema.setListOfColumns(allColumns.asJava)
     tableSchema.setSchemaEvalution(schemaEvol)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ad5c8a0c/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
index 85d54d0..99a20b4 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
@@ -31,6 +31,7 @@ import org.apache.spark.sql.execution.ExplainCommand
 import org.apache.spark.sql.execution.command._
 import org.apache.spark.sql.execution.datasources.DescribeCommand
 import org.apache.spark.sql.hive.HiveQlWrapper
+import org.apache.spark.sql.types.StructField
 
 import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
 import org.apache.carbondata.spark.util.CommonUtil
@@ -153,6 +154,7 @@ class CarbonSqlParser() extends CarbonDDLSqlParser {
         var fields: Seq[Field] = Seq[Field]()
         var tableComment: String = ""
         var tableProperties = Map[String, String]()
+        var partitionByFields: Seq[Field] = Seq[Field]()
         var partitionCols: Seq[PartitionerField] = Seq[PartitionerField]()
         var likeTableName: String = ""
         var storedBy: String = ""
@@ -239,8 +241,27 @@ class CarbonSqlParser() extends CarbonDDLSqlParser {
                   val columnName = col.getName()
                   val dataType = Option(col.getType)
                   val comment = col.getComment
+                  val rawSchema = '`' + col.getName + '`' + ' ' + col.getType
+                  val field = Field(columnName, dataType, Some(columnName), None)
+
+                  // the data type of the decimal type will be like decimal(10,0)
+                  // so checking the start of the string and taking the precision and scale.
+                  // resetting the data type with decimal
+                  if (field.dataType.getOrElse("").startsWith("decimal")) {
+                    val (precision, scale) = getScaleAndPrecision(col.getType)
+                    field.precision = precision
+                    field.scale = scale
+                    field.dataType = Some("decimal")
+                  }
+                  if (field.dataType.getOrElse("").startsWith("char")) {
+                    field.dataType = Some("char")
+                  } else if (field.dataType.getOrElse("").startsWith("float")) {
+                    field.dataType = Some("float")
+                  }
+                  field.rawSchema = rawSchema
                   val partitionCol = new PartitionerField(columnName, dataType, comment)
                   partitionCols ++= Seq(partitionCol)
+                  partitionByFields ++= Seq(field)
                 }
               }
             case Token("TOK_TABLEPROPERTIES", list :: Nil) =>
@@ -266,11 +287,26 @@ class CarbonSqlParser() extends CarbonDDLSqlParser {
             case _ => // Unsupport features
           }
 
-
           // validate tblProperties
           if (!CommonUtil.validateTblProperties(tableProperties, fields)) {
             throw new MalformedCarbonCommandException("Invalid table properties")
           }
+
+          if (partitionCols.nonEmpty) {
+            if (!CommonUtil.validatePartitionColumns(tableProperties, partitionCols)) {
+              throw new MalformedCarbonCommandException("Invalid table properties")
+            }
+            // partition columns should not be part of the schema
+            val colNames = fields.map(_.column)
+            val badPartCols = partitionCols.map(_.partitionColumn).toSet.intersect(colNames.toSet)
+            if (badPartCols.nonEmpty) {
+              throw new MalformedCarbonCommandException(
+                "Partition columns should not be specified in the schema: " +
+                badPartCols.map("\"" + _ + "\"").mkString("[", ",", "]"))
+            }
+            fields ++= partitionByFields
+          }
+
           // prepare table model of the collected tokens
           val tableModel: TableModel = prepareTableModel(ifNotExistPresent,
             dbName,

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ad5c8a0c/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
index a515f0b..b1edc31 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
@@ -97,9 +97,10 @@ class CarbonSqlAstBuilder(conf: SQLConf) extends SparkSqlAstBuilder(conf) {
       if (ctx.bucketSpec != null) {
         operationNotAllowed("CREATE TABLE ... CLUSTERED BY", ctx)
       }
-      val partitionerFields = Option(ctx.partitionColumns).toSeq.flatMap(visitColTypeList)
-        .map( structField =>
-            PartitionerField(structField.name, Some(structField.dataType.toString), null))
+      val partitionByStructFields = Option(ctx.partitionColumns).toSeq.flatMap(visitColTypeList)
+      val partitionerFields = partitionByStructFields.map { structField =>
+        PartitionerField(structField.name, Some(structField.dataType.toString), null)
+      }
       val cols = Option(ctx.columns).toSeq.flatMap(visitColTypeList)
       val properties = Option(ctx.tablePropertyList).map(visitPropertyKeyValues)
         .getOrElse(Map.empty)
@@ -122,15 +123,15 @@ class CarbonSqlAstBuilder(conf: SQLConf) extends SparkSqlAstBuilder(conf) {
         if (!CommonUtil.validatePartitionColumns(tableProperties, partitionerFields)) {
           throw new MalformedCarbonCommandException("Invalid partition definition")
         }
-        // partition columns must be part of the schema
+        // partition columns should not be part of the schema
         val badPartCols = partitionerFields.map(_.partitionColumn).toSet.intersect(colNames.toSet)
-        if (badPartCols.isEmpty) {
-          operationNotAllowed(s"Partition columns must be specified in the schema: " +
+        if (badPartCols.nonEmpty) {
+          operationNotAllowed(s"Partition columns should not be specified in the schema: " +
                               badPartCols.map("\"" + _ + "\"").mkString("[", ",", "]"), ctx)
         }
       }
-
-      val fields = cols.map { col =>
+      val schema = cols ++ partitionByStructFields
+      val fields = schema.map { col =>
         val x = if (col.dataType.catalogString == "float") {
           '`' + col.name + '`' + " double"
         }


Mime
View raw message