carbondata-commits mailing list archives

From kumarvisha...@apache.org
Subject [1/2] carbondata git commit: handled review comments
Date Thu, 16 Nov 2017 15:12:32 GMT
Repository: carbondata
Updated Branches:
  refs/heads/master 17892b17b -> 808a334f0


handled review comments

Added support for column comments in CarbonData CREATE TABLE; when the table is described and no
comment was specified for a column, the comment defaults to null.
Added a test case for the scenario where the sort column is a boolean column.
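
For illustration only, a minimal sketch of the DDL behaviour this commit enables. The table name
demo_comment, the store path, and the CarbonSession setup are assumptions for the sketch and are
not part of this change:

// Minimal sketch, assuming a Carbon-enabled SparkSession created via CarbonSession;
// the table name `demo_comment` and the store path are placeholders.
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.CarbonSession._

object ColumnCommentSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("column-comment-sketch")
      .master("local[2]")
      .getOrCreateCarbonSession("/tmp/carbon.store")

    // Column-level comments are accepted by the Carbon DDL parser
    // (CarbonSpark2SqlParser.getFields) and carried into the table schema.
    spark.sql(
      "create table demo_comment(id int, name string comment \"This column is called name\") " +
      "stored by 'carbondata'")

    // `describe formatted` prints the comment per column; columns created
    // without a comment show the default value "null".
    spark.sql("describe formatted demo_comment").show(false)
  }
}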


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/9c9521b6
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/9c9521b6
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/9c9521b6

Branch: refs/heads/master
Commit: 9c9521b683fe19458d243c813dc622d30e06074d
Parents: 17892b1
Author: akashrn5 <akashnilugal@gmail.com>
Authored: Tue Oct 24 15:56:53 2017 +0530
Committer: kumarvishal <kumarvishal.1802@gmail.com>
Committed: Thu Nov 16 20:41:00 2017 +0530

----------------------------------------------------------------------
 .../TestCreateTableWithColumnComment.scala      | 54 ++++++++++++++++++++
 .../CarbonDescribeFormattedCommand.scala        | 19 ++++---
 .../sql/parser/CarbonSpark2SqlParser.scala      | 14 +++--
 .../BooleanDataTypesInsertTest.scala            | 40 +++++++++++++++
 4 files changed, 115 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/9c9521b6/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithColumnComment.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithColumnComment.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithColumnComment.scala
new file mode 100644
index 0000000..c291a6f
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithColumnComment.scala
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.createTable
+
+import org.apache.spark.sql.test.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+/**
+ * Test functionality of create table with column comment
+ */
+class TestCreateTableWithColumnComment extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    sql("use default")
+    sql("drop table if exists columnComment")
+    sql("drop table if exists defaultComment")
+  }
+
+  test("test create table with column comment") {
+    sql(
+      "create table columnComment(id int, name string comment \"This column is called name\")
" +
+      "stored by 'carbondata'")
+    checkExistence(sql("describe formatted columnComment"), true, "This column is called
name")
+  }
+
+  test("test create table with default column comment value") {
+    sql(
+      "create table defaultComment(id int, name string) " +
+      "stored by 'carbondata'")
+    checkExistence(sql("describe formatted defaultComment"), true, "null")
+  }
+
+  override def afterAll {
+    sql("use default")
+    sql("drop table if exists columnComment")
+    sql("drop table if exists defaultComment")
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9c9521b6/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonDescribeFormattedCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonDescribeFormattedCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonDescribeFormattedCommand.scala
index 519fbea..7dcad9a 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonDescribeFormattedCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonDescribeFormattedCommand.scala
@@ -65,6 +65,7 @@ private[sql] case class CarbonDescribeFormattedCommand(
     val dims = relation.metaData.dims.map(x => x.toLowerCase)
     var results: Seq[(String, String, String)] = child.schema.fields.map { field =>
       val fieldName = field.name.toLowerCase
+      val colComment = field.getComment().getOrElse("null")
       val comment = if (dims.contains(fieldName)) {
         val dimension = relation.metaData.carbonTable.getDimensionByName(
           relation.tableMeta.carbonTableIdentifier.getTableName, fieldName)
@@ -76,20 +77,21 @@ private[sql] case class CarbonDescribeFormattedCommand(
         if (dimension.hasEncoding(Encoding.DICTIONARY) &&
             !dimension.hasEncoding(Encoding.DIRECT_DICTIONARY)) {
           "DICTIONARY, KEY COLUMN" + (if (dimension.hasEncoding(Encoding.INVERTED_INDEX))
{
-            ""
+            "".concat(",").concat(colComment)
           } else {
-            ",NOINVERTEDINDEX"
+            ",NOINVERTEDINDEX".concat(",").concat(colComment)
           })
         } else {
           "KEY COLUMN" + (if (dimension.hasEncoding(Encoding.INVERTED_INDEX)) {
-            ""
+            "".concat(",").concat(colComment)
           } else {
-            ",NOINVERTEDINDEX"
+            ",NOINVERTEDINDEX".concat(",").concat(colComment)
           })
         }
       } else {
-        "MEASURE"
+        "MEASURE".concat(",").concat(colComment)
       }
+
       (field.name, field.dataType.simpleString, comment)
     }
     val colPropStr = if (colProps.toString().trim().length() > 0) {
@@ -130,8 +132,11 @@ private[sql] case class CarbonDescribeFormattedCommand(
       Seq(("Partition Columns: ", carbonTable.getPartitionInfo(carbonTable.getFactTableName)
         .getColumnSchemaList.asScala.map(_.getColumnName).mkString(","), ""))
     }
-    results.map { case (name, dataType, comment) =>
-      Row(f"$name%-36s", f"$dataType%-80s", f"$comment%-72s")
+    results.map {
+      case (name, dataType, null) =>
+        Row(f"$name%-36s", f"$dataType%-80s", null)
+      case (name, dataType, comment) =>
+        Row(f"$name%-36s", f"$dataType%-80s", f"$comment%-72s")
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9c9521b6/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
index 6df5a04..7a637f7 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
@@ -465,12 +465,16 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
 
   def getFields(schema: Seq[StructField]): Seq[Field] = {
     schema.map { col =>
-      val x = if (col.dataType.catalogString == "float") {
-        '`' + col.name + '`' + " double"
-      }
-      else {
-        '`' + col.name + '`' + ' ' + col.dataType.catalogString
+      var columnComment: String = ""
+      if (col.getComment().isDefined) {
+        columnComment = " comment \"" + col.getComment().get + "\""
       }
+      val x =
+        if (col.dataType.catalogString == "float") {
+          '`' + col.name + '`' + " double" + columnComment
+        } else {
+          '`' + col.name + '`' + ' ' + col.dataType.catalogString + columnComment
+        }
       val f: Field = anyFieldDef(new lexical.Scanner(x.toLowerCase))
       match {
         case Success(field, _) => field.asInstanceOf[Field]

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9c9521b6/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesInsertTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesInsertTest.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesInsertTest.scala
index 2f06900..c6a6708 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesInsertTest.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesInsertTest.scala
@@ -22,6 +22,9 @@ import org.apache.spark.sql.Row
 import org.apache.spark.sql.test.util.QueryTest
 import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
 
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
class BooleanDataTypesInsertTest extends QueryTest with BeforeAndAfterEach with BeforeAndAfterAll {
 
   override def beforeEach(): Unit = {
@@ -945,4 +948,41 @@ class BooleanDataTypesInsertTest extends QueryTest with BeforeAndAfterEach with
     )
   }
 
+  test("Inserting table with bad records, and SORT_COLUMNS is boolean column") {
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "true")
+    sql("DROP TABLE IF EXISTS carbon_table")
+    sql(
+      s"""
+         | CREATE TABLE if not exists carbon_table(
+         | cc BOOLEAN
+         | )
+         | STORED BY 'carbondata'
+         | TBLPROPERTIES('SORT_COLUMNS'='cc')
+       """.stripMargin)
+    sql("insert into carbon_table values(true)")
+    sql("insert into carbon_table values(True)")
+    sql("insert into carbon_table values(TRUE)")
+    sql("insert into carbon_table values('true')")
+    sql("insert into carbon_table values(False)")
+    sql("insert into carbon_table values(false)")
+    sql("insert into carbon_table values(FALSE)")
+    sql("insert into carbon_table values('false')")
+    sql("insert into carbon_table values('tr')")
+    sql("insert into carbon_table values(null)")
+    sql("insert into carbon_table values('truEe')")
+    sql("insert into carbon_table values('falSee')")
+    sql("insert into carbon_table values('t')")
+    sql("insert into carbon_table values('f')")
+    checkAnswer(
+      sql("select * from carbon_table"),
+      Seq(
+        Row(true), Row(true), Row(true), Row(true),
+        Row(false), Row(false), Row(false), Row(false),
+        Row(null), Row(null), Row(null), Row(null), Row(null), Row(null)))
+
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE,
+        CarbonCommonConstants.DEFAULT_ENABLE_AUTO_LOAD_MERGE)
+  }
+
 }

