carbondata-commits mailing list archives

From qiang...@apache.org
Subject [1/2] carbondata git commit: Fix list partitioning: all records loaded into the default partition.
Date Wed, 14 Jun 2017 09:52:09 GMT
Repository: carbondata
Updated Branches:
  refs/heads/master 9eaaac571 -> 4fb38bfb3


Fix list partitioning: all records were loaded into the default partition because the SQL parsers lowercased LIST_INFO and RANGE_INFO values, so upper-case list entries never matched.
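
A note on the failure mode (reconstructed from the diff below, not part of the original commit message): both parsers lowercased every table-property value, so LIST_INFO entries such as 'SE,SSE' were stored as 'se,sse' and never matched the case-sensitive column data. A minimal Scala sketch of the effect, using a simplified stand-in for the partition matching:

    // Sketch only: why lowercasing LIST_INFO values misroutes every row.
    // partitionFor is a simplified stand-in, not CarbonData's pruning code.
    val listInfo = "SE,SSE".toLowerCase        // old behaviour: value lowercased
    val declared = listInfo.split(",").toSet   // Set("se", "sse")

    def partitionFor(designation: String): String =
      if (declared.contains(designation)) designation else "default"

    partitionFor("SE")  // "default" -- the record falls into the default partition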


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/fda02f98
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/fda02f98
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/fda02f98

Branch: refs/heads/master
Commit: fda02f98692bba0dee98b4be0b5c7d4723fce3b3
Parents: 9eaaac5
Author: BJangir <babulaljangir111@gmail.com>
Authored: Fri Jun 9 23:03:08 2017 +0530
Committer: QiangCai <qiangcai@qq.com>
Committed: Wed Jun 14 17:45:27 2017 +0800

----------------------------------------------------------------------
 .../TestDataLoadingForPartitionTable.scala      | 22 ++++++++++++++++++++
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala | 20 +++++++++++++++++-
 .../spark/sql/parser/CarbonSparkSqlParser.scala | 15 ++++++++++---
 3 files changed, 53 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/fda02f98/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDataLoadingForPartitionTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDataLoadingForPartitionTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDataLoadingForPartitionTable.scala
index c0ee4f2..8a35558 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDataLoadingForPartitionTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDataLoadingForPartitionTable.scala
@@ -260,6 +260,27 @@ class TestDataLoadingForPartitionTable extends QueryTest with BeforeAndAfterAll
       sql("select empno, empname, designation, doj, workgroupcategory, workgroupcategoryname,
deptno, deptname, projectcode, projectjoindate, projectenddate, attendance, utilization, salary
from originMultiLoads order by empno"))
   }
 
+  test("list partition with string coloum and  list_info in upper case") {
+    sql(
+      """
+        | CREATE TABLE listTableUpper (empno int, empname String, doj Timestamp,
+        |  workgroupcategoryname String, deptno int, deptname String,
+        |  projectcode int, projectjoindate Timestamp, projectenddate Timestamp, attendance int,
+        |  utilization int, salary int)
+        | PARTITIONED BY (designation string)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('PARTITION_TYPE'='LIST',
+        |  'LIST_INFO'='SE,SSE')
+      """.stripMargin)
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE listTableUpper OPTIONS('DELIMITER'=
',', 'QUOTECHAR'= '"')""")
+
+    validateDataFiles("default_listTableUpper", "0", Seq(0,1,2))
+
+    checkAnswer(sql("select empno, empname, designation, doj, workgroupcategoryname, deptno,
deptname, projectcode, projectjoindate, projectenddate, attendance, utilization, salary from
listTableUpper order by empno"),
+      sql("select empno, empname, designation, doj, workgroupcategoryname, deptno, deptname,
projectcode, projectjoindate, projectenddate, attendance, utilization, salary from originTable
order by empno"))
+  }
+
+
   override def afterAll = {
     dropTable
     if (defaultTimestampFormat == null) {
@@ -284,6 +305,7 @@ class TestDataLoadingForPartitionTable extends QueryTest with BeforeAndAfterAll
     sql("drop table if exists multiLoads")
     sql("drop table if exists multiInserts")
     sql("drop table if exists loadAndInsert")
+    sql("drop table if exists listTableUpper")
   }
 
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fda02f98/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index e219f08..5e37f63 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -789,7 +789,12 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
       case Token("TOK_TABLEPROPLIST", list) =>
         list.map {
           case Token("TOK_TABLEPROPERTY", Token(key, Nil) :: Token(value, Nil) :: Nil) =>
-            unquoteString(key) -> unquoteString(value)
+            val resolvedKey = unquoteString(key)
+            if (needToConvertToLowerCase(resolvedKey)) {
+              (resolvedKey, unquoteString(value))
+            } else {
+              (resolvedKey, unquoteStringWithoutLowerConversion(value))
+            }
         }
     }
   }
@@ -802,6 +807,19 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
     }
   }
 
+  protected def unquoteStringWithoutLowerConversion(str: String): String = {
+    str match {
+      case singleQuotedString(s) => s
+      case doubleQuotedString(s) => s
+      case other => other
+    }
+  }
+
+  private def needToConvertToLowerCase(key: String): Boolean = {
+    val noConvertList = Array("LIST_INFO", "RANGE_INFO")
+    !noConvertList.exists(x => x.equalsIgnoreCase(key))
+  }
+
   protected def validateOptions(optionList: Option[List[(String, String)]]): Unit = {
 
     // validate with all supported options

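The new unquoteStringWithoutLowerConversion helper mirrors the existing unquoteString but skips the case conversion. A self-contained sketch of the quote-stripping pattern; the two regex extractors are assumptions written out here (in the real parser they are inherited fields):

    import scala.util.matching.Regex

    // Assumed definitions; CarbonDDLSqlParser inherits its own extractors.
    val singleQuotedString: Regex = "'([^']*)'".r
    val doubleQuotedString: Regex = "\"([^\"]*)\"".r

    def unquoteStringWithoutLowerConversion(str: String): String = str match {
      case singleQuotedString(s) => s   // strip single quotes, keep original case
      case doubleQuotedString(s) => s   // strip double quotes, keep original case
      case other => other               // not quoted: pass through unchanged
    }

    unquoteStringWithoutLowerConversion("'SE,SSE'")  // "SE,SSE", case preserved
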
http://git-wip-us.apache.org/repos/asf/carbondata/blob/fda02f98/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
index 076af46..258920b 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
@@ -115,7 +115,7 @@ class CarbonSqlAstBuilder(conf: SQLConf) extends SparkSqlAstBuilder(conf) {
       }
 
       val tableProperties = mutable.Map[String, String]()
-      properties.foreach{property => tableProperties.put(property._1, property._2.toLowerCase)}
+      properties.foreach{property => tableProperties.put(property._1, property._2)}
 
       // validate partition clause
       if (partitionerFields.nonEmpty) {
@@ -170,12 +170,21 @@ class CarbonSqlAstBuilder(conf: SQLConf) extends SparkSqlAstBuilder(conf) {
     val badKeys = props.filter { case (_, v) => v == null }.keys
     if (badKeys.nonEmpty) {
       operationNotAllowed(
-        s"Values must be specified for key(s): ${ badKeys.mkString("[", ",", "]") }", ctx)
+        s"Values must be specified for key(s): ${badKeys.mkString("[", ",", "]")}", ctx)
     }
     props.map { case (key, value) =>
-      (key.toLowerCase, value.toLowerCase)
+      if (needToConvertToLowerCase(key)) {
+        (key.toLowerCase, value.toLowerCase)
+      } else {
+        (key.toLowerCase, value)
+      }
     }
   }
 
+  private def needToConvertToLowerCase(key: String): Boolean = {
+    val noConvertList = Array("LIST_INFO", "RANGE_INFO")
+    !noConvertList.exists(x => x.equalsIgnoreCase(key))
+  }
+
 
 }
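
Taken together, the two parsers now lowercase property keys unconditionally but preserve the user-supplied case of LIST_INFO and RANGE_INFO values. A minimal end-to-end sketch of that selective conversion (the property map is a stand-in for the parser's state):

    // Selective lower-casing as introduced by this patch (sketch).
    val noConvertList = Array("LIST_INFO", "RANGE_INFO")

    def needToConvertToLowerCase(key: String): Boolean =
      !noConvertList.exists(_.equalsIgnoreCase(key))

    def normalize(props: Map[String, String]): Map[String, String] =
      props.map { case (key, value) =>
        if (needToConvertToLowerCase(key)) (key.toLowerCase, value.toLowerCase)
        else (key.toLowerCase, value)  // keep case for list/range partition values
      }

    normalize(Map("PARTITION_TYPE" -> "LIST", "LIST_INFO" -> "SE,SSE"))
    // -> Map("partition_type" -> "list", "list_info" -> "SE,SSE")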

