carbondata-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jack...@apache.org
Subject [27/38] incubator-carbondata git commit: reuse test case for integration module
Date Sat, 07 Jan 2017 16:37:01 GMT
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
new file mode 100644
index 0000000..2f865fd
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.integration.spark.testsuite.dataload
+
+import java.math.BigDecimal
+
+import scala.collection.mutable.ArrayBuffer
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.carbon.path.{CarbonStorePath, CarbonTablePath}
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory
+
+class TestLoadDataGeneral extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    sql("DROP TABLE IF EXISTS loadtest")
+    sql(
+      """
+        | CREATE TABLE loadtest(id int, name string, city string, age int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+
+  }
+
+  private def checkSegmentExists(
+      segmentId: String,
+      datbaseName: String,
+      tableName: String): Boolean = {
+    val carbonTable = org.apache.carbondata.core.carbon.metadata.CarbonMetadata.getInstance()
+      .getCarbonTable(datbaseName + "_" + tableName)
+    val partitionPath = CarbonStorePath.getCarbonTablePath(storeLocation,
+      carbonTable.getCarbonTableIdentifier).getPartitionDir("0")
+    val fileType: FileFactory.FileType = FileFactory.getFileType(partitionPath)
+    val carbonFile = FileFactory.getCarbonFile(partitionPath, fileType)
+    val segments: ArrayBuffer[String] = ArrayBuffer()
+    carbonFile.listFiles.foreach { file =>
+      segments += CarbonTablePath.DataPathUtil.getSegmentId(file.getAbsolutePath + "/dummy")
+    }
+    segments.contains(segmentId)
+  }
+
+  test("test data loading CSV file") {
+    val testData = s"$resourcesPath/sample.csv"
+    sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest")
+    checkAnswer(
+      sql("SELECT COUNT(*) FROM loadtest"),
+      Seq(Row(4))
+    )
+  }
+
+  test("test data loading CSV file without extension name") {
+    val testData = s"$resourcesPath/sample"
+    sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest")
+    checkAnswer(
+      sql("SELECT COUNT(*) FROM loadtest"),
+      Seq(Row(8))
+    )
+  }
+
+  test("test data loading GZIP compressed CSV file") {
+    val testData = s"$resourcesPath/sample.csv.gz"
+    sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest")
+    checkAnswer(
+      sql("SELECT COUNT(*) FROM loadtest"),
+      Seq(Row(12))
+    )
+  }
+
+  test("test data loading BZIP2 compressed CSV file") {
+    val testData = s"$resourcesPath/sample.csv.bz2"
+    sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest")
+    checkAnswer(
+      sql("SELECT COUNT(*) FROM loadtest"),
+      Seq(Row(16))
+    )
+  }
+
+  test("test data loading CSV file with delimiter char \\017") {
+    val testData = s"$resourcesPath/sample_withDelimiter017.csv"
+    sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest options ('delimiter'='\\017')")
+    checkAnswer(
+      sql("SELECT COUNT(*) FROM loadtest"),
+      Seq(Row(20))
+    )
+  }
+
+  test("test data loading with invalid values for mesasures") {
+    val testData = s"$resourcesPath/invalidMeasures.csv"
+    sql("drop table if exists invalidMeasures")
+    sql("CREATE TABLE invalidMeasures (country String, salary double, age decimal(10,2)) STORED BY 'carbondata'")
+    sql(s"LOAD DATA LOCAL INPATH '$testData' into table invalidMeasures options('Fileheader'='country,salary,age')")
+    checkAnswer(
+      sql("SELECT * FROM invalidMeasures"),
+      Seq(Row("India",null,new BigDecimal("22.44")), Row("Russia",null,null), Row("USA",234.43,null))
+    )
+  }
+
+  test("test data loading into table whose name has '_'") {
+    sql("DROP TABLE IF EXISTS load_test")
+    sql(""" CREATE TABLE load_test(id int, name string, city string, age int)
+        STORED BY 'org.apache.carbondata.format' """)
+    val testData = s"$resourcesPath/sample.csv"
+    try {
+      sql(s"LOAD DATA LOCAL INPATH '$testData' into table load_test")
+      sql(s"LOAD DATA LOCAL INPATH '$testData' into table load_test")
+    } catch {
+      case ex: Exception =>
+        assert(false)
+    }
+    assert(checkSegmentExists("0", "default", "load_test"))
+    assert(checkSegmentExists("1", "default", "load_test"))
+    sql("DROP TABLE load_test")
+  }
+
+  override def afterAll {
+    sql("DROP TABLE if exists loadtest")
+    sql("drop table if exists invalidMeasures")
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithAutoLoadMerge.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithAutoLoadMerge.scala
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithAutoLoadMerge.scala
new file mode 100644
index 0000000..55cba24
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithAutoLoadMerge.scala
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.integration.spark.testsuite.dataload
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+/**
+  * Test class of creating and loading for carbon table with auto load merge
+  */
+class TestLoadDataWithAutoLoadMerge extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll: Unit = {
+    sql("DROP TABLE IF EXISTS automerge")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "true")
+    sql(
+      """
+         CREATE TABLE automerge(id int, name string, city string, age int)
+         STORED BY 'org.apache.carbondata.format'
+      """)
+  }
+
+  test("test data loading with auto load merge") {
+    val testData = s"$resourcesPath/sample.csv"
+    sql(s"LOAD DATA LOCAL INPATH '$testData' into table automerge")
+    checkAnswer(
+      sql("SELECT COUNT(*) FROM automerge"),
+      Seq(Row(4))
+    )
+  }
+
+  override def afterAll: Unit = {
+    sql("DROP TABLE IF EXISTS automerge")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "false")
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithBlankLine.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithBlankLine.scala
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithBlankLine.scala
new file mode 100644
index 0000000..88dbbe7
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithBlankLine.scala
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.integration.spark.testsuite.dataload
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+/**
+  * Test Class for data loading when there are blank lines in data
+  *
+  */
+class TestLoadDataWithBlankLine extends QueryTest with BeforeAndAfterAll {
+  override def beforeAll {
+    sql("drop table if exists carbontable")
+    sql("CREATE TABLE carbontable (empno int, empname String, designation String, " +
+      "doj String, workgroupcategory int, workgroupcategoryname String, deptno int, " +
+      "deptname String, projectcode int, projectjoindate String, projectenddate String, " +
+      "attendance int,utilization int,salary int) " +
+        "STORED BY 'org.apache.carbondata.format'")
+    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/datawithblanklines.csv' INTO TABLE" +
+        " carbontable OPTIONS('DELIMITER'= ',')")
+
+    sql("drop table if exists carbontable2")
+    sql("CREATE TABLE carbontable2 (empno int, empname String, designation String, " +
+      "doj String, workgroupcategory int, workgroupcategoryname String, deptno int, " +
+      "deptname String, projectcode int, projectjoindate String, projectenddate String, " +
+      "attendance int,utilization int,salary int) " +
+      "STORED BY 'org.apache.carbondata.format'")
+  }
+  test("test carbon table data loading when there are  blank lines in data") {
+    checkAnswer(sql("select count(*) from carbontable"),
+      Seq(Row(18)))
+  }
+
+  test("test carbon table data loading when the first line is blank") {
+    sql(s"LOAD DATA LOCAL INPATH '${resourcesPath}/dataWithNullFirstLine.csv' INTO TABLE " +
+      "carbontable2 OPTIONS('DELIMITER'= ',','FILEHEADER'='empno,empname,designation,doj,workgroupcategory,workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate,attendance,utilization,salary')")
+
+    checkAnswer(sql("select count(*) from carbontable2"),
+      Seq(Row(11)))
+  }
+
+  override def afterAll {
+    sql("drop table if exists carbontable")
+    sql("drop table if exists carbontable2")
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithEmptyArrayColumns.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithEmptyArrayColumns.scala
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithEmptyArrayColumns.scala
new file mode 100644
index 0000000..ae00265
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithEmptyArrayColumns.scala
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.integration.spark.testsuite.dataload
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+/**
+ * Test Class for data loading when there are null measures in data
+ *
+ */
+class TestLoadDataWithEmptyArrayColumns extends QueryTest with BeforeAndAfterAll {
+  override def beforeAll {
+    sql("drop table if exists nest13")
+    sql("""
+           CREATE TABLE nest13 (imei string,age int,
+           productdate timestamp,gamePointId double,
+           reserved6 array<string>,mobile struct<poc:string, imsi:int>)
+           STORED BY 'org.apache.carbondata.format'
+        """)
+  }
+
+  test("test carbon table data loading when there are empty array columns in data") {
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+        CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT
+      )
+    sql(
+      s"""
+            LOAD DATA inpath '$resourcesPath/arrayColumnEmpty.csv'
+            into table nest13 options ('DELIMITER'=',', 'complex_delimiter_level_1'='/')
+         """
+    )
+    checkAnswer(
+      sql("""
+             SELECT count(*) from nest13
+          """),
+      Seq(Row(20)))
+  }
+
+  override def afterAll {
+    sql("drop table nest13")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithJunkChars.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithJunkChars.scala
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithJunkChars.scala
new file mode 100644
index 0000000..685e52d
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithJunkChars.scala
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.integration.spark.testsuite.dataload
+
+import java.io.{BufferedWriter, File, FileWriter}
+import java.util.Random
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+class TestLoadDataWithJunkChars extends QueryTest with BeforeAndAfterAll {
+  var filePath = ""
+  val junkchars = "ǍǎǏǐǑǒǓǔǕǖǗǘǙǚǛǜǝǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰ"
+
+  def buildTestData() = {
+    filePath = s"$integrationPath/spark-common-test/target/junkcharsdata.csv"
+    val file = new File(filePath)
+    val writer = new BufferedWriter(new FileWriter(file))
+    writer.write("c1,c2\n")
+    val random = new Random
+    for (i <- 1 until 1000000) {
+      writer.write("a" + i + "," + junkchars + "\n")
+      if ( i % 10000 == 0) {
+        writer.flush()
+      }
+    }
+    writer.write("a1000000," + junkchars)
+    writer.close
+  }
+
+  test("[bug]fix bug of duplicate rows in UnivocityCsvParser #877") {
+    buildTestData()
+    sql("drop table if exists junkcharsdata")
+    sql("""create table if not exists junkcharsdata
+             (c1 string, c2 string)
+             STORED BY 'org.apache.carbondata.format'""")
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' into table junkcharsdata").show
+    sql("select * from junkcharsdata").show(20,false)
+    checkAnswer(sql("select count(*) from junkcharsdata"), Seq(Row(1000000)))
+    sql("drop table if exists junkcharsdata")
+    new File(filePath).delete()
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithMaxMinInteger.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithMaxMinInteger.scala
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithMaxMinInteger.scala
new file mode 100644
index 0000000..c24f451
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithMaxMinInteger.scala
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.integration.spark.testsuite.dataload
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+/**
+ * Test Class for data loading when there are min integer value in int column
+ *
+ */
+class TestLoadDataWithMaxMinInteger extends QueryTest with BeforeAndAfterAll {
+  override def beforeAll {
+    sql("drop table if exists integer_table_01")
+    sql("drop table if exists integer_table_02")
+    sql("drop table if exists integer_table_03")
+  }
+  test("test carbon table data loading when the int column " +
+    "contains min integer value") {
+    sql(
+      """
+        CREATE TABLE integer_table_01(imei string,age int)
+        STORED BY 'org.apache.carbondata.format'
+      """)
+    sql(
+      s"""
+        LOAD DATA INPATH '$resourcesPath/datawithmininteger.csv'
+        INTO table integer_table_01 options ('DELIMITER'=',',
+        'QUOTECHAR'='"')
+      """)
+    checkAnswer(sql("select age from integer_table_01"),
+      Seq(Row(10), Row(26), Row(10), Row(10), Row(20),
+        Row(10), Row(10), Row(10), Row(10), Row(10),
+        Row(-2147483648)))
+  }
+
+  test("test carbon table data loading when the int column " +
+    "contains max integer value") {
+    sql(
+      """
+        CREATE TABLE integer_table_02(imei string,age int)
+        STORED BY 'org.apache.carbondata.format'
+      """)
+    sql(
+      s"""
+        LOAD DATA INPATH '$resourcesPath/datawithmaxinteger.csv'
+        INTO table integer_table_02 options ('DELIMITER'=',',
+        'QUOTECHAR'='"')
+      """)
+    checkAnswer(sql("select age from integer_table_02"),
+      Seq(Row(10), Row(26), Row(10), Row(10), Row(20),
+        Row(10), Row(10), Row(10), Row(10), Row(10),
+        Row(2147483647)))
+  }
+
+  test("test carbon table data loading when the int column " +
+    "contains min and max integer value") {
+    sql(
+      """
+        CREATE TABLE integer_table_03(imei string,age int)
+        STORED BY 'org.apache.carbondata.format'
+      """)
+    sql(
+      s"""
+        LOAD DATA INPATH '$resourcesPath/datawithmaxmininteger.csv'
+        INTO table integer_table_03 options ('DELIMITER'=',',
+        'QUOTECHAR'='"')
+      """)
+    checkAnswer(sql("select age from integer_table_03"),
+      Seq(Row(10), Row(26), Row(10), Row(10), Row(20),
+        Row(10), Row(10), Row(10), Row(10), Row(10),
+        Row(-2147483648), Row(2147483647)))
+  }
+  override def afterAll {
+    sql("drop table if exists integer_table_01")
+    sql("drop table if exists integer_table_02")
+    sql("drop table if exists integer_table_03")
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithNullMeasures.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithNullMeasures.scala
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithNullMeasures.scala
new file mode 100644
index 0000000..ae9c199
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithNullMeasures.scala
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.integration.spark.testsuite.dataload
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+/**
+ * Test Class for data loading when there are null measures in data
+ *
+ */
+class TestLoadDataWithNullMeasures extends QueryTest with BeforeAndAfterAll {
+  override def beforeAll {
+    sql("DROP TABLE IF EXISTS carbontable")
+    sql(
+      "CREATE TABLE carbontable (empno int, empname String, designation String, doj String, " +
+      "workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, " +
+      "projectcode int, projectjoindate String, projectenddate String,attendance int,utilization " +
+      "int,salary int) STORED BY 'org.apache.carbondata.format'")
+  }
+
+  test("test carbon table data loading when there are null measures in data") {
+    try {
+      sql(
+        s"LOAD DATA LOCAL INPATH '$resourcesPath/datawithnullmsrs.csv' INTO TABLE " +
+        "carbontable OPTIONS('DELIMITER'= ',')");
+    } catch {
+      case e: Throwable => e.printStackTrace()
+    }
+  }
+
+  override def afterAll {
+    sql("drop table carbontable")
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/emptyrow/TestCSVHavingOnlySpaceChar.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/emptyrow/TestCSVHavingOnlySpaceChar.scala
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/emptyrow/TestCSVHavingOnlySpaceChar.scala
new file mode 100644
index 0000000..0ecc710
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/emptyrow/TestCSVHavingOnlySpaceChar.scala
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.integration.spark.testsuite.emptyrow
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+class TestCSVHavingOnlySpaceChar extends QueryTest with BeforeAndAfterAll {
+
+  var csvFilePath : String = null
+
+  override def beforeAll {
+    sql("drop table if exists emptyRowCarbonTable")
+    //eid,ename,sal,presal,comm,deptno,Desc
+    sql(
+      "create table if not exists emptyRowCarbonTable (eid int,ename String,sal decimal,presal " +
+        "decimal,comm decimal" +
+        "(37,37),deptno decimal(18,2),Desc String) STORED BY 'org.apache.carbondata.format'"
+    )
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/mm/dd")
+     csvFilePath = s"$resourcesPath/emptyrow/csvwithonlyspacechar.csv"
+      }
+
+
+  test("dataload") {
+    try {
+      sql(
+        s"""LOAD DATA INPATH '$csvFilePath' INTO table emptyRowCarbonTable OPTIONS('DELIMITER'=',','QUOTECHAR'='"')""")
+    } catch {
+      case e: Throwable =>
+        System.out.println(e.getMessage)
+        assert(e.getMessage.contains("First line of the csv is not valid."))
+    }
+  }
+
+  override def afterAll {
+    sql("drop table emptyRowCarbonTable")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/emptyrow/TestEmptyRows.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/emptyrow/TestEmptyRows.scala
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/emptyrow/TestEmptyRows.scala
new file mode 100644
index 0000000..4df8912
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/emptyrow/TestEmptyRows.scala
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.singlevaluerow
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
/**
 * Compares CarbonData against an identically-loaded Hive table to check
 * that rows with empty values are handled consistently by both.
 */
class TestEmptyRows extends QueryTest with BeforeAndAfterAll {

  override def beforeAll {
    sql("drop table if exists emptyRowCarbonTable")
    sql("drop table if exists emptyRowHiveTable")
    // Schema for both tables: eid,ename,sal,presal,comm,deptno,Desc
    sql(
      "create table if not exists emptyRowCarbonTable (eid int,ename String," +
        "sal decimal,presal decimal,comm decimal(37,37),deptno decimal(18,2)," +
        "Desc String) STORED BY 'org.apache.carbondata.format'")
    // Reference table stored in plain Hive text format.
    sql(
      "create table if not exists emptyRowHiveTable(eid int,ename String," +
        "sal decimal,presal decimal,comm decimal(37,37),deptno decimal(18,2)," +
        "Desc String)row format delimited fields terminated by ','")
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
    val csvFilePath = s"$resourcesPath/emptyrow/emptyRows.csv"
    // Load the same fixture into both tables.
    sql(
      s"""LOAD DATA INPATH '$csvFilePath' INTO table emptyRowCarbonTable OPTIONS('DELIMITER'=',','QUOTECHAR'='"','FILEHEADER'='eid,ename,sal,presal,comm,deptno,Desc')""")
    sql(s"LOAD DATA LOCAL INPATH '$csvFilePath' into table emptyRowHiveTable")
  }

  test("select eid from table") {
    // Dimension column must round-trip identically through both formats.
    checkAnswer(
      sql("select eid from emptyRowCarbonTable"),
      sql("select eid from emptyRowHiveTable"))
  }

  test("select Desc from emptyRowTable") {
    // String column containing the empty values under test.
    checkAnswer(
      sql("select Desc from emptyRowCarbonTable"),
      sql("select Desc from emptyRowHiveTable"))
  }

  override def afterAll {
    sql("drop table emptyRowCarbonTable")
    sql("drop table emptyRowHiveTable")
    // Restore the default timestamp format.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
  }
}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/aggquery/AggregateQueryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/aggquery/AggregateQueryTestCase.scala
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/aggquery/AggregateQueryTestCase.scala
new file mode 100644
index 0000000..a73038b
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/aggquery/AggregateQueryTestCase.scala
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.spark.testsuite.aggquery
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+/**
+ * test cases for aggregate query
+ */
/**
 * Test cases for aggregate queries (GROUP BY / HAVING) on a carbon table.
 */
class AggregateQueryTestCase extends QueryTest with BeforeAndAfterAll {
  override def beforeAll {
    // FIX: drop the table first so the suite can be re-run against a dirty
    // metastore, consistent with the other suites in this module.
    sql("drop table if exists normal")
    sql("create table normal (column1 string,column2 string,column3 string,column4 string,column5 string,column6 string,column7 string,column8 string,column9 string,column10 string,measure1 int,measure2 int,measure3 int,measure4 int) STORED BY 'org.apache.carbondata.format'")
    // FIX: removed the stray trailing semicolon after the load statement.
    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/10dim_4msr.csv' INTO table normal options('FILEHEADER'='column1,column2,column3,column4,column5,column6,column7,column8,column9,column10,measure1,measure2,measure3,measure4')")
  }

  test("group by with having") {
    // Exactly one group in the fixture exceeds the count threshold of 5.
    checkAnswer(
      sql("select column1,count(*) from normal group by column1 having count(*)>5"),
      Seq(Row("column1119", 6)))
  }

  override def afterAll {
    // FIX: "if exists" so cleanup cannot itself fail when setup aborted early.
    sql("drop table if exists normal")
  }

}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/aggquery/AllDataTypesTestCaseAggregate.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/aggquery/AllDataTypesTestCaseAggregate.scala
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/aggquery/AllDataTypesTestCaseAggregate.scala
new file mode 100644
index 0000000..65e736d
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/aggquery/AllDataTypesTestCaseAggregate.scala
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.aggquery
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+/**
+ * Test Class for aggregate query on multiple datatypes
+ *
+ */
/**
 * Test Class for aggregate queries on multiple datatypes, validated by
 * comparing a carbon table against an identically-loaded Hive table.
 */
class AllDataTypesTestCaseAggregate extends QueryTest with BeforeAndAfterAll {

  override def beforeAll {
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
    sql("DROP TABLE IF EXISTS alldatatypestableAGG")
    // FIX: this drop was issued twice in the original (before and after the
    // carbon load); once up front is sufficient.
    sql("DROP TABLE IF EXISTS alldatatypescubeAGG_hive")
    sql(
      "CREATE TABLE alldatatypestableAGG (empno int, empname String, designation String, doj " +
      "Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname " +
      "String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance " +
      "int,utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
    sql(
      s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO TABLE alldatatypestableAGG " +
      "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
    // Reference table in plain Hive text format for result comparison.
    sql(
      "CREATE TABLE alldatatypescubeAGG_hive (empno int, empname String, designation String, doj " +
      "Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname " +
      "String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance " +
      "int,utilization int,salary int)row format delimited fields terminated by ','")
    sql(
      s"LOAD DATA LOCAL INPATH '$resourcesPath/datawithoutheader.csv' INTO TABLE alldatatypescubeAGG_hive")
  }

  test(
    "select empno,empname,utilization,count(salary),sum(empno) from alldatatypestableAGG where " +
    "empname in ('arvind','ayushi') group by empno,empname,utilization")
  {
    checkAnswer(
      sql(
        "select empno,empname,utilization,count(salary),sum(empno) from alldatatypestableAGG where" +
        " empname in ('arvind','ayushi') group by empno,empname,utilization"),
      sql(
        "select empno,empname,utilization,count(salary),sum(empno) from alldatatypescubeAGG_hive where" +
        " empname in ('arvind','ayushi') group by empno,empname,utilization"))
  }

  test(
    "select empname,trim(designation),avg(salary),avg(empno) from alldatatypestableAGG where " +
    "empname in ('arvind','ayushi') group by empname,trim(designation)")
  {
    checkAnswer(
      sql(
        "select empname,trim(designation),avg(salary),avg(empno) from alldatatypestableAGG where " +
        "empname in ('arvind','ayushi') group by empname,trim(designation)"),
      sql(
        "select empname,trim(designation),avg(salary),avg(empno) from alldatatypescubeAGG_hive where " +
        "empname in ('arvind','ayushi') group by empname,trim(designation)"))
  }

  test(
    "select empname,length(designation),max(empno),min(empno), avg(empno) from " +
    "alldatatypestableAGG where empname in ('arvind','ayushi') group by empname,length" +
    "(designation) order by empname")
  {
    checkAnswer(
      sql(
        "select empname,length(designation),max(empno),min(empno), avg(empno) from " +
        "alldatatypestableAGG where empname in ('arvind','ayushi') group by empname,length" +
        "(designation) order by empname"),
      sql(
        "select empname,length(designation),max(empno),min(empno), avg(empno) from " +
        "alldatatypescubeAGG_hive where empname in ('arvind','ayushi') group by empname,length" +
        "(designation) order by empname"))
  }

  test("select count(empno), count(distinct(empno)) from alldatatypestableAGG")
  {
    checkAnswer(
      sql("select count(empno), count(distinct(empno)) from alldatatypestableAGG"),
      sql("select count(empno), count(distinct(empno)) from alldatatypescubeAGG_hive"))
  }

  override def afterAll {
    // FIX: the original also dropped "alldatatypescubeAGG", a table this
    // suite never creates (copy-paste leftover); only the two tables created
    // in beforeAll are cleaned up here.
    sql("DROP TABLE IF EXISTS alldatatypestableAGG")
    sql("DROP TABLE IF EXISTS alldatatypescubeAGG_hive")
  }
}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/aggquery/AverageQueryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/aggquery/AverageQueryTestCase.scala
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/aggquery/AverageQueryTestCase.scala
new file mode 100644
index 0000000..0604189
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/aggquery/AverageQueryTestCase.scala
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.spark.testsuite.aggquery
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+/**
+ * test cases for aggregate query
+ */
/**
 * Test cases for aggregate functions (avg/max/min/sum) over measure,
 * dimension, timestamp and string columns, checked against Hive results.
 */
class AverageQueryTestCase extends QueryTest with BeforeAndAfterAll {

  /** Runs the same projection against the carbon and hive tables and
    * asserts both produce identical results. */
  private def compareWithHive(projection: String): Unit = {
    checkAnswer(
      sql(s"SELECT $projection FROM carbonTable"),
      sql(s"SELECT $projection FROM hiveTable"))
  }

  override def beforeAll {
    // Timestamp parsing must be configured before any data is loaded.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
    sql("drop table if exists carbonTable")
    sql("drop table if exists hiveTable")
    sql("""
       CREATE TABLE carbonTable (ID int, date timeStamp, country string, count int,
       phonetype string, serialname string, salary double)
       STORED BY 'org.apache.carbondata.format'
        TBLPROPERTIES('DICTIONARY_INCLUDE'='ID')""")
    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/avgTest.csv' INTO table carbonTable""")
    // create a hive table for compatible check
    sql("""
       CREATE TABLE hiveTable (ID int, date timeStamp, country string, count int,
       phonetype string, serialname string, salary double)
       row format delimited fields terminated by ','""")
    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/avgTest.csv' INTO table hiveTable")
  }

  test("select avg(Measure_IntType)+IntType from carbonTable") {
    compareWithHive("avg(count)+10")
  }

  test("select avg(Dimension_IntType)+IntType from table") {
    compareWithHive("avg(ID)+10")
  }

  test("select avg(TimeStamp)+IntType from table") {
    compareWithHive("avg(date)+10")
  }

  test("select avg(TimeStamp) from table") {
    compareWithHive("avg(date)")
  }

  test("select avg(StringType)+IntType from table") {
    compareWithHive("avg(country)+10")
  }

  test("select max(StringType)+IntType from table") {
    compareWithHive("max(country)+10")
  }

  test("select min(StringType)+IntType from table") {
    compareWithHive("min(country)+10")
  }

  test("select sum(StringType)+IntType from table") {
    compareWithHive("sum(country)+10")
  }

  test("select sum(distinct StringType)+IntType from table") {
    compareWithHive("sum(distinct country)+10")
  }

  override def afterAll {
    sql("drop table carbonTable")
    sql("drop table hiveTable")
    // Restore the default timestamp format.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
  }

}



Mime
View raw message