carbondata-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jack...@apache.org
Subject [24/38] incubator-carbondata git commit: reuse test case for integration module
Date Sat, 07 Jan 2017 16:36:58 GMT
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadWithColumnsMoreThanSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadWithColumnsMoreThanSchema.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadWithColumnsMoreThanSchema.scala
new file mode 100644
index 0000000..7396594
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadWithColumnsMoreThanSchema.scala
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataload
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
+
+/**
+ * This class will test data load in which number of columns in data are more than
+ * the number of columns in schema
+ */
/**
 * Tests data loads where the CSV contains more columns than the table schema,
 * plus validation of the MAXCOLUMNS load option: non-numeric value, valid
 * value, value above the allowed threshold, and boundary values.
 *
 * A hive table with the same schema serves as the expected baseline for
 * row-count comparisons.
 */
class TestDataLoadWithColumnsMoreThanSchema extends QueryTest with BeforeAndAfterAll {

  override def beforeAll {
    sql("DROP TABLE IF EXISTS char_test")
    sql("DROP TABLE IF EXISTS hive_char_test")
    // Identical schemas: carbon table under test, hive table as the reference.
    sql("CREATE TABLE char_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
    sql("CREATE TABLE hive_char_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)row format delimited fields terminated by ','")
    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/character_carbon.csv' into table char_test")
    sql(s"LOAD DATA local inpath '$resourcesPath/character_hive.csv' INTO table hive_char_test")
  }

  test("test count(*) to check for data loss") {
    checkAnswer(sql("select count(*) from char_test"),
      sql("select count(*) from hive_char_test"))
  }

  test("test for invalid value of maxColumns") {
    sql("DROP TABLE IF EXISTS max_columns_test")
    sql("CREATE TABLE max_columns_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
    // A non-numeric MAXCOLUMNS value must make the load fail.
    intercept[Throwable] {
      sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/character_carbon.csv' into table max_columns_test options('MAXCOLUMNS'='avfgd')")
    }
  }

  test("test for valid value of maxColumns") {
    sql("DROP TABLE IF EXISTS valid_max_columns_test")
    sql("CREATE TABLE valid_max_columns_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
    // A valid MAXCOLUMNS value must load all rows. Any failure propagates with
    // its real cause instead of being collapsed into assert(false).
    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/character_carbon.csv' into table valid_max_columns_test options('MAXCOLUMNS'='400')")
    checkAnswer(sql("select count(*) from valid_max_columns_test"),
      sql("select count(*) from hive_char_test"))
  }

  test("test with invalid maxColumns value") {
    // Drop first so the test is re-runnable (CREATE below is not IF NOT EXISTS).
    sql("DROP TABLE IF EXISTS max_columns_value_test")
    sql(
      "CREATE TABLE max_columns_value_test (imei string,age int,task bigint,num double,level " +
      "decimal(10,3),productdate timestamp,mark int,name string) STORED BY 'org.apache.carbondata" +
      ".format'")
    // MAXCOLUMNS smaller than the actual number of columns must fail the load,
    // but NOT with a MalformedCarbonCommandException: the command parses fine,
    // the failure happens during data loading.
    val ex = intercept[Throwable] {
      sql(
        s"LOAD DATA LOCAL INPATH '$resourcesPath/character_carbon.csv' into table " +
        "max_columns_value_test options('FILEHEADER='imei,age','MAXCOLUMNS'='2')")
    }
    assert(!ex.isInstanceOf[MalformedCarbonCommandException])
  }

  test("test for maxcolumns option value greater than threshold value for maxcolumns") {
    sql("DROP TABLE IF EXISTS valid_max_columns_test")
    sql("CREATE TABLE valid_max_columns_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
    // 22000 exceeds the maxcolumns threshold; the load is still expected to
    // succeed (presumably the value is clamped internally — see test name).
    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/character_carbon.csv' into table valid_max_columns_test options('MAXCOLUMNS'='22000')")
    checkAnswer(sql("select count(*) from valid_max_columns_test"),
      sql("select count(*) from hive_char_test"))
  }

  test("test for boundary value for maxcolumns") {
    sql("DROP TABLE IF EXISTS boundary_max_columns_test")
    sql("CREATE TABLE boundary_max_columns_test (empno string, empname String, designation String, doj String, " +
        "workgroupcategory string, workgroupcategoryname String, deptno string, deptname String, " +
        "projectcode string, projectjoindate String, projectenddate String,attendance double," +
        "utilization double,salary double) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES" +
        "('DICTIONARY_EXCLUDE'='empno,empname,designation,doj,workgroupcategory," +
        "workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate')")
    // MAXCOLUMNS equal to the schema's column count (14): load must succeed.
    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' into table boundary_max_columns_test options('MAXCOLUMNS'='14')")
  }

  test("test for maxcolumns value less than columns in 1st line of csv file") {
    sql("DROP TABLE IF EXISTS boundary_max_columns_test")
    sql("CREATE TABLE boundary_max_columns_test (empno string, empname String, designation String, doj String, " +
        "workgroupcategory string, workgroupcategoryname String, deptno string, deptname String, " +
        "projectcode string, projectjoindate String, projectenddate String,attendance double," +
        "utilization double,salary double) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES" +
        "('DICTIONARY_EXCLUDE'='empno,empname,designation,doj,workgroupcategory," +
        "workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate')")
    // MAXCOLUMNS one below the schema's column count (13): load is still
    // expected to succeed per the original test's expectation.
    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' into table boundary_max_columns_test options('MAXCOLUMNS'='13')")
  }

  override def afterAll {
    sql("DROP TABLE IF EXISTS char_test")
    sql("DROP TABLE IF EXISTS hive_char_test")
    sql("DROP TABLE IF EXISTS max_columns_value_test")
    sql("DROP TABLE IF EXISTS boundary_max_columns_test")
    sql("DROP TABLE IF EXISTS valid_max_columns_test")
    sql("DROP TABLE IF EXISTS max_columns_test")
  }
}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataWithDicExcludeAndInclude.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataWithDicExcludeAndInclude.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataWithDicExcludeAndInclude.scala
new file mode 100644
index 0000000..8588b8f
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataWithDicExcludeAndInclude.scala
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataload
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
/**
 * Tests a data load with DICTIONARY_EXCLUDE and DICTIONARY_INCLUDE table
 * properties on a CSV containing empty dimension values, comparing the carbon
 * result against an identically-loaded hive table.
 */
class TestLoadDataWithDictionaryExcludeAndInclude extends QueryTest with BeforeAndAfterAll {
  // Path of the CSV fixture loaded into the carbon table; set in buildTestData.
  var filePath: String = _
  // NOTE(review): `pwd` is never assigned or read in this class — candidate for removal.
  var pwd: String = _

  // Points filePath at the empty-dimension CSV fixture.
  def buildTestData() = {
    filePath = s"$resourcesPath/emptyDimensionData.csv"
  }

  // Drops both tables so beforeAll/afterAll are idempotent.
  def dropTable() = {
    sql("DROP TABLE IF EXISTS exclude_include_t3")
    sql("DROP TABLE IF EXISTS exclude_include_hive_t3")
  }

  // Creates the hive baseline table and the carbon table under test.
  // NOTE(review): the catch logs and swallows ANY Throwable, so a failed
  // CREATE surfaces later as a confusing failure in the test body — confirm
  // this best-effort behavior is intended.
  def buildTable() = {
    try {
      sql(
        """
           CREATE TABLE exclude_include_hive_t3
           (ID Int, date Timestamp, country String,
           name String, phonetype String, serialname String, salary Int)
           row format delimited fields terminated by ','
        """)
      sql(
        """
           CREATE TABLE exclude_include_t3
           (ID Int, date Timestamp, country String,
           name String, phonetype String, serialname String, salary Int)
           STORED BY 'org.apache.carbondata.format'
           TBLPROPERTIES('DICTIONARY_EXCLUDE'='country,phonetype,serialname',
           'DICTIONARY_INCLUDE'='ID')
        """)
    } catch {
      // NOTE(review): getStackTraceString is deprecated in Scala 2.11+.
      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
    }
  }

  // Loads the carbon table (with a custom timestamp format) and the hive
  // baseline table. Same swallow-and-log caveat as buildTable above.
  def loadTable() = {
    try {
      // The fixture's timestamps use yyyy/MM/dd, so override the global format.
      CarbonProperties.getInstance()
        .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
      sql(
        s"""
           LOAD DATA LOCAL INPATH '$filePath' into table exclude_include_t3
           """)
      sql(
        s"""
           LOAD DATA LOCAL INPATH '$resourcesPath/emptyDimensionDataHive.csv' into table exclude_include_hive_t3
           """)
    } catch {
      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
    }
  }

  override def beforeAll {
    dropTable
    buildTestData
    buildTable
    loadTable
  }

  test("test load data with dictionary exclude & include and with empty dimension") {
    // Carbon and hive must agree on the ID column despite empty dimensions.
    checkAnswer(
      sql("select ID from exclude_include_t3"), sql("select ID from exclude_include_hive_t3")
    )
  }

  override def afterAll {
    dropTable
    // Restore the timestamp format changed in loadTable.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
  }
}
+

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
new file mode 100644
index 0000000..3250768
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataload
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.apache.spark.sql.{DataFrame, Row, SaveMode}
+import org.scalatest.BeforeAndAfterAll
+
/**
 * Tests writing a Spark DataFrame into carbon tables through the three
 * temp-CSV staging modes: compressed CSV, uncompressed CSV, and no CSV stage.
 */
class TestLoadDataFrame extends QueryTest with BeforeAndAfterAll {
  var df: DataFrame = _

  // Builds a 1000-row frame of ("a", "b", 1..1000) with columns c1, c2, c3.
  def buildTestData() = {
    import sqlContext.implicits._
    df = sqlContext.sparkContext
      .parallelize(1 to 1000)
      .map(x => ("a", "b", x))
      .toDF("c1", "c2", "c3")
  }

  // Removes every table this suite writes so runs are idempotent.
  def dropTable() = {
    Seq("carbon1", "carbon2", "carbon3").foreach { table =>
      sql(s"DROP TABLE IF EXISTS $table")
    }
  }

  // Writes df into the given carbon table with the supplied writer options,
  // then checks that rows with c3 > 500 number exactly 500.
  private def writeAndVerify(tableName: String, options: Map[String, String]): Unit = {
    val writer = df.write
      .format("carbondata")
      .option("tableName", tableName)
      .mode(SaveMode.Overwrite)
    options.foreach { case (key, value) => writer.option(key, value) }
    writer.save()
    checkAnswer(sql(s"select count(*) from $tableName where c3 > 500"), Row(500))
  }

  override def beforeAll {
    dropTable
    buildTestData
  }

  test("test load dataframe with saving compressed csv files") {
    writeAndVerify("carbon1", Map("tempCSV" -> "true", "compress" -> "true"))
  }

  test("test load dataframe with saving csv uncompressed files") {
    writeAndVerify("carbon2", Map("tempCSV" -> "true", "compress" -> "false"))
  }

  test("test load dataframe without saving csv files") {
    writeAndVerify("carbon3", Map("tempCSV" -> "false"))
  }

  override def afterAll {
    dropTable
  }
}
+

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala
new file mode 100644
index 0000000..8162018
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataload
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.processing.etl.DataLoadingException
+
/**
 * Verifies that a load using ALL_DICTIONARY_PATH fails with a clear error
 * when the supplied dictionary file is not in the expected format.
 */
class TestLoadDataUseAllDictionary extends QueryTest with BeforeAndAfterAll{
  override def beforeAll {
    sql("DROP TABLE IF EXISTS t3")
    sql("""
           CREATE TABLE IF NOT EXISTS t3
           (ID Int, date Timestamp, country String,
           name String, phonetype String, serialname String, salary Int)
           STORED BY 'carbondata'
           """)
  }

  test("test load data use all dictionary, and given wrong format dictionary values") {
    // A malformed dictionary file must abort the load with DataLoadingException;
    // intercept fails the test cleanly if no exception (or the wrong one) is thrown.
    val e = intercept[DataLoadingException] {
      sql(s"""
           LOAD DATA LOCAL INPATH '$resourcesPath/windows.csv' into table t3
           options('FILEHEADER'='id,date,country,name,phonetype,serialname,salary',
           'All_DICTIONARY_PATH'='$resourcesPath/dict.txt')
           """)
    }
    // assertResult reports both values on mismatch, unlike a bare assert.
    assertResult("Data Loading failure, dictionary values are " +
      "not in correct format!")(e.getMessage)
  }

  override def afterAll {
    sql("DROP TABLE IF EXISTS t3")
  }
}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithDiffTimestampFormat.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithDiffTimestampFormat.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithDiffTimestampFormat.scala
new file mode 100644
index 0000000..35cbe76
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithDiffTimestampFormat.scala
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataload
+
+import java.sql.Timestamp
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
+
/**
 * Tests the per-column 'dateformat' load option: correct parsing of multiple
 * timestamp formats in one load, and the error messages produced for every
 * malformed 'dateformat' spelling.
 */
class TestLoadDataWithDiffTimestampFormat extends QueryTest with BeforeAndAfterAll {
  override def beforeAll {
    sql("DROP TABLE IF EXISTS t3")
    sql("""
           CREATE TABLE IF NOT EXISTS t3
           (ID Int, date Timestamp, starttime Timestamp, country String,
           name String, phonetype String, serialname String, salary Int)
           STORED BY 'carbondata'
        """)
    // Global default used when a column has no explicit dateformat entry.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
  }

  // Loads timeStampFormatData1.csv with the given 'dateformat' option value and
  // asserts the load is rejected with exactly the expected
  // MalformedCarbonCommandException message. Replaces five copy-pasted
  // try/catch blocks whose `case _: Throwable => assert(false)` discarded the
  // real failure cause.
  private def checkDateFormatError(dateFormatOption: String, expectedMessage: String): Unit = {
    val ex = intercept[MalformedCarbonCommandException] {
      sql(s"""
           LOAD DATA LOCAL INPATH '$resourcesPath/timeStampFormatData1.csv' into table t3
           OPTIONS('dateformat' = '$dateFormatOption')
           """)
    }
    assertResult(expectedMessage)(ex.getMessage)
  }

  test("test load data with different timestamp format") {
      // First file: only starttime overrides the format.
      sql(s"""
           LOAD DATA LOCAL INPATH '$resourcesPath/timeStampFormatData1.csv' into table t3
           OPTIONS('dateformat' = 'starttime:yyyy-MM-dd HH:mm:ss')
           """)
      // Second file: both columns override; names/formats may carry extra
      // whitespace and mixed case, which the option parser must tolerate.
      sql(s"""
           LOAD DATA LOCAL INPATH '$resourcesPath/timeStampFormatData2.csv' into table t3
           OPTIONS('dateformat' = ' date : yyyy-MM-dd , StartTime : yyyy/MM/dd HH:mm:ss')
           """)
      checkAnswer(
        sql("SELECT date FROM t3 WHERE ID = 1"),
        Seq(Row(Timestamp.valueOf("2015-07-23 00:00:00.0")))
      )
      checkAnswer(
        sql("SELECT starttime FROM t3 WHERE ID = 1"),
        Seq(Row(Timestamp.valueOf("2016-07-23 01:01:30.0")))
      )
      checkAnswer(
        sql("SELECT date FROM t3 WHERE ID = 18"),
        Seq(Row(Timestamp.valueOf("2015-07-25 00:00:00.0")))
      )
      checkAnswer(
        sql("SELECT starttime FROM t3 WHERE ID = 18"),
        Seq(Row(Timestamp.valueOf("2016-07-25 02:32:02.0")))
      )
  }

  test("test load data with different timestamp format with wrong setting") {
    // Empty option value.
    checkDateFormatError("", "Error: Option DateFormat is set an empty string.")
    // Column name not present in the schema.
    checkDateFormatError("fasfdas:yyyy/MM/dd",
      "Error: Wrong Column Name fasfdas is provided in Option DateFormat.")
    // Column named but format blank.
    checkDateFormatError("date:  ",
      "Error: Option DateFormat is not provided for Column date.")
    // No ':' separator at all.
    checkDateFormatError("date  ",
      "Error: Option DateFormat is not provided for Column date  .")
    // Format given but column name empty.
    checkDateFormatError(":yyyy/MM/dd  ",
      "Error: Wrong Column Name  is provided in Option DateFormat.")
  }

  override def afterAll {
    sql("DROP TABLE IF EXISTS t3")
  }
}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithFileHeaderException.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithFileHeaderException.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithFileHeaderException.scala
new file mode 100644
index 0000000..7717112
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithFileHeaderException.scala
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataload
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
/**
 * Verifies the error messages produced when neither the CSV nor the DDL
 * provides a usable file header, and when the DDL header does not match the
 * schema.
 */
class TestLoadDataWithFileHeaderException extends QueryTest with BeforeAndAfterAll{
  override def beforeAll {
    sql("DROP TABLE IF EXISTS t3")
    sql("""
           CREATE TABLE IF NOT EXISTS t3
           (ID Int, date Timestamp, country String,
           name String, phonetype String, serialname String, salary Int)
           STORED BY 'carbondata'
           """)
  }

  // NOTE: the original try { sql(...); assert(false) } catch { case e: Exception => ... }
  // pattern was buggy — when the load unexpectedly succeeded, assert(false)'s
  // TestFailedException (an Exception) was caught by the test's own handler and
  // failed on a message mismatch instead of a clear "no exception thrown".
  // intercept reports that case correctly.
  test("test load data both file and ddl without file header exception") {
    val e = intercept[Exception] {
      sql(s"""
           LOAD DATA LOCAL INPATH '$resourcesPath/windows.csv' into table t3
           """)
    }
    assertResult("DataLoad failure: CSV File provided is not proper. " +
      "Column names in schema and csv header are not same. CSVFile Name : windows.csv")(
      e.getMessage)
  }

  test("test load data ddl provided  wrong file header exception") {
    val e = intercept[Exception] {
      sql(s"""
           LOAD DATA LOCAL INPATH '$resourcesPath/windows.csv' into table t3
           options('fileheader'='no_column')
           """)
    }
    assertResult("DataLoad failure: CSV header provided in DDL is not proper. " +
      "Column names in schema and CSV header are not the same.")(e.getMessage)
  }

  override def afterAll {
    sql("DROP TABLE IF EXISTS t3")
  }
}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithHiveSyntax.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithHiveSyntax.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithHiveSyntax.scala
new file mode 100644
index 0000000..f2dabe7
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithHiveSyntax.scala
@@ -0,0 +1,692 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataload
+
+import java.io.File
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+/**
+  * Test Class for data loading with hive syntax and old syntax
+  *
+  */
+class TestLoadDataWithHiveSyntax extends QueryTest with BeforeAndAfterAll {
+
+  // Drop every table any test in this suite may have left behind, then create
+  // the two baseline tables (a carbon table and a matching hive table) that
+  // several loading tests below share.
+  override def beforeAll {
+    sql("drop table if exists escapechar1")
+    sql("drop table if exists escapechar2")
+    sql("drop table if exists escapechar3")
+    sql("drop table if exists specialcharacter1")
+    sql("drop table if exists specialcharacter2")
+    sql("drop table if exists collessthanschema")
+    sql("drop table if exists decimalarray")
+    sql("drop table if exists decimalstruct")
+    sql("drop table if exists carbontable")
+    sql("drop table if exists hivetable")
+    sql("drop table if exists testtable")
+    sql("drop table if exists testhivetable")
+    sql("drop table if exists testtable1")
+    sql("drop table if exists testhivetable1")
+    sql("drop table if exists complexcarbontable")
+    sql("drop table if exists complex_t3")
+    sql("drop table if exists complex_hive_t3")
+    sql("drop table if exists header_test")
+    sql("drop table if exists duplicateColTest")
+    sql("drop table if exists mixed_header_test")
+    sql("drop table if exists primitivecarbontable")
+    sql("drop table if exists UPPERCASEcube")
+    sql("drop table if exists lowercaseCUBE")
+    sql("drop table if exists carbontable1")
+    sql("drop table if exists hivetable1")
+    sql("drop table if exists comment_test")
+    sql("drop table if exists smallinttable")
+    sql("drop table if exists smallinthivetable")
+    // Baseline carbon table used by the old-syntax / new-syntax load tests.
+    sql(
+      "CREATE table carbontable (empno int, empname String, designation String, doj String, " +
+        "workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, " +
+        "projectcode int, projectjoindate String, projectenddate String, attendance int," +
+        "utilization int,salary int) STORED BY 'org.apache.carbondata.format'"
+    )
+    // Matching hive table (same columns) for the hive-load test.
+    sql(
+      "create table hivetable(empno int, empname String, designation string, doj String, " +
+        "workgroupcategory int, workgroupcategoryname String,deptno int, deptname String, " +
+        "projectcode int, projectjoindate String,projectenddate String, attendance String," +
+        "utilization String,salary String)row format delimited fields terminated by ','"
+    )
+
+  }
+
+  test("create table with smallint type and query smallint table") {
+    // Verify a smallint column loads identically into a carbon table and a
+    // plain hive table: load the same data into both and compare projections.
+    sql(
+      "create table smallinttable(empno smallint, empname String, designation string, " +
+        "doj String, workgroupcategory int, workgroupcategoryname String,deptno int, " +
+        "deptname String, projectcode int, projectjoindate String,projectenddate String, " +
+        "attendance String, utilization String,salary String)" +
+        "STORED BY 'org.apache.carbondata.format'"
+    )
+
+    sql(
+      "create table smallinthivetable(empno smallint, empname String, designation string, " +
+        "doj String, workgroupcategory int, workgroupcategoryname String,deptno int, " +
+        "deptname String, projectcode int, projectjoindate String,projectenddate String, " +
+        "attendance String, utilization String,salary String)" +
+        "row format delimited fields terminated by ','"
+    )
+
+    // USE_KETTLE=false exercises the non-kettle load flow for the carbon table.
+    sql(s"LOAD DATA local inpath '$resourcesPath/data.csv' INTO table smallinttable " +
+      "OPTIONS('USE_KETTLE'='false')")
+    sql(s"LOAD DATA local inpath '$resourcesPath/datawithoutheader.csv' overwrite " +
+      "INTO table smallinthivetable")
+
+    checkAnswer(
+      sql("select empno from smallinttable"),
+      sql("select empno from smallinthivetable")
+    )
+
+    // Dropped here (not in afterAll) because only this test creates them.
+    sql("drop table if exists smallinttable")
+    sql("drop table if exists smallinthivetable")
+  }
+
+  test("test data loading and validate query output") {
+    // Create a carbon table and an equivalent hive table with all dimensions
+    // excluded from the dictionary, load the same data into both, and verify
+    // the query results match (initial load and an incremental second load).
+    sql(
+      "CREATE table testtable (empno string, empname String, designation String, doj String, " +
+        "workgroupcategory string, workgroupcategoryname String, deptno string, deptname String, " +
+        "projectcode string, projectjoindate String, projectenddate String,attendance double," +
+        "utilization double,salary double) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES" +
+        "('DICTIONARY_EXCLUDE'='empno,empname,designation,doj,workgroupcategory," +
+        "workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate')"
+    )
+    sql(
+      "create table testhivetable(empno string, empname String, designation string, doj String, " +
+        "workgroupcategory string, workgroupcategoryname String,deptno string, deptname String, " +
+        "projectcode string, projectjoindate String,projectenddate String, attendance double," +
+        "utilization double,salary double)row format delimited fields terminated by ','"
+    )
+    // Load data into both tables and validate the full-table query result.
+    sql(s"LOAD DATA local inpath '$resourcesPath/data.csv' INTO table testtable")
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/datawithoutheader.csv' overwrite INTO table " +
+        "testhivetable"
+    )
+    checkAnswer(sql("select * from testtable"), sql("select * from testhivetable"))
+    // Load the same data incrementally into both and re-validate.
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE testtable OPTIONS" +
+        "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
+    )
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/datawithoutheader.csv' INTO table testhivetable"
+    )
+    checkAnswer(sql("select * from testtable"), sql("select * from testhivetable"))
+    // Drop both tables so the data volume does not affect later tests.
+    sql("drop table if exists testtable")
+    sql("drop table if exists testhivetable")
+  }
+
+  /**
+    * TODO: temporarily using a different cube name in each test, because
+    * deleting and then re-creating a cube with the same name is not yet
+    * reliable.
+    */
+  test("test data loading with different case file header and validate query output") {
+    // The FILEHEADER option is given with mixed-case column names and extra
+    // whitespace; loading must still map columns correctly, so the carbon
+    // table must match a hive table loaded from the same headerless CSV.
+    sql(
+      "CREATE table testtable1 (empno string, empname String, designation String, doj String, " +
+        "workgroupcategory string, workgroupcategoryname String, deptno string, deptname String, " +
+        "projectcode string, projectjoindate String, projectenddate String,attendance double," +
+        "utilization double,salary double) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES" +
+        "('DICTIONARY_EXCLUDE'='empno,empname,designation,doj,workgroupcategory," +
+        "workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate')"
+    )
+    sql(
+      "create table testhivetable1(empno string, empname String, designation string, doj String, " +
+        "workgroupcategory string, workgroupcategoryname String,deptno string, deptname String, " +
+        "projectcode string, projectjoindate String,projectenddate String, attendance double," +
+        "utilization double,salary double)row format delimited fields terminated by ','"
+    )
+    // FILEHEADER deliberately mixes case ('EMPno', 'SALARY') and padding.
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/datawithoutheader.csv' INTO table testtable1 " +
+        "options('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='EMPno, empname,designation,doj," +
+        "workgroupcategory,workgroupcategoryname,   deptno,deptname,projectcode,projectjoindate," +
+        "projectenddate,  attendance,   utilization,SALARY')"
+    )
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/datawithoutheader.csv' overwrite INTO table " +
+        "testhivetable1"
+    )
+    checkAnswer(sql("select * from testtable1"), sql("select * from testhivetable1"))
+    // Clean up both tables created by this test.
+    sql("drop table if exists testtable1")
+    sql("drop table if exists testhivetable1")
+  }
+
+  test("test hive table data loading") {
+    // Smoke test: hive-syntax loads (overwrite, then append) into the
+    // baseline hive table must not throw.
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/datawithoutheader.csv' overwrite INTO table " +
+        "hivetable"
+    )
+    sql(s"LOAD DATA local inpath '$resourcesPath/datawithoutheader.csv' INTO table hivetable")
+  }
+
+  test("test carbon table data loading using old syntax") {
+    // Smoke test: the old OPTIONS-based load syntax must still be accepted.
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE carbontable OPTIONS" +
+        "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
+    )
+  }
+
+  test("test carbon table data loading using new syntax compatible with hive") {
+    // Smoke test: hive-compatible load syntax, with and without options.
+    sql(s"LOAD DATA local inpath '$resourcesPath/data.csv' INTO table carbontable")
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/data.csv' INTO table carbontable options" +
+        "('DELIMITER'=',', 'QUOTECHAR'='\"')"
+    )
+  }
+
+  test("test carbon table data loading using new syntax with overwrite option compatible with hive")
+  {
+    // OVERWRITE is not supported for carbon tables, so this load must fail
+    // with the exact message below. The previous try/catch passed vacuously
+    // when no exception was thrown at all; intercept[] makes the expected
+    // failure mandatory.
+    val e = intercept[Throwable] {
+      sql(s"LOAD DATA local inpath '$resourcesPath/data.csv' overwrite INTO table carbontable")
+    }
+    assert(e.getMessage
+      .equals("Overwrite is not supported for carbon table with default.carbontable")
+    )
+  }
+
+  test("complex types data loading") {
+    // Load a CSV containing struct, array and nested array<struct> columns,
+    // using '$' and ':' as the level-1/level-2 complex-value delimiters.
+    sql("create table complexcarbontable(deviceInformationId int, channelsId string," +
+      "ROMSize string, purchasedate string, mobile struct<imei:string, imsi:string>," +
+      "MAC array<string>, locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string, " +
+      "ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>," +
+      "proddate struct<productionDate:string,activeDeactivedate:array<string>>, gamePointId " +
+      "double,contractNumber double) " +
+      "STORED BY 'org.apache.carbondata.format' " +
+      "TBLPROPERTIES ('DICTIONARY_INCLUDE'='deviceInformationId')"
+    )
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/complexdata.csv' INTO table " +
+        "complexcarbontable " +
+        "OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId," +
+        "ROMSize,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber'," +
+        "'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')"
+    )
+    sql("drop table if exists complexcarbontable")
+  }
+
+  test(
+    "complex types data loading with more unused columns and different order of complex columns " +
+      "in csv and create table"
+  ) {
+    // The CSV header lists an extra column ('abc') and orders the complex
+    // columns differently from the schema; loading must still succeed.
+    sql("create table complexcarbontable(deviceInformationId int, channelsId string," +
+      "mobile struct<imei:string, imsi:string>, ROMSize string, purchasedate string," +
+      "MAC array<string>, locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string, " +
+      "ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>," +
+      "proddate struct<productionDate:string,activeDeactivedate:array<string>>, gamePointId " +
+      "double,contractNumber double) " +
+      "STORED BY 'org.apache.carbondata.format' " +
+      "TBLPROPERTIES ('DICTIONARY_INCLUDE'='deviceInformationId','DICTIONARY_EXCLUDE'='channelsId')"
+    )
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/complextypediffentcolheaderorder.csv' INTO " +
+        "table complexcarbontable " +
+        "OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId," +
+        "ROMSize,purchasedate,MAC,abc,mobile,locationinfo,proddate,gamePointId,contractNumber'," +
+        "'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')"
+    )
+    // Only checks that the loaded table is queryable; result is not asserted.
+    sql("select count(*) from complexcarbontable")
+    sql("drop table if exists complexcarbontable")
+  }
+
+  test("test carbon table data loading with csv file Header in caps") {
+    // A CSV whose header row is all upper-case must map onto the lower-case
+    // schema columns; both rows in the file should be loaded.
+    sql("drop table if exists header_test")
+    sql(
+      "create table header_test(empno int, empname String, designation string, doj String, " +
+        "workgroupcategory int, workgroupcategoryname String,deptno int, deptname String, " +
+        "projectcode int, projectjoindate String,projectenddate String, attendance String," +
+        "utilization String,salary String) STORED BY 'org.apache.carbondata.format'"
+    )
+    val csvFilePath = s"$resourcesPath/data_withCAPSHeader.csv"
+    sql("LOAD DATA local inpath '" + csvFilePath + "' INTO table header_test OPTIONS " +
+      "('DELIMITER'=',', 'QUOTECHAR'='\"')");
+    checkAnswer(sql("select empno from header_test"),
+      Seq(Row(11), Row(12))
+    )
+  }
+
+  test("test duplicate column validation") {
+    // CREATE TABLE with two columns differing only in case must be rejected.
+    // intercept[] fails the test if the statement unexpectedly succeeds; the
+    // previous try/catch passed vacuously in that case.
+    val e = intercept[Exception] {
+      sql("create table duplicateColTest(col1 string, Col1 string)")
+    }
+    // Message differs across Spark versions, so accept either wording.
+    assert(e.getMessage.contains("Duplicate column name") ||
+      e.getMessage.contains("Found duplicate column"))
+  }
+
+  test(
+    "test carbon table data loading with csv file Header in Mixed Case and create table columns " +
+      "in mixed case"
+  ) {
+    // Header matching must be case-insensitive on both sides: the CSV header
+    // and the schema columns each use mixed case here.
+    sql("drop table if exists mixed_header_test")
+    sql(
+      "create table mixed_header_test(empno int, empname String, Designation string, doj String, " +
+        "Workgroupcategory int, workgroupcategoryname String,deptno int, deptname String, " +
+        "projectcode int, projectjoindate String,projectenddate String, attendance String," +
+        "utilization String,salary String) STORED BY 'org.apache.carbondata.format'"
+    )
+    val csvFilePath = s"$resourcesPath/data_withMixedHeader.csv"
+    sql("LOAD DATA local inpath '" + csvFilePath + "' INTO table mixed_header_test OPTIONS " +
+      "('DELIMITER'=',', 'QUOTECHAR'='\"')");
+    checkAnswer(sql("select empno from mixed_header_test"),
+      Seq(Row(11), Row(12))
+    )
+  }
+
+
+  test("complex types data loading with hive column having more than required column values") {
+    // The CSV rows carry more values than the complex columns need; the load
+    // must tolerate the extras and still succeed.
+    sql("create table complexcarbontable(deviceInformationId int, channelsId string," +
+      "ROMSize string, purchasedate string, mobile struct<imei:string, imsi:string>," +
+      "MAC array<string>, locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string, " +
+      "ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>," +
+      "proddate struct<productionDate:string,activeDeactivedate:array<string>>, gamePointId " +
+      "double,contractNumber double) " +
+      "STORED BY 'org.apache.carbondata.format' " +
+      "TBLPROPERTIES ('DICTIONARY_INCLUDE'='deviceInformationId')"
+    )
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/complexdatastructextra.csv' INTO table " +
+        "complexcarbontable " +
+        "OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId," +
+        "ROMSize,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber'," +
+        "'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')"
+    )
+    sql("drop table if exists complexcarbontable")
+  }
+
+  test("complex types & no dictionary columns data loading") {
+    // Same complex schema as above, but with two primitive dimensions
+    // excluded from the dictionary; the load must still succeed.
+    sql("create table complexcarbontable(deviceInformationId int, channelsId string," +
+      "ROMSize string, purchasedate string, mobile struct<imei:string, imsi:string>," +
+      "MAC array<string>, locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string, " +
+      "ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>," +
+      "proddate struct<productionDate:string,activeDeactivedate:array<string>>, gamePointId " +
+      "double,contractNumber double) " +
+      "STORED BY 'org.apache.carbondata.format' " +
+      "TBLPROPERTIES ('DICTIONARY_INCLUDE'='deviceInformationId', 'DICTIONARY_EXCLUDE'='ROMSize," +
+      "purchasedate')"
+    )
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/complexdata.csv' INTO table " +
+        "complexcarbontable " +
+        "OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId," +
+        "ROMSize,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber'," +
+        "'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')"
+    );
+    sql("drop table if exists complexcarbontable")
+  }
+
+  test("array<string> and string datatype for same column is not working properly") {
+    // Load the same reordered CSV twice: once with MAC declared as
+    // array<string> and once as plain string; both loads must succeed.
+    sql("create table complexcarbontable(deviceInformationId int, MAC array<string>, channelsId string, "+
+        "ROMSize string, purchasedate string, gamePointId double,contractNumber double) STORED BY 'org.apache.carbondata.format' "+
+        "TBLPROPERTIES ('DICTIONARY_INCLUDE'='deviceInformationId')")
+    sql(s"LOAD DATA local inpath '$resourcesPath/complexdatareordered.csv' INTO table complexcarbontable "+
+        "OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,MAC,channelsId,ROMSize,purchasedate,gamePointId,contractNumber',"+
+        "'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')")
+    sql("drop table if exists complexcarbontable")
+    // Same columns, but MAC is a primitive string this time.
+    sql("create table primitivecarbontable(deviceInformationId int, MAC string, channelsId string, "+
+        "ROMSize string, purchasedate string, gamePointId double,contractNumber double) STORED BY 'org.apache.carbondata.format' "+
+        "TBLPROPERTIES ('DICTIONARY_INCLUDE'='deviceInformationId')")
+    sql(s"LOAD DATA local inpath '$resourcesPath/complexdatareordered.csv' INTO table primitivecarbontable "+
+        "OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,MAC,channelsId,ROMSize,purchasedate,gamePointId,contractNumber',"+
+        "'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')")
+    sql("drop table if exists primitivecarbontable")
+  }
+
+  test(
+    "test carbon table data loading when table name is in different case with create table, for " +
+      "UpperCase"
+  ) {
+    // Table names must be case-insensitive: create as UPPERCASEcube, load as
+    // uppercasecube, drop as UpperCaseCube.
+    sql("create table UPPERCASEcube(empno Int, empname String, designation String, " +
+      "doj String, workgroupcategory Int, workgroupcategoryname String, deptno Int, " +
+      "deptname String, projectcode Int, projectjoindate String, projectenddate String, " +
+      "attendance Int,utilization Double,salary Double) STORED BY 'org.apache.carbondata.format'"
+    )
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/data.csv' INTO table uppercasecube OPTIONS" +
+        "('DELIMITER'=',', 'QUOTECHAR'='\"')"
+    )
+    sql("drop table if exists UpperCaseCube")
+  }
+
+  test(
+    "test carbon table data loading when table name is in different case with create table ,for " +
+      "LowerCase"
+  ) {
+    // Mirror of the UpperCase test: create as lowercaseCUBE, load as
+    // LOWERCASECUBE, drop as LowErcasEcube.
+    sql("create table lowercaseCUBE(empno Int, empname String, designation String, " +
+      "doj String, workgroupcategory Int, workgroupcategoryname String, deptno Int, " +
+      "deptname String, projectcode Int, projectjoindate String, projectenddate String, " +
+      "attendance Int,utilization Double,salary Double) STORED BY 'org.apache.carbondata.format'"
+    )
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/data.csv' INTO table LOWERCASECUBE OPTIONS" +
+        "('DELIMITER'=',', 'QUOTECHAR'='\"')"
+    )
+    sql("drop table if exists LowErcasEcube")
+  }
+
+  test("test carbon table data loading using escape char 1") {
+    // Load a CSV containing backslashes using '@' as ESCAPECHAR; all 10 rows
+    // must load.
+    sql("DROP TABLE IF EXISTS escapechar1")
+
+    sql(
+      """
+           CREATE TABLE IF NOT EXISTS escapechar1
+           (ID Int, date Timestamp, country String,
+           name String, phonetype String, serialname String, salary Int)
+           STORED BY 'org.apache.carbondata.format'
+      """
+    )
+    // Switch the global timestamp format to match this CSV's dates.
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
+    sql(
+      s"""
+           LOAD DATA LOCAL INPATH '$resourcesPath/datawithbackslash.csv' into table escapechar1
+           OPTIONS('ESCAPECHAR'='@')
+        """
+    )
+    checkAnswer(sql("select count(*) from escapechar1"), Seq(Row(10)))
+    // Reset to dd-MM-yyyy — presumably the suite-wide default; confirm this
+    // matches what the other tests expect (they set yyyy/MM/dd themselves).
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
+    sql("DROP TABLE IF EXISTS escapechar1")
+  }
+
+  test("test carbon table data loading using escape char 2") {
+    // Backslash as ESCAPECHAR: 'escape\esc' in the CSV must load as
+    // 'escapeesc' (the escape character itself is consumed).
+    sql("DROP TABLE IF EXISTS escapechar2")
+
+    sql(
+      """
+         CREATE TABLE escapechar2(imei string,specialchar string)
+         STORED BY 'org.apache.carbondata.format'
+      """
+    )
+
+    sql(
+      s"""
+       LOAD DATA LOCAL INPATH '$resourcesPath/datawithescapecharacter.csv' into table escapechar2
+          options ('DELIMITER'=',', 'QUOTECHAR'='"','ESCAPECHAR'='\')
+      """
+    )
+    checkAnswer(sql("select count(*) from escapechar2"), Seq(Row(21)))
+    checkAnswer(sql("select specialchar from escapechar2 where imei = '1AA44'"), Seq(Row("escapeesc")))
+    sql("DROP TABLE IF EXISTS escapechar2")
+  }
+
+  test("test carbon table data loading using escape char 3") {
+    // '@' as ESCAPECHAR: '@@' collapses to a literal '@' ("ayush@b.com") and
+    // a single '@' is consumed ("ayushb.com").
+    sql("DROP TABLE IF EXISTS escapechar3")
+
+    sql(
+      """
+         CREATE TABLE escapechar3(imei string,specialchar string)
+         STORED BY 'org.apache.carbondata.format'
+      """
+    )
+
+    sql(
+      s"""
+       LOAD DATA LOCAL INPATH '$resourcesPath/datawithescapecharacter.csv' into table escapechar3
+          options ('DELIMITER'=',', 'QUOTECHAR'='"','ESCAPECHAR'='@')
+      """
+    )
+    checkAnswer(sql("select count(*) from escapechar3"), Seq(Row(21)))
+    checkAnswer(sql("select specialchar from escapechar3 where imei in ('1232','12323')"), Seq(Row
+    ("ayush@b.com"), Row("ayushb.com")
+    )
+    )
+    sql("DROP TABLE IF EXISTS escapechar3")
+  }
+
+  test("test carbon table data loading with special character 1") {
+    // Quoted special characters in the CSV must survive the load; the value
+    // for imei '1AA36' is a string containing embedded double quotes.
+    sql("DROP TABLE IF EXISTS specialcharacter1")
+
+    sql(
+      """
+         CREATE TABLE specialcharacter1(imei string,specialchar string)
+         STORED BY 'org.apache.carbondata.format'
+      """
+    )
+
+    sql(
+      s"""
+       LOAD DATA LOCAL INPATH '$resourcesPath/datawithspecialcharacter.csv' into table specialcharacter1
+          options ('DELIMITER'=',', 'QUOTECHAR'='"')
+      """
+    )
+    checkAnswer(sql("select count(*) from specialcharacter1"), Seq(Row(37)))
+    checkAnswer(sql("select specialchar from specialcharacter1 where imei='1AA36'"), Seq(Row("\"i\"")))
+    sql("DROP TABLE IF EXISTS specialcharacter1")
+  }
+
+  test("test carbon table data loading with special character 2") {
+    // A wide table whose CSV uses '"' as both QUOTECHAR and ESCAPECHAR
+    // (CSV-style doubled quotes); also exercises a column name that starts
+    // with digits (124_string_level_province).
+    sql("DROP TABLE IF EXISTS specialcharacter2")
+
+    sql(
+      """
+        CREATE table specialcharacter2(customer_id int, 124_string_level_province String, date_level String,
+        Time_level String, lname String, fname String, mi String, address1 String, address2
+        String, address3 String, address4 String, city String, country String, phone1 String,
+        phone2 String, marital_status String, yearly_income String, gender String, education
+        String, member_card String, occupation String, houseowner String, fullname String,
+        numeric_level double, account_num double, customer_region_id int, total_children int,
+        num_children_at_home int, num_cars_owned int)
+        STORED BY 'org.apache.carbondata.format'
+      """
+    )
+
+    sql(
+      s"""
+       LOAD DATA LOCAL INPATH '$resourcesPath/datawithcomplexspecialchar.csv' into
+       table specialcharacter2 options ('DELIMITER'=',', 'QUOTECHAR'='"','ESCAPECHAR'='"')
+      """
+    )
+    checkAnswer(sql("select count(*) from specialcharacter2"), Seq(Row(150)))
+    checkAnswer(sql("select 124_string_level_province from specialcharacter2 where customer_id=103"),
+      Seq(Row("\"state province # 124\""))
+    )
+    sql("DROP TABLE IF EXISTS specialcharacter2")
+  }
+
+  test("test data which contain column less than schema"){
+    // CSV rows with fewer columns than the schema must still load (missing
+    // trailing columns become null); all 10 rows are expected.
+    sql("DROP TABLE IF EXISTS collessthanschema")
+
+    sql(
+      """
+           CREATE TABLE IF NOT EXISTS collessthanschema
+           (ID Int, date Timestamp, country String,
+           name String, phonetype String, serialname String, salary Int)
+           STORED BY 'org.apache.carbondata.format'
+      """)
+
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
+    sql(s"""
+         LOAD DATA LOCAL INPATH '$resourcesPath/lessthandatacolumndata.csv' into table collessthanschema
+        """)
+    checkAnswer(sql("select count(*) from collessthanschema"),Seq(Row(10)))
+    sql("DROP TABLE IF EXISTS collessthanschema")
+  }
+
+  test("test data which contain column with decimal data type in array."){
+    // decimal(4,2) values nested inside an array column must load; 8 rows
+    // are expected from complexTypeDecimal.csv.
+    sql("DROP TABLE IF EXISTS decimalarray")
+
+    sql(
+      """
+           CREATE TABLE IF NOT EXISTS decimalarray
+           (ID decimal(5,5), date Timestamp, country String,
+           name String, phonetype String, serialname String, salary Int, complex
+           array<decimal(4,2)>)
+           STORED BY 'org.apache.carbondata.format'
+      """
+    )
+
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
+    sql(s"""
+         LOAD DATA LOCAL INPATH '$resourcesPath/complexTypeDecimal.csv' into table decimalarray
+        """)
+    checkAnswer(sql("select count(*) from decimalarray"),Seq(Row(8)))
+    sql("DROP TABLE IF EXISTS decimalarray")
+  }
+
+  test("test data which contain column with decimal data type in struct."){
+    // Same as the array variant above, but the decimal is a struct field.
+    sql("DROP TABLE IF EXISTS decimalstruct")
+
+    sql(
+      """
+           CREATE TABLE IF NOT EXISTS decimalstruct
+           (ID decimal(5,5), date Timestamp, country String,
+           name String, phonetype String, serialname String, salary Int, complex
+           struct<a:decimal(4,2)>)
+           STORED BY 'org.apache.carbondata.format'
+      """
+    )
+
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
+    sql(s"""
+         LOAD DATA LOCAL INPATH '$resourcesPath/complexTypeDecimal.csv' into table decimalstruct
+        """)
+    checkAnswer(sql("select count(*) from decimalstruct"),Seq(Row(8)))
+    sql("DROP TABLE IF EXISTS decimalstruct")
+  }
+
+  test("test data which contain column with decimal data type in array of struct."){
+    // decimal(4,2) nested two levels deep (array<struct<...>>): compare the
+    // carbon table against a hive table loaded from equivalent data.
+    sql("DROP TABLE IF EXISTS complex_t3")
+    sql("DROP TABLE IF EXISTS complex_hive_t3")
+
+    sql(
+      """
+           CREATE TABLE complex_t3
+           (ID decimal, date Timestamp, country String,
+           name String, phonetype String, serialname String, salary Int, complex
+           array<struct<a:decimal(4,2),str:string>>)
+           STORED BY 'org.apache.carbondata.format'
+      """
+    )
+    sql(
+      """
+           CREATE TABLE complex_hive_t3
+           (ID decimal, date Timestamp, country String,
+           name String, phonetype String, serialname String, salary Int, complex
+           array<struct<a:decimal(4,2),str:string>>)
+           row format delimited fields terminated by ','
+      """
+    )
+
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
+    sql(s"""
+         LOAD DATA LOCAL INPATH '$resourcesPath/complexTypeDecimalNested.csv' into table complex_t3
+        """)
+    sql(s"""
+         LOAD DATA LOCAL INPATH '$resourcesPath/complexTypeDecimalNestedHive.csv' into table complex_hive_t3
+        """)
+    checkAnswer(sql("select count(*) from complex_t3"),sql("select count(*) from complex_hive_t3"))
+    checkAnswer(sql("select id from complex_t3 where salary = 15000"),sql("select id from complex_hive_t3 where salary = 15000"))
+    // Tables are intentionally left in place; afterAll drops them.
+  }
+
+  test("test data loading when delimiter is '|' and data with header") {
+    // Load a pipe-delimited CSV into a carbon table and the equivalent
+    // comma-delimited data into a hive table; results must match.
+    sql(
+      "CREATE table carbontable1 (empno string, empname String, designation String, doj String, " +
+        "workgroupcategory string, workgroupcategoryname String, deptno string, deptname String, " +
+        "projectcode string, projectjoindate String, projectenddate String,attendance double," +
+        "utilization double,salary double) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES" +
+        "('DICTIONARY_EXCLUDE'='empno,empname,designation,doj,workgroupcategory," +
+        "workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate')"
+    )
+    sql(
+      "create table hivetable1 (empno string, empname String, designation string, doj String, " +
+        "workgroupcategory string, workgroupcategoryname String,deptno string, deptname String, " +
+        "projectcode string, projectjoindate String,projectenddate String, attendance double," +
+        "utilization double,salary double)row format delimited fields terminated by ','"
+    )
+
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/datadelimiter.csv' INTO TABLE carbontable1 OPTIONS" +
+        "('DELIMITER'= '|', 'QUOTECHAR'= '\"')"
+    )
+
+    sql(s"LOAD DATA local inpath '$resourcesPath/datawithoutheader.csv' INTO table hivetable1")
+
+    checkAnswer(sql("select * from carbontable1"), sql("select * from hivetable1"))
+  }
+
+  test("test data loading with comment option") {
+    // COMMENTCHAR='?' makes lines starting with '?' comments (skipped), while
+    // '?' elsewhere in a value ('#?carbon') is kept; QUOTECHAR is '.'.
+    sql("drop table if exists comment_test")
+    sql(
+      "create table comment_test(imei string, age int, task bigint, num double, level decimal(10," +
+        "3), productdate timestamp, mark int, name string) STORED BY 'org.apache.carbondata.format'"
+    )
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/comment.csv' INTO TABLE comment_test " +
+        "options('DELIMITER' = ',', 'QUOTECHAR' = '.', 'COMMENTCHAR' = '?','FILEHEADER'='imei,age,task,num,level,productdate,mark,name')"
+    )
+    checkAnswer(sql("select imei from comment_test"),Seq(Row("\".carbon"),Row("#?carbon"), Row(""),
+      Row("~carbon,")))
+  }
+
+
+  // Drop every table the suite may have created (mirrors the beforeAll list,
+  // minus the smallint tables, which each test drops itself).
+  override def afterAll {
+    sql("drop table if exists escapechar1")
+    sql("drop table if exists escapechar2")
+    sql("drop table if exists escapechar3")
+    sql("drop table if exists specialcharacter1")
+    sql("drop table if exists specialcharacter2")
+    sql("drop table if exists collessthanschema")
+    sql("drop table if exists decimalarray")
+    sql("drop table if exists decimalstruct")
+    sql("drop table if exists carbontable")
+    sql("drop table if exists hivetable")
+    sql("drop table if exists testtable")
+    sql("drop table if exists testhivetable")
+    sql("drop table if exists testtable1")
+    sql("drop table if exists testhivetable1")
+    sql("drop table if exists complexcarbontable")
+    sql("drop table if exists complex_t3")
+    sql("drop table if exists complex_hive_t3")
+    sql("drop table if exists header_test")
+    sql("drop table if exists duplicateColTest")
+    sql("drop table if exists mixed_header_test")
+    sql("drop table if exists primitivecarbontable")
+    sql("drop table if exists UPPERCASEcube")
+    sql("drop table if exists lowercaseCUBE")
+    sql("drop table if exists carbontable1")
+    sql("drop table if exists hivetable1")
+    sql("drop table if exists comment_test")
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithMalformedCarbonCommandException.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithMalformedCarbonCommandException.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithMalformedCarbonCommandException.scala
new file mode 100644
index 0000000..954112f
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithMalformedCarbonCommandException.scala
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataload
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
+
+/**
+ * Verifies that malformed CREATE TABLE properties (DICTIONARY_INCLUDE /
+ * DICTIONARY_EXCLUDE columns that do not exist in the schema or that
+ * conflict with each other) and malformed LOAD DATA options (invalid or
+ * duplicate option names) raise MalformedCarbonCommandException with the
+ * expected error message.
+ */
+class TestLoadDataWithMalformedCarbonCommandException extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    // Target table used by the LOAD DATA option tests below.
+    sql("CREATE table TestLoadTableOptions (ID int, date String, country String, name String," +
+        "phonetype String, serialname String, salary int) stored by 'org.apache.carbondata.format'")
+  }
+
+  override def afterAll {
+    sql("drop table TestLoadTableOptions")
+  }
+
+  // DICTIONARY_EXCLUDE names a column (CCC) that is not part of the schema.
+  def buildTableWithNoExistDictExclude() = {
+      sql(
+        """
+           CREATE TABLE IF NOT EXISTS t3
+           (ID Int, date Timestamp, country String,
+           name String, phonetype String, serialname String, salary Int)
+           STORED BY 'org.apache.carbondata.format'
+           TBLPROPERTIES('DICTIONARY_EXCLUDE'='country,phonetype,CCC')
+        """)
+  }
+
+  // DICTIONARY_INCLUDE names a column (AAA) that is not part of the schema.
+  def buildTableWithNoExistDictInclude() = {
+      sql(
+        """
+           CREATE TABLE IF NOT EXISTS t3
+           (ID Int, date Timestamp, country String,
+           name String, phonetype String, serialname String, salary Int)
+           STORED BY 'org.apache.carbondata.format'
+           TBLPROPERTIES('DICTIONARY_INCLUDE'='AAA,country')
+        """)
+  }
+
+  // The same column appears in both DICTIONARY_INCLUDE and DICTIONARY_EXCLUDE.
+  def buildTableWithSameDictExcludeAndInclude() = {
+      sql(
+        """
+           CREATE TABLE IF NOT EXISTS t3
+           (ID Int, date Timestamp, country String,
+           name String, phonetype String, serialname String, salary Int)
+           STORED BY 'org.apache.carbondata.format'
+           TBLPROPERTIES('DICTIONARY_INCLUDE'='country','DICTIONARY_EXCLUDE'='country')
+        """)
+  }
+
+  // Same conflict as above, but with trailing whitespace around the column name.
+  def buildTableWithSameDictExcludeAndIncludeWithSpaces() = {
+    sql(
+      """
+           CREATE TABLE IF NOT EXISTS t3
+           (ID Int, date Timestamp, country String,
+           name String, phonetype String, serialname String, salary Int)
+           STORED BY 'org.apache.carbondata.format'
+           TBLPROPERTIES('DICTIONARY_INCLUDE'='country','DICTIONARY_EXCLUDE'='country ')
+      """)
+  }
+
+  test("test load data with dictionary exclude columns which no exist in table.") {
+    // The CREATE TABLE must be rejected. intercept fails the test when no
+    // exception is raised at all (the original try/catch passed silently
+    // in that case) and when a different exception type is thrown.
+    val e = intercept[MalformedCarbonCommandException] {
+      buildTableWithNoExistDictExclude()
+    }
+    assert(e.getMessage.equals("DICTIONARY_EXCLUDE column: ccc does not exist in table. " +
+      "Please check create table statement."))
+  }
+
+  test("test load data with dictionary include columns which no exist in table.") {
+    val e = intercept[MalformedCarbonCommandException] {
+      buildTableWithNoExistDictInclude()
+    }
+    assert(e.getMessage.equals("DICTIONARY_INCLUDE column: aaa does not exist in table. " +
+      "Please check create table statement."))
+  }
+
+  test("test load data with dictionary include is same with dictionary exclude") {
+    val e = intercept[MalformedCarbonCommandException] {
+      buildTableWithSameDictExcludeAndInclude()
+    }
+    assert(e.getMessage.equals("DICTIONARY_EXCLUDE can not contain the same column: country " +
+      "with DICTIONARY_INCLUDE. Please check create table statement."))
+  }
+
+  test("test load data with invalid option") {
+    // 'DELIMITERRR' is not a recognized load option.
+    val e = intercept[MalformedCarbonCommandException] {
+      sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/dataretention1.csv' INTO TABLE " +
+        "TestLoadTableOptions OPTIONS('QUOTECHAR'='\"', 'DELIMITERRR' =  ',')")
+    }
+    assert(e.getMessage.equals("Error: Invalid option(s): delimiterrr"))
+  }
+
+  test("test load data with duplicate options") {
+    // 'DELIMITER' is supplied twice, which must be rejected.
+    val e = intercept[MalformedCarbonCommandException] {
+      sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/dataretention1.csv' INTO TABLE " +
+        "TestLoadTableOptions OPTIONS('DELIMITER' =  ',', 'quotechar'='\"', 'DELIMITER' =  '$')")
+    }
+    assert(e.getMessage.equals("Error: Duplicate option(s): delimiter"))
+  }
+
+  test("test load data with case sensitive options") {
+    // Option names are case-insensitive, so this load must simply succeed.
+    // Any exception propagates and fails the test directly.
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/dataretention1.csv' INTO table " +
+        "TestLoadTableOptions options('DeLIMITEr'=',', 'qUOtECHAR'='\"')"
+    )
+  }
+
+  test("test load data with dictionary include is same with dictionary exclude with spaces") {
+    // The column name is expected to be trimmed before the conflict check.
+    val e = intercept[MalformedCarbonCommandException] {
+      buildTableWithSameDictExcludeAndIncludeWithSpaces()
+    }
+    assert(e.getMessage.equals("DICTIONARY_EXCLUDE can not contain the same column: country " +
+      "with DICTIONARY_INCLUDE. Please check create table statement."))
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithNoMeasure.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithNoMeasure.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithNoMeasure.scala
new file mode 100644
index 0000000..1b0b324
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithNoMeasure.scala
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataload
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+/**
+  * Test Class for data loading into tables that have no measure columns,
+  * covering dictionary, no-dictionary and complex (struct / array)
+  * dimension columns.
+  */
+class TestLoadDataWithNoMeasure extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    // Drop any leftovers from earlier runs so the CREATE TABLE below
+    // cannot fail on re-execution of the suite.
+    sql("DROP TABLE IF EXISTS nomeasureTest")
+    sql("DROP TABLE IF EXISTS nomeasureTest_sd")
+    sql(
+      "CREATE TABLE nomeasureTest (empno String, doj String) STORED BY 'org.apache.carbondata" +
+        ".format'"
+    )
+    val testData = s"$resourcesPath/datasample.csv"
+    sql("LOAD DATA LOCAL INPATH '" + testData + "' into table nomeasureTest")
+  }
+
+  test("test data loading and validate query output") {
+    // Data was loaded in beforeAll; only the query result is checked here.
+    checkAnswer(
+      sql("select empno from nomeasureTest"),
+      Seq(Row("11"), Row("12"), Row("13"))
+    )
+  }
+
+  test("test data loading with single dictionary column") {
+    sql("DROP TABLE IF EXISTS nomeasureTest_sd")
+    sql("CREATE TABLE nomeasureTest_sd (city String) STORED BY 'org.apache.carbondata.format'")
+    val testData = s"$resourcesPath/datasingleCol.csv"
+    sql("LOAD DATA LOCAL INPATH '" + testData + "' into table nomeasureTest_sd options " +
+      "('FILEHEADER'='city')"
+    )
+
+    checkAnswer(
+      sql("select city from nomeasureTest_sd"),
+      Seq(Row("CA"), Row("LA"), Row("AD"))
+    )
+  }
+
+  test("test data loading with single no dictionary column") {
+    sql("DROP TABLE IF EXISTS nomeasureTest_sd")
+    // DICTIONARY_EXCLUDE makes 'city' a no-dictionary dimension.
+    sql(
+      "CREATE TABLE nomeasureTest_sd (city String) STORED BY 'org.apache.carbondata.format' " +
+        "TBLPROPERTIES ('DICTIONARY_EXCLUDE'='city')"
+    )
+    val testData = s"$resourcesPath/datasingleCol.csv"
+    sql("LOAD DATA LOCAL INPATH '" + testData + "' into table nomeasureTest_sd options " +
+      "('FILEHEADER'='city')"
+    )
+
+    checkAnswer(
+      sql("select city from nomeasureTest_sd"),
+      Seq(Row("CA"), Row("LA"), Row("AD"))
+    )
+  }
+
+  test("test data loading with single complex struct type column") {
+    // Only the data load itself is checked; no query validation.
+    sql("DROP TABLE IF EXISTS nomeasureTest_scd")
+    sql(
+      "CREATE TABLE nomeasureTest_scd (cityDetail struct<cityName:string,cityCode:string>) STORED" +
+        " " +
+        "BY 'org.apache.carbondata.format'"
+    )
+    val testData = s"$resourcesPath/datasingleComplexCol.csv"
+    sql("LOAD DATA LOCAL INPATH '" + testData + "' into table nomeasureTest_scd options " +
+      "('DELIMITER'=',','QUOTECHAR'='\"','FILEHEADER'='cityDetail','COMPLEX_DELIMITER_LEVEL_1'=':')"
+    )
+  }
+
+  test("test data loading with single complex array type column") {
+    // Only the data load itself is checked; no query validation.
+    sql("DROP TABLE IF EXISTS nomeasureTest_scd")
+    sql(
+      "CREATE TABLE nomeasureTest_scd (cityDetail array<string>) STORED" +
+        " " +
+        "BY 'org.apache.carbondata.format'"
+    )
+    val testData = s"$resourcesPath/datasingleComplexCol.csv"
+    sql("LOAD DATA LOCAL INPATH '" + testData + "' into table nomeasureTest_scd options " +
+      "('DELIMITER'=',','QUOTECHAR'='\"','FILEHEADER'='cityDetail'," +
+      "'COMPLEX_DELIMITER_LEVEL_1'=':')"
+    )
+  }
+
+  override def afterAll {
+    // IF EXISTS keeps cleanup from failing when an individual test never
+    // got as far as creating its table.
+    sql("drop table if exists nomeasureTest")
+    sql("drop table if exists nomeasureTest_sd")
+    sql("drop table if exists nomeasureTest_scd")
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithNotProperInputFile.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithNotProperInputFile.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithNotProperInputFile.scala
new file mode 100644
index 0000000..5fd52dc
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithNotProperInputFile.scala
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataload
+
+import java.io.File
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.apache.spark.util.FileUtils
+
+import org.apache.carbondata.processing.model.CarbonLoadModel
+import org.apache.carbondata.spark.util.GlobalDictionaryUtil
+
+/**
+ * Test class of loading data for carbon table with not proper input file:
+ * a file with no usable content, a file without the '.csv' extension, and
+ * a file that does not exist. Each case must raise an exception with the
+ * expected message.
+ */
+class TestLoadDataWithNotProperInputFile extends QueryTest {
+
+  test("test loading data with input path exists but has nothing") {
+    try {
+      val carbonLoadModel: CarbonLoadModel = new CarbonLoadModel
+      val dataPath = s"$resourcesPath/nullSample.csv"
+      carbonLoadModel.setFactFilePath(FileUtils.getPaths(dataPath))
+      GlobalDictionaryUtil.loadDataFrame(sqlContext, carbonLoadModel)
+      // Loading an empty file must fail; reaching this line means the
+      // invalid input was accepted, so fail the test explicitly (the
+      // original version passed silently when no exception was thrown).
+      assert(false)
+    } catch {
+      case e: Throwable =>
+        assert(e.getMessage.contains("Please check your input path and make sure " +
+          "that files end with '.csv' and content is not empty"))
+    }
+  }
+
+  test("test loading data with input file not ends with '.csv'") {
+    try {
+      val carbonLoadModel: CarbonLoadModel = new CarbonLoadModel
+      val dataPath = s"$resourcesPath/noneCsvFormat.cs"
+      carbonLoadModel.setFactFilePath(FileUtils.getPaths(dataPath))
+      GlobalDictionaryUtil.loadDataFrame(sqlContext, carbonLoadModel)
+      // A non-'.csv' file must be rejected.
+      assert(false)
+    } catch {
+      case e: Throwable =>
+        assert(e.getMessage.contains("Please check your input path and make sure " +
+          "that files end with '.csv' and content is not empty"))
+    }
+  }
+
+  test("test loading data with input file does not exist") {
+    try {
+      val carbonLoadModel: CarbonLoadModel = new CarbonLoadModel
+      val dataPath = s"$resourcesPath/input_file_does_not_exist.csv"
+      carbonLoadModel.setFactFilePath(FileUtils.getPaths(dataPath))
+      GlobalDictionaryUtil.loadDataFrame(sqlContext, carbonLoadModel)
+      // A missing file must be rejected.
+      assert(false)
+    } catch {
+      case e: Throwable =>
+        assert(e.getMessage.contains("The input file does not exist"))
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadTblNameIsKeyword.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadTblNameIsKeyword.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadTblNameIsKeyword.scala
new file mode 100644
index 0000000..ddfd607
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadTblNameIsKeyword.scala
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataload
+
+import java.io.File
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+/**
+  * Test Class for data loading into tables whose names are data-type
+  * keywords (STRING, DOUBLE, TIMESTAMP), including case-insensitive
+  * resolution and other DDL commands against such tables.
+  */
+class TestLoadTblNameIsKeyword extends QueryTest with BeforeAndAfterAll {
+  // Sample data file shared by every test in this suite.
+  val testData = s"$resourcesPath/dimSample.csv"
+  override def beforeAll {
+    sql("drop table if exists STRING")
+    sql("drop table if exists DoUbLe")
+    sql("drop table if exists timestamp")
+    sql("""
+          CREATE TABLE IF NOT EXISTS STRING
+          (id Int, name String, city String)
+          STORED BY 'org.apache.carbondata.format'
+        """)
+    sql("""
+          CREATE TABLE IF NOT EXISTS DoUbLe
+          (id Int, name String, city String)
+          STORED BY 'org.apache.carbondata.format'
+        """)
+    sql("""
+          CREATE TABLE IF NOT EXISTS timestamp
+          (id Int, name String, city String)
+          STORED BY 'org.apache.carbondata.format'
+        """)
+  }
+
+  test("test load data whose name is a keyword of data type") {
+    sql(s"""
+          LOAD DATA LOCAL INPATH '$testData' into table STRING
+        """)
+    checkAnswer(
+      sql("""
+            SELECT count(*) from STRING
+          """),
+      Seq(Row(20)))
+  }
+
+  test("test case in-sensitiveness") {
+    // The table was created as 'DoUbLe'; resolution must ignore case.
+    sql(s"""
+          LOAD DATA LOCAL INPATH '$testData' into table DoUbLe
+        """)
+    checkAnswer(
+      sql("""
+            SELECT count(*) from DoUbLe
+          """),
+      Seq(Row(20)))
+  }
+
+  test("test other ddl whose table name a keyword of data type") {
+    // Each DDL/management command must parse and run against a table
+    // named after a type keyword; only successful execution is checked.
+    sql("describe timestamp")
+    sql(s"""
+          LOAD DATA LOCAL INPATH '$testData' into table timestamp
+        """)
+    sql("show segments for table timestamp")
+    sql("delete segments from table timestamp where starttime before '2099-10-01 18:00:00'")
+    sql("clean files for table timestamp")
+  }
+
+  override def afterAll {
+    // IF EXISTS keeps cleanup from failing if beforeAll did not complete.
+    sql("drop table if exists STRING")
+    sql("drop table if exists DoUbLe")
+    sql("drop table if exists timestamp")
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestTableLevelBlockSize.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestTableLevelBlockSize.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestTableLevelBlockSize.scala
new file mode 100644
index 0000000..e2a6a06
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestTableLevelBlockSize.scala
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataload
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
+
+/**
+  * Test Class for the table-level 'table_blocksize' TBLPROPERTIES option:
+  * rejection of out-of-range values, rejection of non-integer values, and
+  * a load plus aggregation query against a table created with a custom
+  * block size.
+  */
+class TestTableLevelBlockSize extends QueryTest with BeforeAndAfterAll {
+
+  val testData1 = s"$resourcesPath/dimSample.csv"
+  val testData2 = s"$resourcesPath/example-data.csv"
+
+  override def beforeAll {
+    sql("DROP TABLE IF EXISTS table_blocksize1")
+    sql("DROP TABLE IF EXISTS table_blocksize2")
+    sql("DROP TABLE IF EXISTS table_blocksize3")
+  }
+
+  test("Value test: set table level blocksize value beyond [1,2048]") {
+    // 4096 MB is outside the supported [1, 2048] MB range, so the
+    // CREATE TABLE must be rejected. intercept also fails the test if a
+    // different exception type is thrown.
+    val e = intercept[MalformedCarbonCommandException] {
+      sql(
+        """
+          CREATE TABLE IF NOT EXISTS table_blocksize1
+          (ID Int, date Timestamp, country String,
+          name String, phonetype String, serialname String, salary Int)
+          STORED BY 'org.apache.carbondata.format'
+          TBLPROPERTIES('table_blocksize'='4096 MB')
+        """)
+    }
+    assert(e.getMessage.equals("Invalid table_blocksize value found: 4096, " +
+        "only int value from 1 MB to 2048 MB is supported."))
+  }
+
+  test("Value test: set table level blocksize in not int value") {
+    // '10Y4' is not an integer, so the CREATE TABLE must be rejected.
+    val e = intercept[MalformedCarbonCommandException] {
+      sql(
+        """
+          CREATE TABLE IF NOT EXISTS table_blocksize2
+          (ID Int, date Timestamp, country String,
+          name String, phonetype String, serialname String, salary Int)
+          STORED BY 'org.apache.carbondata.format'
+          TBLPROPERTIES('table_blocksize'='10Y4 MB')
+        """)
+    }
+    assert(e.getMessage.equals("Invalid table_blocksize value found: 10y4, " +
+        "only int value from 1 MB to 2048 MB is supported."))
+  }
+
+  test("Function test: set table level blocksize load and agg query") {
+    // A valid in-range block size: the table must be created, loaded and
+    // queryable as usual.
+    sql(
+      """
+        CREATE TABLE IF NOT EXISTS table_blocksize3
+        (ID Int, date Timestamp, country String,
+        name String, phonetype String, serialname String, salary Int)
+        STORED BY 'org.apache.carbondata.format'
+        TBLPROPERTIES('table_blocksize'='512 MB')
+      """)
+
+    // The CSV's timestamp column uses 'yyyy/MM/dd'; restored in afterAll.
+    CarbonProperties.getInstance()
+        .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
+
+    sql(s"""
+           LOAD DATA LOCAL INPATH '$testData2' into table table_blocksize3
+           """)
+
+    checkAnswer(
+      sql("""
+           SELECT country, count(salary) AS amount
+           FROM table_blocksize3
+           WHERE country IN ('china','france')
+           GROUP BY country
+          """),
+      Seq(Row("china", 849), Row("france", 101))
+    )
+  }
+
+  override def afterAll {
+    sql("DROP TABLE IF EXISTS table_blocksize1")
+    sql("DROP TABLE IF EXISTS table_blocksize2")
+    sql("DROP TABLE IF EXISTS table_blocksize3")
+    // Restore the global timestamp format changed by the load test above.
+    CarbonProperties.getInstance()
+        .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataretention/DataRetentionConcurrencyTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataretention/DataRetentionConcurrencyTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataretention/DataRetentionConcurrencyTestCase.scala
new file mode 100644
index 0000000..b4f266e
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataretention/DataRetentionConcurrencyTestCase.scala
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataretention
+
+import java.util
+import java.util.concurrent.{Callable, Executors}
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+/**
+ * This class contains DataRetention concurrency test cases: LOAD DATA,
+ * DELETE SEGMENT and CLEAN FILES commands are submitted to a thread pool
+ * at the same time, and each command is expected to complete without
+ * raising an exception.
+ */
+class DataRetentionConcurrencyTestCase extends QueryTest with BeforeAndAfterAll {
+
+  // Shared pool on which the concurrent load / delete / clean tasks run.
+  private val executorService = Executors.newFixedThreadPool(10)
+
+  override def beforeAll {
+
+    // Fresh target table with one initial load (segment 0) that the
+    // retention commands in the tests operate on.
+    sql("drop table if exists concurrent")
+    sql(
+      "create table concurrent (ID int, date String, country String, name " +
+      "String," +
+      "phonetype String, serialname String, salary int) stored by 'org.apache.carbondata.format'"
+
+    )
+    sql(
+      s"LOAD DATA LOCAL INPATH '$resourcesPath/dataretention1.csv' INTO TABLE concurrent " +
+      "OPTIONS('DELIMITER' =  ',')")
+  }
+
+  override def afterAll {
+    // shutdownNow interrupts any task still running before the table is dropped.
+    executorService.shutdownNow()
+    sql("drop table if exists concurrent")
+  }
+
+  test("DataRetention_Concurrency_load_id") {
+
+    // Run a load, a delete-by-segment-id and a clean concurrently.
+    // NOTE(review): the three commands may interleave in any order; each
+    // one is expected to succeed regardless of the interleaving.
+    val tasks = new util.ArrayList[Callable[String]]()
+    tasks
+      .add(new QueryTask(s"LOAD DATA LOCAL INPATH '$resourcesPath/dataretention1.csv' INTO TABLE concurrent OPTIONS('DELIMITER' =  ',')"))
+    tasks.add(new QueryTask("Delete segment 0 from table concurrent"))
+    tasks.add(new QueryTask("clean files for table concurrent"))
+    // invokeAll blocks until every task has finished.
+    val results = executorService.invokeAll(tasks)
+    for (i <- 0 until tasks.size()) {
+      val res = results.get(i).get
+      assert("PASS".equals(res))
+    }
+    sql("show segments for table concurrent").show()
+
+  }
+
+  test("DataRetention_Concurrency_load_date") {
+
+    // Extra load so a segment exists for the date-based delete below.
+    sql(
+      s"LOAD DATA LOCAL INPATH '$resourcesPath/dataretention1.csv' INTO TABLE concurrent " +
+      "OPTIONS('DELIMITER' =  ',')")
+
+    // Same pattern as above, but deleting by load start time instead of id.
+    val tasks = new util.ArrayList[Callable[String]]()
+    tasks
+      .add(new QueryTask(s"LOAD DATA LOCAL INPATH '$resourcesPath/dataretention1.csv' INTO TABLE concurrent OPTIONS('DELIMITER' =  ',')"))
+    tasks
+      .add(new QueryTask(
+        "DELETE SEGMENTS FROM TABLE concurrent where STARTTIME before '2099-01-01 00:00:00'"))
+    tasks.add(new QueryTask("clean files for table concurrent"))
+    val results = executorService.invokeAll(tasks)
+    for (i <- 0 until tasks.size()) {
+      val res = results.get(i).get
+      assert("PASS".equals(res))
+    }
+    sql("show segments for table concurrent").show()
+
+  }
+
+  // Runs one SQL command on the pool; yields "PASS" on success and "FAIL"
+  // if the command throws any Exception (Errors still propagate).
+  class QueryTask(query: String) extends Callable[String] {
+    override def call(): String = {
+      var result = "PASS"
+      try {
+        LOGGER.info("Executing :" + Thread.currentThread().getName)
+        sql(query)
+      } catch {
+        case _: Exception =>
+          result = "FAIL"
+      }
+      result
+    }
+  }
+
+}



Mime
View raw message