carbondata-commits mailing list archives

From jack...@apache.org
Subject [08/38] incubator-carbondata git commit: reuse test case for integration module
Date Sat, 07 Jan 2017 16:36:42 GMT
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBoundaryConditionsTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBoundaryConditionsTest.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBoundaryConditionsTest.scala
deleted file mode 100644
index fad2ba2..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBoundaryConditionsTest.scala
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.carbondata.spark.testsuite.datacompaction
-
-import java.io.File
-
-import org.apache.carbondata.core.updatestatus.SegmentStatusManager
-
-import scala.collection.JavaConverters._
-
-import org.apache.spark.sql.common.util.CarbonHiveContext._
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.core.carbon.{AbsoluteTableIdentifier, CarbonTableIdentifier}
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-
-/**
- * FT for data compaction Boundary condition verification.
- */
-class DataCompactionBoundaryConditionsTest extends QueryTest with BeforeAndAfterAll {
-  val currentDirectory = new File(this.getClass.getResource("/").getPath + "/../../")
-    .getCanonicalPath
-  val resource = currentDirectory + "/src/test/resources/"
-
-  val storeLocation = new File(this.getClass.getResource("/").getPath + "/../test").getCanonicalPath
-  val carbonTableIdentifier: CarbonTableIdentifier =
-    new CarbonTableIdentifier("default", "boundarytest".toLowerCase(), "1")
-
-  override def beforeAll {
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD, "2,2")
-    sql("drop table if exists  boundarytest")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
-    sql(
-      "CREATE TABLE IF NOT EXISTS boundarytest (country String, ID Int, date " +
-      "Timestamp, name " +
-      "String, " +
-      "phonetype String, serialname String, salary Int) STORED BY 'org.apache.carbondata" +
-      ".format'"
-    )
-
-  }
-
-  /**
-   * Compaction verification in case of no loads.
-   */
-  test("check if compaction is completed correctly.") {
-
-    try {
-      sql("alter table boundarytest compact 'minor'")
-      sql("alter table boundarytest compact 'major'")
-    }
-    catch {
-      case e: Exception =>
-        assert(false)
-    }
-  }
-
-  /**
-   * Compaction verification in case of one load.
-   */
-  test("check if compaction is completed correctly for one load.") {
-
-    val currentDirectory = new File(this.getClass.getResource("/").getPath + "/../../")
-      .getCanonicalPath
-    var csvFilePath1 = currentDirectory + "/src/test/resources/compaction/compaction1.csv"
-
-
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE boundarytest " +
-        "OPTIONS" +
-        "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("alter table boundarytest compact 'minor'")
-    sql("alter table boundarytest compact 'major'")
-
-  }
-
-
-  override def afterAll {
-    sql("drop table if exists  boundarytest")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionCardinalityBoundryTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionCardinalityBoundryTest.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionCardinalityBoundryTest.scala
deleted file mode 100644
index 58d89a9..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionCardinalityBoundryTest.scala
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.carbondata.spark.testsuite.datacompaction
-
-import java.io.File
-
-import org.apache.carbondata.core.updatestatus.SegmentStatusManager
-import org.apache.spark.sql.Row
-import org.apache.spark.sql.common.util.CarbonHiveContext._
-import org.apache.spark.sql.common.util.QueryTest
-import org.apache.carbondata.core.carbon.{AbsoluteTableIdentifier, CarbonTableIdentifier}
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-import org.scalatest.BeforeAndAfterAll
-
-import scala.collection.JavaConverters._
-
-/**
-  * FT for data compaction scenario.
-  */
-class DataCompactionCardinalityBoundryTest extends QueryTest with BeforeAndAfterAll {
-
-  override def beforeAll {
-    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "true")
-    sql("drop table if exists  cardinalityTest")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
-    sql(
-      "CREATE TABLE IF NOT EXISTS cardinalityTest (country String, ID String, date Timestamp, name " +
-        "String, " +
-        "phonetype String, serialname String, salary Int) STORED BY 'org.apache.carbondata" +
-        ".format'"
-    )
-
-
-    val currentDirectory = new File(this.getClass.getResource("/").getPath + "/../../")
-      .getCanonicalPath
-    val csvFilePath1 = currentDirectory + "/src/test/resources/compaction/compaction1.csv"
-
-    // load more than 256 rows so that the column cardinality crosses the byte boundary.
-    val csvFilePath2 = currentDirectory + "/src/test/resources/compaction/compactioncard2.csv"
-
-    val csvFilePath3 = currentDirectory + "/src/test/resources/compaction/compaction3.csv"
-
-
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE cardinalityTest OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE cardinalityTest  OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    // compaction will happen here.
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath3 + "' INTO TABLE cardinalityTest  OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    // compaction will happen here.
-    sql("alter table cardinalityTest compact 'major'"
-    )
-
-  }
-
-  test("check if compaction is completed or not and  verify select query.") {
-    var status = true
-    var noOfRetries = 0
-    while (status && noOfRetries < 10) {
-
-      val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(new
-          AbsoluteTableIdentifier(
-            CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
-            new CarbonTableIdentifier("default", "cardinalityTest", "1")
-          )
-      )
-      val segments = segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
-
-      if (!segments.contains("0.1")) {
-        // wait 500 ms before retrying, to allow compaction to complete.
-        Thread.sleep(500)
-        noOfRetries += 1
-      }
-      else {
-        status = false
-      }
-    }
-    // now check the answer; it should be the same.
-    checkAnswer(
-      sql("select country,count(*) from cardinalityTest group by country"),
-      Seq(Row("america",1),
-        Row("canada",1),
-        Row("chile",1),
-        Row("china",2),
-        Row("england",1),
-        Row("burma",152),
-        Row("butan",101),
-        Row("mexico",1),
-        Row("newzealand",1),
-        Row("westindies",1),
-        Row("india",1),
-        Row("iran",1),
-        Row("iraq",1),
-        Row("ireland",1)
-      )
-    )
-  }
-
-  override def afterAll {
-    sql("drop table if exists  cardinalityTest")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
-    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "false")
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionLockTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionLockTest.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionLockTest.scala
deleted file mode 100644
index e138f62..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionLockTest.scala
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.carbondata.spark.testsuite.datacompaction
-
-import java.io.File
-
-import org.apache.carbondata.core.updatestatus.SegmentStatusManager
-import org.apache.carbondata.locks.{LockUsage, CarbonLockFactory, ICarbonLock}
-
-import scala.collection.JavaConverters._
-
-import org.apache.spark.sql.common.util.CarbonHiveContext._
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.core.carbon.path.{CarbonStorePath, CarbonTablePath}
-import org.apache.carbondata.core.carbon.{AbsoluteTableIdentifier, CarbonTableIdentifier}
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-
-/**
-  * FT for data compaction Locking scenario.
-  */
-class DataCompactionLockTest extends QueryTest with BeforeAndAfterAll {
-
-  val absoluteTableIdentifier: AbsoluteTableIdentifier = new
-      AbsoluteTableIdentifier(
-        CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
-        new CarbonTableIdentifier(
-          CarbonCommonConstants.DATABASE_DEFAULT_NAME, "compactionlocktesttable", "1")
-      )
-  val carbonTablePath: CarbonTablePath = CarbonStorePath
-    .getCarbonTablePath(absoluteTableIdentifier.getStorePath,
-      absoluteTableIdentifier.getCarbonTableIdentifier
-    )
-  val dataPath: String = carbonTablePath.getMetadataDirectoryPath
-
-  val carbonLock: ICarbonLock =
-    CarbonLockFactory
-      .getCarbonLockObj(absoluteTableIdentifier.getCarbonTableIdentifier, LockUsage.COMPACTION_LOCK)
-
-  override def beforeAll {
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.ENABLE_CONCURRENT_COMPACTION, "true")
-    sql("drop table if exists  compactionlocktesttable")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
-    sql(
-      "CREATE TABLE IF NOT EXISTS compactionlocktesttable (country String, ID Int, date " +
-        "Timestamp, name " +
-        "String, " +
-        "phonetype String, serialname String, salary Int) STORED BY 'org.apache.carbondata" +
-        ".format'"
-    )
-
-    val currentDirectory = new File(this.getClass.getResource("/").getPath + "/../../")
-      .getCanonicalPath
-    var csvFilePath1 = currentDirectory + "/src/test/resources/compaction/compaction1.csv"
-
-    var csvFilePath2 = currentDirectory + "/src/test/resources/compaction/compaction2.csv"
-    var csvFilePath3 = currentDirectory + "/src/test/resources/compaction/compaction3.csv"
-
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE compactionlocktesttable " +
-      "OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE compactionlocktesttable  " +
-      "OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath3 + "' INTO TABLE compactionlocktesttable  " +
-      "OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    // take the lock so that the next compaction fails.
-    carbonLock.lockWithRetries()
-
-    // trigger compaction here; it should fail because the lock is held.
-    try{
-      sql("alter table compactionlocktesttable compact 'major'")
-    }
-    catch {
-      case e : Exception =>
-        assert(true)
-    }
-  }
-
-  /**
-    * Compaction should fail because the lock is held on purpose.
-    */
-  test("check if compaction is failed or not.") {
-
-    val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(
-      absoluteTableIdentifier
-    )
-    val segments = segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
-
-    if (!segments.contains("0.1")) {
-      assert(true)
-    }
-    else {
-      assert(false)
-    }
-  }
-
-
-  override def afterAll {
-    sql("drop table if exists  compactionlocktesttable")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
-    carbonLock.unlock()
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionMinorThresholdTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionMinorThresholdTest.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionMinorThresholdTest.scala
index 257f382..4fba844 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionMinorThresholdTest.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionMinorThresholdTest.scala
@@ -18,30 +18,20 @@
  */
 package org.apache.carbondata.spark.testsuite.datacompaction
 
-import java.io.File
-
-import org.apache.carbondata.core.updatestatus.SegmentStatusManager
-
 import scala.collection.JavaConverters._
 
-import org.apache.spark.sql.common.util.CarbonHiveContext._
 import org.apache.spark.sql.common.util.QueryTest
 import org.scalatest.BeforeAndAfterAll
 
-import org.apache.carbondata.core.carbon.path.{CarbonStorePath, CarbonTablePath}
 import org.apache.carbondata.core.carbon.{AbsoluteTableIdentifier, CarbonTableIdentifier}
 import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.updatestatus.SegmentStatusManager
 import org.apache.carbondata.core.util.CarbonProperties
 
 /**
  * FT for data compaction Minor threshold verification.
  */
 class DataCompactionMinorThresholdTest extends QueryTest with BeforeAndAfterAll {
-  val currentDirectory = new File(this.getClass.getResource("/").getPath + "/../../")
-    .getCanonicalPath
-  val resource = currentDirectory + "/src/test/resources/"
-
-  val storeLocation = new File(this.getClass.getResource("/").getPath + "/../test").getCanonicalPath
   val carbonTableIdentifier: CarbonTableIdentifier =
     new CarbonTableIdentifier("default", "minorthreshold".toLowerCase(), "1")
 
@@ -61,12 +51,9 @@ class DataCompactionMinorThresholdTest extends QueryTest with BeforeAndAfterAll
       ".format'"
     )
 
-    val currentDirectory = new File(this.getClass.getResource("/").getPath + "/../../")
-      .getCanonicalPath
-    var csvFilePath1 = currentDirectory + "/src/test/resources/compaction/compaction1.csv"
-
-    var csvFilePath2 = currentDirectory + "/src/test/resources/compaction/compaction2.csv"
-    var csvFilePath3 = currentDirectory + "/src/test/resources/compaction/compaction3.csv"
+    val csvFilePath1 = s"$resourcesPath/compaction/compaction1.csv"
+    val csvFilePath2 = s"$resourcesPath/compaction/compaction2.csv"
+    val csvFilePath3 = s"$resourcesPath/compaction/compaction3.csv"
 
     sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE minorthreshold " +
         "OPTIONS" +
@@ -106,8 +93,8 @@ class DataCompactionMinorThresholdTest extends QueryTest with BeforeAndAfterAll
     assert(!segments.contains("3"))
   }
 
-
   override def afterAll {
+    sql("drop table if exists  minorthreshold")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
     CarbonProperties.getInstance()

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionNoDictionaryTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionNoDictionaryTest.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionNoDictionaryTest.scala
index 42f9cfb..a378abe 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionNoDictionaryTest.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionNoDictionaryTest.scala
@@ -18,19 +18,16 @@
  */
 package org.apache.carbondata.spark.testsuite.datacompaction
 
-import java.io.File
-
-import org.apache.carbondata.core.updatestatus.SegmentStatusManager
-
 import scala.collection.JavaConverters._
 
 import org.apache.spark.sql.Row
-import org.apache.spark.sql.common.util.CarbonHiveContext._
 import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
 import org.apache.carbondata.core.carbon.{AbsoluteTableIdentifier, CarbonTableIdentifier}
 import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.updatestatus.SegmentStatusManager
 import org.apache.carbondata.core.util.CarbonProperties
-import org.scalatest.BeforeAndAfterAll
 
 /**
   * FT for data compaction scenario.
@@ -47,12 +44,9 @@ class DataCompactionNoDictionaryTest extends QueryTest with BeforeAndAfterAll {
     segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
   }
 
-  val currentDirectory = new File(this.getClass.getResource("/").getPath + "/../../")
-    .getCanonicalPath
-
-  var csvFilePath1 = currentDirectory + "/src/test/resources/compaction/compaction1.csv"
-  var csvFilePath2 = currentDirectory + "/src/test/resources/compaction/compaction2.csv"
-  var csvFilePath3 = currentDirectory + "/src/test/resources/compaction/compaction3.csv"
+  var csvFilePath1 = s"$resourcesPath/compaction/compaction1.csv"
+  var csvFilePath2 = s"$resourcesPath/compaction/compaction2.csv"
+  var csvFilePath3 = s"$resourcesPath/compaction/compaction3.csv"
 
   override def beforeAll {
     CarbonProperties.getInstance()

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
index 3d2e4fe..66be271 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
@@ -18,19 +18,15 @@
  */
 package org.apache.carbondata.spark.testsuite.datacompaction
 
-import java.io.File
-
-import org.apache.carbondata.core.updatestatus.SegmentStatusManager
-
 import scala.collection.JavaConverters._
 
 import org.apache.spark.sql.Row
-import org.apache.spark.sql.common.util.CarbonHiveContext._
 import org.apache.spark.sql.common.util.QueryTest
 import org.scalatest.BeforeAndAfterAll
 
 import org.apache.carbondata.core.carbon.{AbsoluteTableIdentifier, CarbonTableIdentifier}
 import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.updatestatus.SegmentStatusManager
 import org.apache.carbondata.core.util.CarbonProperties
 
 /**
@@ -50,13 +46,9 @@ class DataCompactionTest extends QueryTest with BeforeAndAfterAll {
         ".format'"
     )
 
-
-    val currentDirectory = new File(this.getClass.getResource("/").getPath + "/../../")
-      .getCanonicalPath
-    val csvFilePath1 = currentDirectory + "/src/test/resources/compaction/compaction1.csv"
-
-    val csvFilePath2 = currentDirectory + "/src/test/resources/compaction/compaction2.csv"
-    val csvFilePath3 = currentDirectory + "/src/test/resources/compaction/compaction3.csv"
+    val csvFilePath1 = s"$resourcesPath/compaction/compaction1.csv"
+    val csvFilePath2 = s"$resourcesPath/compaction/compaction2.csv"
+    val csvFilePath3 = s"$resourcesPath/compaction/compaction3.csv"
 
     sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE normalcompaction OPTIONS" +
       "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionIgnoreInMinorTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionIgnoreInMinorTest.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionIgnoreInMinorTest.scala
deleted file mode 100644
index b05ca01..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionIgnoreInMinorTest.scala
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.carbondata.spark.testsuite.datacompaction
-
-import java.io.File
-
-import scala.collection.JavaConverters._
-
-import org.apache.carbondata.core.carbon.path.CarbonStorePath
-import org.apache.carbondata.core.updatestatus.SegmentStatusManager
-import org.apache.spark.sql.Row
-import org.apache.spark.sql.common.util.CarbonHiveContext._
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.core.carbon.{AbsoluteTableIdentifier, CarbonTableIdentifier}
-import org.apache.carbondata.core.carbon.datastore.TableSegmentUniqueIdentifier
-import org.apache.carbondata.core.carbon.datastore.block.SegmentTaskIndexWrapper
-import org.apache.carbondata.core.carbon.path.CarbonStorePath
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-import org.apache.carbondata.hadoop.CacheClient
-
-/**
-  * FT for compaction scenario where a major-compacted segment should not be included in minor compaction.
-  */
-class MajorCompactionIgnoreInMinorTest extends QueryTest with BeforeAndAfterAll {
-
-  override def beforeAll {
-    CarbonProperties.getInstance().addProperty("carbon.compaction.level.threshold", "2,2")
-    sql("drop table if exists  ignoremajor")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
-    sql(
-      "CREATE TABLE IF NOT EXISTS ignoremajor (country String, ID Int, date Timestamp, name " +
-        "String, " +
-        "phonetype String, serialname String, salary Int) STORED BY 'org.apache.carbondata" +
-        ".format'"
-    )
-
-
-    val currentDirectory = new File(this.getClass.getResource("/").getPath + "/../../")
-      .getCanonicalPath
-    val csvFilePath1 = currentDirectory + "/src/test/resources/compaction/compaction1.csv"
-
-    val csvFilePath2 = currentDirectory + "/src/test/resources/compaction/compaction2.csv"
-    val csvFilePath3 = currentDirectory + "/src/test/resources/compaction/compaction3.csv"
-
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE ignoremajor OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE ignoremajor  OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    // compaction will happen here.
-    sql("alter table ignoremajor compact 'major'"
-    )
-      sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE ignoremajor OPTIONS" +
-        "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-      )
-      sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE ignoremajor  OPTIONS" +
-        "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-      )
-      sql("alter table ignoremajor compact 'minor'"
-      )
-
-  }
-
-
-  /**
-    * Test that a major-compacted segment is not included in minor compaction.
-    */
-  test("delete merged folder and check segments") {
-    // delete merged segments
-    sql("clean files for table ignoremajor")
-    sql("select * from ignoremajor").show()
-    val identifier = new AbsoluteTableIdentifier(
-          CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
-          new CarbonTableIdentifier(
-            CarbonCommonConstants.DATABASE_DEFAULT_NAME, "ignoremajor", "rrr")
-        )
-    val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(identifier)
-
-    // merged segment should not be there
-    val segments = segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
-    assert(segments.contains("0.1"))
-    assert(segments.contains("2.1"))
-    assert(!segments.contains("2"))
-    assert(!segments.contains("3"))
-    val cacheClient = new CacheClient(CarbonProperties.getInstance.
-      getProperty(CarbonCommonConstants.STORE_LOCATION));
-    val segmentIdentifier = new TableSegmentUniqueIdentifier(identifier, "2")
-    val wrapper: SegmentTaskIndexWrapper = cacheClient.getSegmentAccessClient.
-      getIfPresent(segmentIdentifier)
-    assert(null == wrapper)
-
-  }
-
-  /**
-    * Delete should not work on compacted segment.
-    */
-  test("delete compacted segment and check status") {
-    try {
-      sql("delete segment 2 from table ignoremajor")
-      assert(false)
-    }
-    catch {
-      case _:Throwable => assert(true)
-    }
-    val carbontablePath = CarbonStorePath
-      .getCarbonTablePath(CarbonProperties.getInstance
-        .getProperty(CarbonCommonConstants.STORE_LOCATION),
-        new CarbonTableIdentifier(
-          CarbonCommonConstants.DATABASE_DEFAULT_NAME, "ignoremajor", "rrr")
-      )
-      .getMetadataDirectoryPath
-    val segs = SegmentStatusManager.readLoadMetadata(carbontablePath)
-
-    // status should remain as compacted.
-    assert(segs(3).getLoadStatus.equalsIgnoreCase(CarbonCommonConstants.COMPACTED))
-
-  }
-
-  /**
-    * Delete should not work on compacted segment.
-    */
-  test("delete compacted segment by date and check status") {
-    sql(
-      "DELETE SEGMENTS FROM TABLE ignoremajor where STARTTIME before" +
-        " '2222-01-01 19:35:01'"
-    )
-    val carbontablePath = CarbonStorePath
-      .getCarbonTablePath(CarbonProperties.getInstance
-        .getProperty(CarbonCommonConstants.STORE_LOCATION),
-        new CarbonTableIdentifier(
-          CarbonCommonConstants.DATABASE_DEFAULT_NAME, "ignoremajor", "rrr")
-      )
-      .getMetadataDirectoryPath
-    val segs = SegmentStatusManager.readLoadMetadata(carbontablePath)
-
-    // status should remain as compacted for segment 2.
-    assert(segs(3).getLoadStatus.equalsIgnoreCase(CarbonCommonConstants.COMPACTED))
-    // segment 0.1 should get deleted.
-    assert(segs(2).getLoadStatus.equalsIgnoreCase(CarbonCommonConstants.MARKED_FOR_DELETE))
-
-  }
-
-  override def afterAll {
-    sql("drop table if exists  ignoremajor")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionStopsAfterCompaction.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionStopsAfterCompaction.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionStopsAfterCompaction.scala
deleted file mode 100644
index f816c15..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionStopsAfterCompaction.scala
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.carbondata.spark.testsuite.datacompaction
-
-import java.io.File
-
-import org.apache.carbondata.core.updatestatus.SegmentStatusManager
-import org.apache.spark.sql.common.util.CarbonHiveContext._
-import org.apache.spark.sql.common.util.QueryTest
-import org.apache.carbondata.core.carbon.{AbsoluteTableIdentifier, CarbonTableIdentifier}
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-import org.scalatest.BeforeAndAfterAll
-
-import scala.collection.JavaConverters._
-
-/**
-  * FT for compaction scenario where major compaction will only compact the segments which are
-  * present at the time of triggering the compaction.
-  */
-class MajorCompactionStopsAfterCompaction extends QueryTest with BeforeAndAfterAll {
-
-  override def beforeAll {
-    sql("drop table if exists  stopmajor")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
-    sql(
-      "CREATE TABLE IF NOT EXISTS stopmajor (country String, ID decimal(7,4), date Timestamp, name " +
-        "String, " +
-        "phonetype String, serialname String, salary Int) STORED BY 'org.apache.carbondata" +
-        ".format'"
-    )
-
-
-    val currentDirectory = new File(this.getClass.getResource("/").getPath + "/../../")
-      .getCanonicalPath
-    val csvFilePath1 = currentDirectory + "/src/test/resources/compaction/compaction1.csv"
-
-    val csvFilePath2 = currentDirectory + "/src/test/resources/compaction/compaction2.csv"
-    val csvFilePath3 = currentDirectory + "/src/test/resources/compaction/compaction3.csv"
-
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE stopmajor OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE stopmajor  OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    // compaction will happen here.
-    sql("alter table stopmajor compact 'major'"
-    )
-    Thread.sleep(2000)
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE stopmajor OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE stopmajor  OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    if (checkCompactionCompletedOrNot("0.1")) {
-    }
-
-  }
-
-  /**
-    * Check if the compaction is completed or not.
-    *
-    * @param requiredSeg
-    * @return
-    */
-  def checkCompactionCompletedOrNot(requiredSeg: String): Boolean = {
-    var status = false
-    var noOfRetries = 0
-    while (!status && noOfRetries < 10) {
-
-      val identifier = new AbsoluteTableIdentifier(
-            CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
-            new CarbonTableIdentifier(
-              CarbonCommonConstants.DATABASE_DEFAULT_NAME, "stopmajor", noOfRetries + "")
-          )
-
-      val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(identifier)
-
-      val segments = segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
-      segments.foreach(seg =>
-        System.out.println( "valid segment is =" + seg)
-      )
-
-      if (!segments.contains(requiredSeg)) {
-        // wait for 2 seconds for compaction to complete.
-        System.out.println("sleping for 2 seconds.")
-        Thread.sleep(2000)
-        noOfRetries += 1
-      }
-      else {
-        status = true
-      }
-    }
-    return status
-  }
-
-  /**
-    * Test that only the segments present when major compaction was triggered were merged.
-    */
-  test("delete merged folder and check segments") {
-    // delete merged segments
-    sql("clean files for table stopmajor")
-
-    val identifier = new AbsoluteTableIdentifier(
-          CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
-          new CarbonTableIdentifier(CarbonCommonConstants.DATABASE_DEFAULT_NAME, "stopmajor", "rrr")
-        )
-
-    val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(identifier)
-
-    // merged segment should not be there
-    val segments = segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
-    assert(segments.contains("0.1"))
-    assert(!segments.contains("0.2"))
-    assert(!segments.contains("0"))
-    assert(!segments.contains("1"))
-    assert(segments.contains("2"))
-    assert(segments.contains("3"))
-
-  }
-
-  override def afterAll {
-    sql("drop table if exists  stopmajor")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadPartitionCoalescer.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadPartitionCoalescer.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadPartitionCoalescer.scala
deleted file mode 100644
index 3aa2575..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadPartitionCoalescer.scala
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.carbondata.spark.testsuite.dataload
-
-import org.apache.hadoop.fs.Path
-import org.apache.hadoop.mapreduce.lib.input.FileSplit
-import org.apache.spark.{Partition, SerializableWritable, SparkContext, TaskContext}
-import org.apache.spark.rdd.{DataLoadPartitionCoalescer, RDD}
-import org.apache.spark.sql.Row
-import org.apache.spark.sql.catalyst.expressions.GenericRow
-import org.apache.spark.sql.common.util.CarbonHiveContext._
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-class TestDataLoadPartitionCoalescer extends QueryTest with BeforeAndAfterAll {
-  var nodeList: Array[String] = _
-
-  class DummyPartition(val index: Int,
-                       rawSplit: FileSplit) extends Partition {
-    val serializableHadoopSplit = new SerializableWritable(rawSplit)
-  }
-
-  class Dummy(sc: SparkContext, partitions: Array[Partition]) extends RDD[Row](sc, Nil) {
-    override def compute(split: Partition, context: TaskContext): Iterator[Row] = {
-      new Iterator[Row] {
-        var isFirst = true;
-        override def hasNext: Boolean = isFirst;
-
-        override def next(): Row = {
-          isFirst = false
-          new GenericRow(Array[Any]())
-        }
-      }
-    }
-
-    override protected def getPartitions: Array[Partition] = partitions
-
-    override protected def getPreferredLocations(split: Partition): Seq[String] = {
-      split.asInstanceOf[DummyPartition].serializableHadoopSplit.value.getLocations.toSeq
-    }
-
-  }
-
-  override def beforeAll: Unit = {
-    nodeList = Array("host1", "host2", "host3")
-
-  }
-
-  def createPartition(index: Int, file: String, hosts: Array[String]) : Partition = {
-    new DummyPartition(index, new FileSplit(new Path(file), 0, 1, hosts))
-  }
-
-  def repartition(parts: Array[Partition]): Array[Partition] = {
-    new DataLoadPartitionCoalescer(new Dummy(sparkContext, parts), nodeList).run
-  }
-
-  def checkPartition(prevParts: Array[Partition], parts: Array[Partition]): Unit = {
-    DataLoadPartitionCoalescer.checkPartition(prevParts, parts)
-  }
-
-  test("test number of partitions is more than nodes's") {
-    val prevParts = Array[Partition](
-      createPartition(0, "1.csv", Array("host1", "host2", "host3")),
-      createPartition(1, "2.csv", Array("host1", "host2", "host3")),
-      createPartition(2, "3.csv", Array("host1", "host2", "host3")),
-      createPartition(3, "4.csv", Array("host1", "host2", "host3")),
-      createPartition(4, "5.csv", Array("host1", "host2", "host3"))
-    )
-    val parts = repartition(prevParts)
-    assert(parts.size == 3)
-    checkPartition(prevParts, parts)
-  }
-
-  test("test number of partitions equals nodes's") {
-    val prevParts = Array[Partition](
-      createPartition(0, "1.csv", Array("host1", "host2", "host3")),
-      createPartition(1, "2.csv", Array("host1", "host2", "host3")),
-      createPartition(2, "3.csv", Array("host1", "host2", "host3"))
-    )
-    val parts = repartition(prevParts)
-    assert(parts.size == 3)
-    checkPartition(prevParts, parts)
-  }
-
-  test("test number of partitions is less than nodes's") {
-    val prevParts = Array[Partition](
-      createPartition(0, "1.csv", Array("host1", "host2", "host3")),
-      createPartition(1, "2.csv", Array("host1", "host2", "host3"))
-    )
-    val parts = repartition(prevParts)
-    assert(parts.size == 2)
-    checkPartition(prevParts, parts)
-  }
-
-  test("all partitions are locality") {
-    val prevParts = Array[Partition](
-      createPartition(0, "1.csv", Array("host1", "host2", "host3")),
-      createPartition(1, "2.csv", Array("host1", "host2", "host3"))
-    )
-    val parts = repartition(prevParts)
-    assert(parts.size == 2)
-    checkPartition(prevParts, parts)
-  }
-
-  test("part of partitions are locality1") {
-    val prevParts = Array[Partition](
-      createPartition(0, "1.csv", Array("host1", "host2", "host3")),
-      createPartition(1, "2.csv", Array("host1", "host2", "host4")),
-      createPartition(2, "3.csv", Array("host4", "host5", "host6"))
-    )
-    val parts = repartition(prevParts)
-    assert(parts.size == 3)
-    checkPartition(prevParts, parts)
-  }
-
-  test("part of partitions are locality2") {
-    val prevParts = Array[Partition](
-      createPartition(0, "1.csv", Array("host1", "host2", "host3")),
-      createPartition(1, "2.csv", Array("host1", "host2", "host4")),
-      createPartition(2, "3.csv", Array("host3", "host5", "host6"))
-    )
-    val parts = repartition(prevParts)
-    assert(parts.size == 3)
-    checkPartition(prevParts, parts)
-  }
-
-  test("part of partitions are locality3") {
-    val prevParts = Array[Partition](
-      createPartition(0, "1.csv", Array("host1", "host2", "host7")),
-      createPartition(1, "2.csv", Array("host1", "host2", "host4")),
-      createPartition(2, "3.csv", Array("host4", "host5", "host6"))
-    )
-    val parts = repartition(prevParts)
-    assert(parts.size == 3)
-    checkPartition(prevParts, parts)
-  }
-
-  test("all partition are not locality") {
-    val prevParts = Array[Partition](
-      createPartition(0, "1.csv", Array()),
-      createPartition(1, "2.csv", Array()),
-      createPartition(2, "3.csv", Array("host4", "host5", "host6"))
-    )
-    val parts = repartition(prevParts)
-    assert(parts.size == 3)
-    checkPartition(prevParts, parts)
-  }
-
-  override def afterAll {
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadWithColumnsMoreThanSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadWithColumnsMoreThanSchema.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadWithColumnsMoreThanSchema.scala
deleted file mode 100644
index 931e4e0..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadWithColumnsMoreThanSchema.scala
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.carbondata.spark.testsuite.dataload
-
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-import org.apache.spark.sql.common.util.CarbonHiveContext._
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
-
-/**
- * This class tests data loads in which the number of columns in the data exceeds
- * the number of columns in the schema.
- */
-class TestDataLoadWithColumnsMoreThanSchema extends QueryTest with BeforeAndAfterAll {
-
-  override def beforeAll {
-    sql("DROP TABLE IF EXISTS char_test")
-    sql("DROP TABLE IF EXISTS hive_char_test")
-    sql("CREATE TABLE char_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
-    sql("CREATE TABLE hive_char_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)row format delimited fields terminated by ','")
-    sql("LOAD DATA LOCAL INPATH './src/test/resources/character_carbon.csv' into table char_test")
-    sql("LOAD DATA local inpath './src/test/resources/character_hive.csv' INTO table hive_char_test")
-  }
-
-  test("test count(*) to check for data loss") {
-    checkAnswer(sql("select count(*) from char_test"),
-      sql("select count(*) from hive_char_test"))
-  }
-
-  test("test for invalid value of maxColumns") {
-    sql("DROP TABLE IF EXISTS max_columns_test")
-    sql("CREATE TABLE max_columns_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
-    try {
-      sql("LOAD DATA LOCAL INPATH './src/test/resources/character_carbon.csv' into table max_columns_test options('MAXCOLUMNS'='avfgd')")
-      assert(false)
-    } catch {
-      case _: Throwable => assert(true)
-    }
-  }
-
-  test("test for valid value of maxColumns") {
-    sql("DROP TABLE IF EXISTS valid_max_columns_test")
-    sql("CREATE TABLE valid_max_columns_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
-    try {
-      sql("LOAD DATA LOCAL INPATH './src/test/resources/character_carbon.csv' into table valid_max_columns_test options('MAXCOLUMNS'='400')")
-      checkAnswer(sql("select count(*) from valid_max_columns_test"),
-        sql("select count(*) from hive_char_test"))
-    } catch {
-      case _: Throwable => assert(false)
-    }
-  }
-
-  test("test with invalid maxColumns value") {
-    sql(
-      "CREATE TABLE max_columns_value_test (imei string,age int,task bigint,num double,level " +
-      "decimal(10,3),productdate timestamp,mark int,name string) STORED BY 'org.apache.carbondata" +
-      ".format'")
-    try {
-      sql(
-        "LOAD DATA LOCAL INPATH './src/test/resources/character_carbon.csv' into table " +
-        "max_columns_value_test options('FILEHEADER='imei,age','MAXCOLUMNS'='2')")
-      throw new MalformedCarbonCommandException("Invalid")
-    } catch {
-      case me: MalformedCarbonCommandException =>
-        assert(false)
-      case _: Throwable => assert(true)
-    }
-  }
-
-  test("test for maxcolumns option value greater than threshold value for maxcolumns") {
-    sql("DROP TABLE IF EXISTS valid_max_columns_test")
-    sql("CREATE TABLE valid_max_columns_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
-    try {
-      sql("LOAD DATA LOCAL INPATH './src/test/resources/character_carbon.csv' into table valid_max_columns_test options('MAXCOLUMNS'='22000')")
-      checkAnswer(sql("select count(*) from valid_max_columns_test"),
-        sql("select count(*) from hive_char_test"))
-    } catch {
-      case _: Throwable => assert(false)
-    }
-  }
-
-  test("test for boundary value for maxcolumns") {
-    sql("DROP TABLE IF EXISTS boundary_max_columns_test")
-    sql("CREATE TABLE boundary_max_columns_test (empno string, empname String, designation String, doj String, " +
-        "workgroupcategory string, workgroupcategoryname String, deptno string, deptname String, " +
-        "projectcode string, projectjoindate String, projectenddate String,attendance double," +
-        "utilization double,salary double) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES" +
-        "('DICTIONARY_EXCLUDE'='empno,empname,designation,doj,workgroupcategory," +
-        "workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate')")
-    try {
-      sql("LOAD DATA LOCAL INPATH './src/test/resources/data.csv' into table boundary_max_columns_test options('MAXCOLUMNS'='14')")
-      assert(true)
-    } catch {
-      case _: Throwable => assert(false)
-    }
-  }
-
-  test("test for maxcolumns value less than columns in 1st line of csv file") {
-    sql("DROP TABLE IF EXISTS boundary_max_columns_test")
-    sql("CREATE TABLE boundary_max_columns_test (empno string, empname String, designation String, doj String, " +
-        "workgroupcategory string, workgroupcategoryname String, deptno string, deptname String, " +
-        "projectcode string, projectjoindate String, projectenddate String,attendance double," +
-        "utilization double,salary double) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES" +
-        "('DICTIONARY_EXCLUDE'='empno,empname,designation,doj,workgroupcategory," +
-        "workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate')")
-    try {
-      sql("LOAD DATA LOCAL INPATH './src/test/resources/data.csv' into table boundary_max_columns_test options('MAXCOLUMNS'='13')")
-      assert(true)
-    } catch {
-      case _: Throwable => assert(false)
-    }
-  }
-
-  override def afterAll {
-    sql("DROP TABLE IF EXISTS char_test")
-    sql("DROP TABLE IF EXISTS hive_char_test")
-    sql("DROP TABLE IF EXISTS max_columns_value_test")
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataWithDicExcludeAndInclude.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataWithDicExcludeAndInclude.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataWithDicExcludeAndInclude.scala
deleted file mode 100644
index 4224856..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataWithDicExcludeAndInclude.scala
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.carbondata.spark.testsuite.dataload
-
-import java.io.File
-
-import org.apache.spark.sql.Row
-import org.apache.spark.sql.common.util.CarbonHiveContext._
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-
-
-class TestLoadDataWithDictionaryExcludeAndInclude extends QueryTest with BeforeAndAfterAll {
-  var filePath: String = _
-  var pwd: String = _
-
-  def buildTestData() = {
-    pwd = new File(this.getClass.getResource("/").getPath + "/../../").getCanonicalPath
-    filePath = pwd + "/src/test/resources/emptyDimensionData.csv"
-  }
-
-  def dropTable() = {
-    sql("DROP TABLE IF EXISTS exclude_include_t3")
-    sql("DROP TABLE IF EXISTS exclude_include_hive_t3")
-  }
-
-  def buildTable() = {
-    try {
-      sql(
-        """
-           CREATE TABLE exclude_include_hive_t3
-           (ID Int, date Timestamp, country String,
-           name String, phonetype String, serialname String, salary Int)
-           row format delimited fields terminated by ','
-        """)
-      sql(
-        """
-           CREATE TABLE exclude_include_t3
-           (ID Int, date Timestamp, country String,
-           name String, phonetype String, serialname String, salary Int)
-           STORED BY 'org.apache.carbondata.format'
-           TBLPROPERTIES('DICTIONARY_EXCLUDE'='country,phonetype,serialname',
-           'DICTIONARY_INCLUDE'='ID')
-        """)
-    } catch {
-      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
-    }
-  }
-
-  def loadTable() = {
-    try {
-      CarbonProperties.getInstance()
-        .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
-      sql(
-        s"""
-           LOAD DATA LOCAL INPATH '$filePath' into table exclude_include_t3
-           """)
-      sql(
-        s"""
-           LOAD DATA LOCAL INPATH './src/test/resources/emptyDimensionDataHive.csv' into table exclude_include_hive_t3
-           """)
-    } catch {
-      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
-    }
-  }
-
-  override def beforeAll {
-    dropTable
-    buildTestData
-    buildTable
-    loadTable
-  }
-
-  test("test load data with dictionary exclude & include and with empty dimension") {
-    checkAnswer(
-      sql("select ID from exclude_include_t3"), sql("select ID from exclude_include_hive_t3")
-    )
-  }
-
-  override def afterAll {
-    dropTable
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
deleted file mode 100644
index 245a5e8..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.carbondata.spark.testsuite.dataload
-
-import org.apache.spark.sql.{DataFrame, Row, SaveMode}
-import org.apache.spark.sql.common.util.CarbonHiveContext._
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-class TestLoadDataFrame extends QueryTest with BeforeAndAfterAll {
-  var df: DataFrame = _
-
-  def buildTestData() = {
-    import implicits._
-    df = sc.parallelize(1 to 1000)
-      .map(x => ("a", "b", x))
-      .toDF("c1", "c2", "c3")
-  }
-
-  def dropTable() = {
-    sql("DROP TABLE IF EXISTS carbon1")
-    sql("DROP TABLE IF EXISTS carbon2")
-    sql("DROP TABLE IF EXISTS carbon3")
-  }
-
-
-
-  override def beforeAll {
-    dropTable
-    buildTestData
-  }
-
-  test("test load dataframe with saving compressed csv files") {
-    // save dataframe to carbon file
-    df.write
-      .format("carbondata")
-      .option("tableName", "carbon1")
-      .option("tempCSV", "true")
-      .option("compress", "true")
-      .mode(SaveMode.Overwrite)
-      .save()
-    checkAnswer(
-      sql("select count(*) from carbon1 where c3 > 500"), Row(500)
-    )
-  }
-
-  test("test load dataframe with saving csv uncompressed files") {
-    // save dataframe to carbon file
-    df.write
-      .format("carbondata")
-      .option("tableName", "carbon2")
-      .option("tempCSV", "true")
-      .option("compress", "false")
-      .mode(SaveMode.Overwrite)
-      .save()
-    checkAnswer(
-      sql("select count(*) from carbon2 where c3 > 500"), Row(500)
-    )
-  }
-
-  test("test load dataframe without saving csv files") {
-    // save dataframe to carbon file
-    df.write
-      .format("carbondata")
-      .option("tableName", "carbon3")
-      .option("tempCSV", "false")
-      .mode(SaveMode.Overwrite)
-      .save()
-    checkAnswer(
-      sql("select count(*) from carbon3 where c3 > 500"), Row(500)
-    )
-  }
-
-  override def afterAll {
-    dropTable
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala
deleted file mode 100644
index 143b386..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.carbondata.spark.testsuite.dataload
-
-import org.apache.carbondata.processing.etl.DataLoadingException
-import org.apache.spark.sql.common.util.CarbonHiveContext._
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-class TestLoadDataUseAllDictionary extends QueryTest with BeforeAndAfterAll{
-  override def beforeAll {
-    sql("DROP TABLE IF EXISTS t3")
-    sql("""
-           CREATE TABLE IF NOT EXISTS t3
-           (ID Int, date Timestamp, country String,
-           name String, phonetype String, serialname String, salary Int)
-           STORED BY 'carbondata'
-           """)
-  }
-
-  test("test load data use all dictionary, and given wrong format dictionary values") {
-    try {
-      sql(s"""
-           LOAD DATA LOCAL INPATH './src/test/resources/windows.csv' into table t3
-           options('FILEHEADER'='id,date,country,name,phonetype,serialname,salary',
-           'All_DICTIONARY_PATH'='./src/test/resources/dict.txt')
-           """)
-      assert(false)
-    } catch {
-      case e: DataLoadingException =>
-        assert(e.getMessage.equals("Data Loading failure, dictionary values are " +
-          "not in correct format!"))
-    }
-  }
-
-  override def afterAll {
-    sql("DROP TABLE IF EXISTS t3")
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithDiffTimestampFormat.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithDiffTimestampFormat.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithDiffTimestampFormat.scala
deleted file mode 100644
index 3fe3c21..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithDiffTimestampFormat.scala
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.carbondata.spark.testsuite.dataload
-
-import org.apache.spark.sql.common.util.CarbonHiveContext._
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-import java.sql.Timestamp
-
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
-import org.apache.spark.sql.Row
-
-class TestLoadDataWithDiffTimestampFormat extends QueryTest with BeforeAndAfterAll {
-  override def beforeAll {
-    sql("DROP TABLE IF EXISTS t3")
-    sql("""
-           CREATE TABLE IF NOT EXISTS t3
-           (ID Int, date Timestamp, starttime Timestamp, country String,
-           name String, phonetype String, serialname String, salary Int)
-           STORED BY 'carbondata'
-        """)
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
-  }
-
-  test("test load data with different timestamp format") {
-      sql(s"""
-           LOAD DATA LOCAL INPATH './src/test/resources/timeStampFormatData1.csv' into table t3
-           OPTIONS('dateformat' = 'starttime:yyyy-MM-dd HH:mm:ss')
-           """)
-      sql(s"""
-           LOAD DATA LOCAL INPATH './src/test/resources/timeStampFormatData2.csv' into table t3
-           OPTIONS('dateformat' = ' date : yyyy-MM-dd , StartTime : yyyy/MM/dd HH:mm:ss')
-           """)
-      checkAnswer(
-        sql("SELECT date FROM t3 WHERE ID = 1"),
-        Seq(Row(Timestamp.valueOf("2015-07-23 00:00:00.0")))
-      )
-      checkAnswer(
-        sql("SELECT starttime FROM t3 WHERE ID = 1"),
-        Seq(Row(Timestamp.valueOf("2016-07-23 01:01:30.0")))
-      )
-      checkAnswer(
-        sql("SELECT date FROM t3 WHERE ID = 18"),
-        Seq(Row(Timestamp.valueOf("2015-07-25 00:00:00.0")))
-      )
-      checkAnswer(
-        sql("SELECT starttime FROM t3 WHERE ID = 18"),
-        Seq(Row(Timestamp.valueOf("2016-07-25 02:32:02.0")))
-      )
-  }
-
-  test("test load data with different timestamp format with wrong setting") {
-    try {
-      sql(s"""
-           LOAD DATA LOCAL INPATH './src/test/resources/timeStampFormatData1.csv' into table t3
-           OPTIONS('dateformat' = '')
-           """)
-      assert(false)
-    } catch {
-      case ex: MalformedCarbonCommandException =>
-        assertResult(ex.getMessage)("Error: Option DateFormat is set an empty string.")
-      case _ => assert(false)
-    }
-
-    try {
-      sql(s"""
-           LOAD DATA LOCAL INPATH './src/test/resources/timeStampFormatData1.csv' into table t3
-           OPTIONS('dateformat' = 'fasfdas:yyyy/MM/dd')
-           """)
-      assert(false)
-    } catch {
-      case ex: MalformedCarbonCommandException =>
-        assertResult(ex.getMessage)("Error: Wrong Column Name fasfdas is provided in Option DateFormat.")
-      case _ => assert(false)
-    }
-
-    try {
-      sql(s"""
-           LOAD DATA LOCAL INPATH './src/test/resources/timeStampFormatData1.csv' into table t3
-           OPTIONS('dateformat' = 'date:  ')
-           """)
-      assert(false)
-    } catch {
-      case ex: MalformedCarbonCommandException =>
-        assertResult(ex.getMessage)("Error: Option DateFormat is not provided for Column date.")
-      case _ => assert(false)
-    }
-
-    try {
-      sql(s"""
-           LOAD DATA LOCAL INPATH './src/test/resources/timeStampFormatData1.csv' into table t3
-           OPTIONS('dateformat' = 'date  ')
-           """)
-      assert(false)
-    } catch {
-      case ex: MalformedCarbonCommandException =>
-        assertResult(ex.getMessage)("Error: Option DateFormat is not provided for Column date  .")
-      case _ => assert(false)
-    }
-
-    try {
-      sql(s"""
-           LOAD DATA LOCAL INPATH './src/test/resources/timeStampFormatData1.csv' into table t3
-           OPTIONS('dateformat' = ':yyyy/MM/dd  ')
-           """)
-      assert(false)
-    } catch {
-      case ex: MalformedCarbonCommandException =>
-        assertResult(ex.getMessage)("Error: Wrong Column Name  is provided in Option DateFormat.")
-      case _ => assert(false)
-    }
-
-  }
-
-  override def afterAll {
-    sql("DROP TABLE IF EXISTS t3")
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithFileHeaderException.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithFileHeaderException.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithFileHeaderException.scala
deleted file mode 100644
index d6bac33..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithFileHeaderException.scala
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.carbondata.spark.testsuite.dataload
-
-import org.apache.spark.sql.common.util.CarbonHiveContext._
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-class TestLoadDataWithFileHeaderException extends QueryTest with BeforeAndAfterAll{
-  override def beforeAll {
-    sql("DROP TABLE IF EXISTS t3")
-    sql("""
-           CREATE TABLE IF NOT EXISTS t3
-           (ID Int, date Timestamp, country String,
-           name String, phonetype String, serialname String, salary Int)
-           STORED BY 'carbondata'
-           """)
-  }
-
-  test("test load data both file and ddl without file header exception") {
-    try {
-      sql(s"""
-           LOAD DATA LOCAL INPATH './src/test/resources/windows.csv' into table t3
-           """)
-      assert(false)
-    } catch {
-      case e: Exception =>
-        assert(e.getMessage.equals("DataLoad failure: CSV File provided is not proper. " +
-          "Column names in schema and csv header are not same. CSVFile Name : windows.csv"))
-    }
-  }
-
-  test("test load data ddl provided  wrong file header exception") {
-    try {
-      sql(s"""
-           LOAD DATA LOCAL INPATH './src/test/resources/windows.csv' into table t3
-           options('fileheader'='no_column')
-           """)
-      assert(false)
-    } catch {
-      case e: Exception =>
-        assert(e.getMessage.equals("DataLoad failure: CSV header provided in DDL is not proper. " +
-          "Column names in schema and CSV header are not the same."))
-    }
-  }
-
-  override def afterAll {
-    sql("DROP TABLE IF EXISTS t3")
-  }
-}

