carbondata-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jack...@apache.org
Subject [06/12] carbondata git commit: [CARBONDATA-2024][IUD] after update operation empty folder is being created for compacted segments
Date Wed, 17 Jan 2018 17:50:18 GMT
[CARBONDATA-2024][IUD] after update operation empty folder is being created for compacted segments

This closes #1797


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/2f3f3b54
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/2f3f3b54
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/2f3f3b54

Branch: refs/heads/carbonstore
Commit: 2f3f3b54056afbf6c06e7ed004ee537a8d677c71
Parents: da7bedb
Author: rahulforallp <rahul.kumar@knoldus.in>
Authored: Fri Jan 12 19:02:41 2018 +0530
Committer: Venkata Ramana G <ramana.gollamudi@huawei.com>
Committed: Wed Jan 17 15:45:37 2018 +0530

----------------------------------------------------------------------
 .../iud/UpdateCarbonTableTestCase.scala         | 25 +++++++++++++++++++-
 .../spark/rdd/CarbonDataRDDFactory.scala        |  2 ++
 2 files changed, 26 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/2f3f3b54/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
index 7f6cae4..cf4fc07 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
@@ -16,7 +16,10 @@
  */
 package org.apache.carbondata.spark.testsuite.iud
 
-import org.apache.spark.sql.{Row, SaveMode}
+import java.io.File
+
+import org.apache.spark.sql.test.Spark2TestQueryExecutor
+import org.apache.spark.sql.{CarbonEnv, Row, SaveMode}
 import org.scalatest.BeforeAndAfterAll
 
 import org.apache.carbondata.core.constants.{CarbonCommonConstants, CarbonLoadOptionConstants}
@@ -669,6 +672,26 @@ class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
     }
   }
 
+  test("empty folder creation after compaction and update") {
+    sql("drop table if exists t")
+    sql("create table t (c1 string, c2 string, c3 int, c4 string) stored by 'carbondata'")
+    sql("insert into t select 'asd','sdf',1,'dfg'")
+    sql("insert into t select 'asdf','sadf',2,'dafg'")
+    sql("insert into t select 'asdq','sqdf',3,'dqfg'")
+    sql("insert into t select 'aswd','sdfw',4,'dfgw'")
+    sql("insert into t select 'aesd','sdef',5,'dfge'")
+    sql("alter table t compact 'minor'")
+    sql("clean files for table t")
+    sql("delete from t where c3 = 2").show()
+    sql("update t set(c4) = ('yyy') where c3 = 3").show()
+    checkAnswer(sql("select count(*) from t where c4 = 'yyy'"), Seq(Row(1)))
+    val f = new File(dblocation + CarbonCommonConstants.FILE_SEPARATOR +
+                     CarbonCommonConstants.FILE_SEPARATOR + "t" +
+                     CarbonCommonConstants.FILE_SEPARATOR + "Fact" +
+                     CarbonCommonConstants.FILE_SEPARATOR + "Part0")
+    assert(f.list().length == 2)
+  }
+
   override def afterAll {
     sql("use default")
     sql("drop database  if exists iud cascade")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2f3f3b54/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index b03b6fa..f37fbd7 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -595,6 +595,8 @@ object CarbonDataRDDFactory {
 
       val loadMetadataDetails = SegmentStatusManager.readLoadMetadata(
         carbonTable.getMetaDataFilepath)
+        .filter(lmd => lmd.getSegmentStatus.equals(SegmentStatus.LOAD_PARTIAL_SUCCESS) ||
+                       lmd.getSegmentStatus.equals(SegmentStatus.SUCCESS))
       val segmentIds = loadMetadataDetails.map(_.getLoadName)
       val segmentIdIndex = segmentIds.zipWithIndex.toMap
       val carbonTablePath = CarbonStorePath.getCarbonTablePath(carbonLoadModel.getTablePath,


Mime
View raw message