carbondata-commits mailing list archives

From chenliang...@apache.org
Subject [1/2] incubator-carbondata git commit: CARBONDATA-343 delete some duplicated definition code
Date Sat, 29 Oct 2016 02:18:18 GMT
Repository: incubator-carbondata
Updated Branches:
  refs/heads/master 554c468ee -> d840562a6


CARBONDATA-343 delete some duplicated definition code

Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/f0895107
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/f0895107
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/f0895107

Branch: refs/heads/master
Commit: f0895107c754bfcb8a7e938771d2926080ac760b
Parents: 554c468
Author: hexiaoqiao <hexiaoqiao@meituan.com>
Authored: Fri Oct 28 22:44:57 2016 +0800
Committer: chenliang613 <chenliang613@apache.org>
Committed: Sat Oct 29 10:15:42 2016 +0800

----------------------------------------------------------------------
 .../spark/util/GlobalDictionaryUtil.scala        | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/f0895107/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
index a1f4400..d14eedf 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
@@ -745,13 +745,12 @@ object GlobalDictionaryUtil extends Logging {
                                storePath: String,
                                dataFrame: Option[DataFrame] = None): Unit = {
     try {
-      val table = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable.getAbsoluteTableIdentifier
-        .getCarbonTableIdentifier
+      var carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
+      var carbonTableIdentifier = carbonTable.getAbsoluteTableIdentifier.getCarbonTableIdentifier
       // create dictionary folder if not exists
-      val carbonTablePath = CarbonStorePath.getCarbonTablePath(storePath, table)
+      val carbonTablePath = CarbonStorePath.getCarbonTablePath(storePath, carbonTableIdentifier)
       val dictfolderPath = carbonTablePath.getMetadataDirectoryPath
       // columns which need to generate global dictionary file
-      val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
       val dimensions = carbonTable.getDimensionByTableName(
         carbonTable.getFactTableName).asScala.toArray
       // generate global dict from pre defined column dict file
@@ -776,7 +775,7 @@ object GlobalDictionaryUtil extends Logging {
         val colDictFilePath = carbonLoadModel.getColDictFilePath
         if (colDictFilePath != null) {
           // generate predefined dictionary
-          generatePredefinedColDictionary(colDictFilePath, table,
+          generatePredefinedColDictionary(colDictFilePath, carbonTableIdentifier,
             dimensions, carbonLoadModel, sqlContext, storePath, dictfolderPath)
         }
         if (headers.length > df.columns.length) {
@@ -791,8 +790,8 @@ object GlobalDictionaryUtil extends Logging {
         if (requireDimension.nonEmpty) {
           // select column to push down pruning
           df = df.select(requireColumnNames.head, requireColumnNames.tail: _*)
-          val model = createDictionaryLoadModel(carbonLoadModel, table, requireDimension,
-            storePath, dictfolderPath, false)
+          val model = createDictionaryLoadModel(carbonLoadModel, carbonTableIdentifier,
+            requireDimension, storePath, dictfolderPath, false)
           // combine distinct value in a block and partition by column
           val inputRDD = new CarbonBlockDistinctValuesCombineRDD(df.rdd, model)
             .partitionBy(new ColumnPartitioner(model.primDimensions.length))
@@ -814,7 +813,7 @@ object GlobalDictionaryUtil extends Logging {
             if (requireDimensionForDim.length >= 1) {
               dimDataframe = dimDataframe.select(requireColumnNamesForDim.head,
                 requireColumnNamesForDim.tail: _*)
-              val modelforDim = createDictionaryLoadModel(carbonLoadModel, table,
+              val modelforDim = createDictionaryLoadModel(carbonLoadModel, carbonTableIdentifier,
                 requireDimensionForDim, storePath, dictfolderPath, false)
               val inputRDDforDim = new CarbonBlockDistinctValuesCombineRDD(
                 dimDataframe.rdd, modelforDim)
@@ -841,8 +840,8 @@ object GlobalDictionaryUtil extends Logging {
           val (requireDimension, requireColumnNames) =
             pruneDimensions(dimensions, headers, headers)
           if (requireDimension.nonEmpty) {
-            val model = createDictionaryLoadModel(carbonLoadModel, table, requireDimension,
-              storePath, dictfolderPath, false)
+            val model = createDictionaryLoadModel(carbonLoadModel, carbonTableIdentifier,
+              requireDimension, storePath, dictfolderPath, false)
             // check if dictionary files contains bad record
             val accumulator = sqlContext.sparkContext.accumulator(0)
             // read local dictionary file, and group by key
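
----------------------------------------------------------------------
For reference, the substance of the change: generateGlobalDictionary previously derived the
CarbonTableIdentifier and the CarbonTable from carbonLoadModel in two separate places; the
patch now fetches the CarbonTable once and reuses it both for the identifier passed to
generatePredefinedColDictionary/createDictionaryLoadModel and for the dimension lookup.
A minimal before/after sketch, reduced to the relevant lines of the diff above (the committed
code declares the new bindings with var, though val suffices since they are never reassigned):

  // before: the same CarbonTable is looked up twice
  val table = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
    .getAbsoluteTableIdentifier.getCarbonTableIdentifier
  val carbonTablePath = CarbonStorePath.getCarbonTablePath(storePath, table)
  val dictfolderPath = carbonTablePath.getMetadataDirectoryPath
  val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable

  // after: fetch the table once and derive the identifier from it
  val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
  val carbonTableIdentifier = carbonTable.getAbsoluteTableIdentifier.getCarbonTableIdentifier
  val carbonTablePath = CarbonStorePath.getCarbonTablePath(storePath, carbonTableIdentifier)
  val dictfolderPath = carbonTablePath.getMetadataDirectoryPath
----------------------------------------------------------------------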

