carbondata-commits mailing list archives

From chenliang...@apache.org
Subject [1/2] incubator-carbondata git commit: Fixed distinct count issue in 1.6.2
Date Mon, 08 Aug 2016 06:48:29 GMT
Repository: incubator-carbondata
Updated Branches:
  refs/heads/master ea3169e8b -> e0b00bfc1


Fixed distinct count issue in Spark 1.6.2


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/9244dcf5
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/9244dcf5
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/9244dcf5

Branch: refs/heads/master
Commit: 9244dcf59965de6e62767cf9e82e6dd41d6b2e66
Parents: ea3169e
Author: ravipesala <ravi.pesala@gmail.com>
Authored: Sun Aug 7 19:09:29 2016 +0530
Committer: chenliang613 <chenliang613@apache.org>
Committed: Mon Aug 8 14:47:29 2016 +0800

----------------------------------------------------------------------
 .../spark/sql/optimizer/CarbonOptimizer.scala   | 20 ++++++++++++++++++++
 .../spark/thriftserver/CarbonThriftServer.scala |  3 +++
 .../AllDataTypesTestCaseAggregate.scala         |  7 +++++++
 3 files changed, 30 insertions(+)
----------------------------------------------------------------------
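For context (an inference from the diff below, not stated in the commit message): in Spark 1.6.2, a mixed plain/distinct aggregation such as the new test's count(empno), count(distinct empno) is rewritten through a logical Expand operator, which CarbonOptimizer previously skipped, leaving dictionary-encoded attributes under Expand undecoded. A minimal sketch for inspecting such a plan, assuming a Spark 1.6 shell with a CarbonContext named cc and the test table from the last hunk loaded:

    // Sketch only: cc and the table name are assumptions taken from the test hunk below.
    import org.apache.spark.sql.catalyst.plans.logical.Expand

    val plan = cc.sql("select count(empno), count(distinct empno) from alldatatypestableAGG")
      .queryExecution.optimizedPlan

    // Without the optimizer change below, AttributeReferences under this Expand
    // keep their dictionary-encoded data type instead of the decoded one.
    plan.collect { case e: Expand => e }.foreach(println)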


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/9244dcf5/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala
index d75eeda..71d920c 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala
@@ -441,6 +441,11 @@ class ResolveCarbonFunctions(relations: Seq[CarbonDecoderRelation])
           }
         }
         Aggregate(grpExps, aggExps, agg.child)
+      case expand: Expand =>
+        expand.transformExpressions {
+          case attr: AttributeReference =>
+            updateDataType(attr, relations, allAttrsNotDecode, aliasMap)
+        }
       case filter: Filter =>
         val filterExps = filter.condition transform {
           case attr: AttributeReference =>
@@ -529,6 +534,21 @@ class ResolveCarbonFunctions(relations: Seq[CarbonDecoderRelation])
         }
         a
     }
+    // Collect the output of Expand and register each projection attribute as an alias of it.
+    plan.collect {
+      case expand: Expand =>
+        expand.projections.foreach { projection =>
+          projection.zipWithIndex.foreach { case (expr, index) =>
+            expr match {
+              case attr: AttributeReference =>
+                aliasMap.put(expand.output(index).toAttribute, attr)
+              case Alias(attr: AttributeReference, _) =>
+                aliasMap.put(expand.output(index).toAttribute, attr)
+              case _ =>
+            }
+          }
+        }
+    }
   }
 
   // Collect aggregates on dimensions so that we can add decoder to it.

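A toy, self-contained illustration of the alias collection in the hunk above (the strings are stand-ins, not Catalyst types): each row of Expand.projections is positionally aligned with Expand.output, so zipping a projection row with its index recovers the output attribute that each source attribute feeds.

    import scala.collection.mutable

    val output     = Seq("empno#20", "gid#21")   // stand-in for expand.output
    val projection = Seq("empno#12", "1")        // stand-in for one projections row
    val aliasMap   = mutable.Map[String, String]()

    projection.zipWithIndex.foreach { case (attr, index) =>
      aliasMap.put(output(index), attr)          // output attribute -> source attribute
    }
    println(aliasMap)  // e.g. Map(empno#20 -> empno#12, gid#21 -> 1)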
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/9244dcf5/integration/spark/src/main/scala/org/carbondata/spark/thriftserver/CarbonThriftServer.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/carbondata/spark/thriftserver/CarbonThriftServer.scala b/integration/spark/src/main/scala/org/carbondata/spark/thriftserver/CarbonThriftServer.scala
index 8d4c76d..5c9d85a 100644
--- a/integration/spark/src/main/scala/org/carbondata/spark/thriftserver/CarbonThriftServer.scala
+++ b/integration/spark/src/main/scala/org/carbondata/spark/thriftserver/CarbonThriftServer.scala
@@ -35,6 +35,9 @@ object CarbonThriftServer {
       System.setProperty("carbon.properties.filepath",
         sparkHome + '/' + "conf" + '/' + "carbon.properties")
     }
+    if (org.apache.spark.SPARK_VERSION.startsWith("1.6")) {
+      conf.set("spark.sql.hive.thriftServer.singleSession", "true")
+    }
     val sc = new SparkContext(conf)
    val warmUpTime = CarbonProperties.getInstance().getProperty("carbon.spark.warmUpTime", "5000")
     try {

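Background on the new guard above (a Spark 1.6 fact, not stated in the commit): Spark 1.6 changed the Hive thrift server so that each JDBC/ODBC connection gets its own session, and spark.sql.hive.thriftServer.singleSession=true restores the earlier single shared session, making session state (SQL configuration, temporary tables, current database) visible across connections. A rough probe of the difference, assuming the server runs on localhost:10000 and the Hive JDBC driver is on the classpath:

    import java.sql.DriverManager

    val url = "jdbc:hive2://localhost:10000/default"
    val c1  = DriverManager.getConnection(url, "", "")
    val c2  = DriverManager.getConnection(url, "", "")

    // SQL conf is session-scoped: with singleSession=true the second connection
    // sees the value set by the first; with the 1.6 default it keeps its own copy.
    c1.createStatement().execute("set spark.sql.shuffle.partitions=7")
    val rs = c2.createStatement().executeQuery("set spark.sql.shuffle.partitions")
    while (rs.next()) println(rs.getString(1))

    c1.close(); c2.close()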
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/9244dcf5/integration/spark/src/test/scala/org/carbondata/spark/testsuite/aggquery/AllDataTypesTestCaseAggregate.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/aggquery/AllDataTypesTestCaseAggregate.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/aggquery/AllDataTypesTestCaseAggregate.scala
index 2569968..fd8cab1 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/aggquery/AllDataTypesTestCaseAggregate.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/aggquery/AllDataTypesTestCaseAggregate.scala
@@ -98,6 +98,13 @@ class AllDataTypesTestCaseAggregate extends QueryTest with BeforeAndAfterAll {
         "(designation) order by empname"))
   }
 
+  test("select count(empno), count(distinct(empno)) from alldatatypestableAGG")
+  {
+    checkAnswer(
+      sql("select count(empno), count(distinct(empno)) from alldatatypestableAGG"),
+      sql("select count(empno), count(distinct(empno)) from alldatatypescubeAGG_hive"))
+  }
+
   override def afterAll {
     sql("drop table alldatatypestableAGG")
   }

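The new test compares the CarbonData table against a parallel Hive table through checkAnswer. An equivalent standalone check (assuming a Spark 1.6 shell with a CarbonContext named cc and both tables created as in the suite's setup):

    val carbon = cc.sql(
      "select count(empno), count(distinct(empno)) from alldatatypestableAGG").collect()
    val hive = cc.sql(
      "select count(empno), count(distinct(empno)) from alldatatypescubeAGG_hive").collect()

    // Without the Expand handling added in CarbonOptimizer, the two sides could
    // disagree because attributes under Expand stayed dictionary-encoded.
    assert(carbon.sameElements(hive), s"carbon=${carbon.toSeq} hive=${hive.toSeq}")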
