carbondata-commits mailing list archives

From jack...@apache.org
Subject [1/5] incubator-carbondata git commit: WIP Added code for new V3 format to optimize scan
Date Fri, 24 Feb 2017 05:38:42 GMT
Repository: incubator-carbondata
Updated Branches:
  refs/heads/master 766671c79 -> 3e36cdf54


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDictionaryDecoder.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDictionaryDecoder.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDictionaryDecoder.scala
index 0c80a36..32279ed 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDictionaryDecoder.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDictionaryDecoder.scala
@@ -181,19 +181,16 @@ case class CarbonDictionaryDecoder(
             override final def hasNext: Boolean = iter.hasNext
 
             override final def next(): InternalRow = {
-              val startTime = System.currentTimeMillis()
               val row: InternalRow = iter.next()
               val data = row.toSeq(dataTypes).toArray
               dictIndex.foreach { index =>
                 if (data(index) != null) {
                   data(index) = DataTypeUtil.getDataBasedOnDataType(dicts(index)
-                    .getDictionaryValueForKey(data(index).asInstanceOf[Int]),
+                    .getDictionaryValueForKeyInBytes(data(index).asInstanceOf[Int]),
                     getDictionaryColumnIds(index)._3)
                 }
               }
-              val result = unsafeProjection(new GenericInternalRow(data))
-              total += System.currentTimeMillis() - startTime
-              result
+              unsafeProjection(new GenericInternalRow(data))
             }
           }
         }
@@ -342,7 +339,7 @@ class CarbonDecoderRDD(
         dictIndex.foreach { index =>
           if (data(index) != null) {
             data(index) = DataTypeUtil.getDataBasedOnDataType(dicts(index)
-              .getDictionaryValueForKey(data(index).asInstanceOf[Int]),
+                .getDictionaryValueForKeyInBytes(data(index).asInstanceOf[Int]),
               getDictionaryColumnIds(index)._3)
           }
         }
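
Note for readers skimming the patch: in both hunks above, the decoder stops turning each surrogate key into a String via getDictionaryValueForKey and instead fetches the raw stored bytes via getDictionaryValueForKeyInBytes, and the per-row timing bookkeeping around unsafeProjection is dropped. The sketch below is a minimal, self-contained illustration of why the byte-based lookup can be cheaper; ForwardDictionary, InMemoryDictionary and the 1-based key layout are assumptions made for this example, not the CarbonData API.

import java.nio.charset.StandardCharsets

// Hypothetical dictionary interface, only to contrast the two lookup styles.
trait ForwardDictionary {
  // String-based lookup: allocates a new String on every call.
  def getDictionaryValueForKey(surrogateKey: Int): String
  // Byte-based lookup: hands back the stored bytes directly.
  def getDictionaryValueForKeyInBytes(surrogateKey: Int): Array[Byte]
}

final class InMemoryDictionary(values: Array[Array[Byte]]) extends ForwardDictionary {
  override def getDictionaryValueForKey(key: Int): String =
    new String(values(key - 1), StandardCharsets.UTF_8)   // extra per-cell allocation
  override def getDictionaryValueForKeyInBytes(key: Int): Array[Byte] =
    values(key - 1)                                        // no copy, no String
}

object DictionaryDecodeSketch {
  def main(args: Array[String]): Unit = {
    val dict = new InMemoryDictionary(
      Array("apple", "banana", "cherry").map(_.getBytes(StandardCharsets.UTF_8)))
    val surrogateKeys = Array(1, 3, 2)

    // Old path: key -> String, built for every decoded cell.
    val viaString = surrogateKeys.map(dict.getDictionaryValueForKey)
    // New path: key -> bytes; a String is built only if the consumer needs one.
    val viaBytes = surrogateKeys.map(dict.getDictionaryValueForKeyInBytes)

    println(viaString.mkString(", "))
    println(viaBytes.map(b => new String(b, StandardCharsets.UTF_8)).mkString(", "))
  }
}

The point of the change is only that the textual value is materialised lazily by the consumer, rather than once per decoded cell inside the iterator.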

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
index 5a50614..afbbae1 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
@@ -195,6 +195,7 @@ class CarbonLateDecodeRule extends Rule[LogicalPlan] with PredicateHelper {
           agg.aggregateExpressions.map {
             case attr: AttributeReference =>
             case a@Alias(attr: AttributeReference, name) =>
+            case Alias(AggregateExpression(Count(Seq(attr: AttributeReference)), _, _, _), _) =>
             case aggExp: AggregateExpression =>
               aggExp.transform {
                 case aggExp: AggregateExpression =>
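
The hunk above adds one more pattern to the walk over agg.aggregateExpressions: an aliased COUNT over a single AttributeReference is now matched explicitly before the generic AggregateExpression case. The sketch below shows the shape of that nested match using simplified stand-in case classes; it is not Spark's Catalyst API (the real AggregateExpression carries more fields, hence the extra wildcards in the diff).

// Simplified stand-ins for Catalyst's Alias / AggregateExpression / Count /
// AttributeReference, used only to illustrate the nested pattern.
object LateDecodePatternSketch {
  sealed trait Expr
  final case class AttributeReference(name: String) extends Expr
  final case class Count(children: Seq[Expr]) extends Expr
  final case class AggregateExpression(fn: Expr, mode: String, isDistinct: Boolean) extends Expr
  final case class Alias(child: Expr, name: String) extends Expr

  // Mirrors the shape of the new case in CarbonLateDecodeRule: an alias whose
  // child is COUNT(attribute) is recognised directly, before the generic
  // AggregateExpression handling runs.
  def describe(e: Expr): String = e match {
    case AttributeReference(n)               => s"plain column $n"
    case Alias(AttributeReference(n), alias) => s"column $n as $alias"
    case Alias(AggregateExpression(Count(Seq(AttributeReference(n))), _, _), alias) =>
      s"COUNT($n) as $alias"
    case _                                   => "other expression"
  }

  def main(args: Array[String]): Unit = {
    val expr = Alias(
      AggregateExpression(Count(Seq(AttributeReference("city"))), "Complete", isDistinct = false),
      "cnt")
    println(describe(expr))   // prints: COUNT(city) as cnt
  }
}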

