carbondata-commits mailing list archives

From: ravipes...@apache.org
Subject: [14/50] [abbrv] carbondata git commit: [CARBONDATA-1592] Added analysis exception to handle event exceptions
Date: Sun, 28 Jan 2018 06:45:43 GMT
[CARBONDATA-1592] Added analysis exception to handle event exceptions

This closes #1788


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/f557545d
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/f557545d
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/f557545d

Branch: refs/heads/fgdatamap
Commit: f557545d19565ffa0116e9689253f1aa5de50381
Parents: 1012c62
Author: Manohar <manohar.crazy09@gmail.com>
Authored: Wed Jan 10 20:07:33 2018 +0530
Committer: ravipesala <ravi.pesala@gmail.com>
Committed: Thu Jan 11 16:06:47 2018 +0530

----------------------------------------------------------------------
 .../org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala  | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/f557545d/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 7982071..b03b6fa 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -37,7 +37,7 @@ import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat, FileSplit}
 import org.apache.spark.{SparkEnv, SparkException, TaskContext}
 import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.rdd.{DataLoadCoalescedRDD, DataLoadPartitionCoalescer, NewHadoopRDD, RDD}
-import org.apache.spark.sql.{CarbonEnv, DataFrame, Row, SQLContext}
+import org.apache.spark.sql.{AnalysisException, CarbonEnv, DataFrame, Row, SQLContext}
 import org.apache.spark.sql.execution.command.{CompactionModel, ExecutionErrors, UpdateTableModel}
 import org.apache.spark.sql.hive.DistributionUtil
 import org.apache.spark.sql.optimizer.CarbonFilters
@@ -417,6 +417,9 @@ object CarbonDataRDDFactory {
                 .trimErrorMessage(sparkException.getCause.getMessage)
               errorMessage = errorMessage + " : " + executorMessage
             }
+          case aex: AnalysisException =>
+            LOGGER.error(aex.getMessage())
+            throw aex
           case _ =>
             if (ex.getCause != null) {
               executorMessage = ex.getCause.getMessage
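
For readers skimming the archive, the following is a minimal, self-contained Scala sketch of the pattern the hunk above introduces. It is not CarbonData code: the object and method names are illustrative assumptions. Most failures are folded into a generic error message, but an AnalysisException is logged and rethrown unchanged so the caller still sees the original Spark analysis error.

// Illustrative sketch only -- LoadErrorHandlingSketch and runLoad are
// hypothetical names and do not exist in CarbonData.
import org.apache.spark.SparkException
import org.apache.spark.sql.AnalysisException

object LoadErrorHandlingSketch {

  def runLoad(body: => Unit): Unit = {
    var errorMessage = "DataLoad failure"
    try {
      body
    } catch {
      case ex: Exception =>
        ex match {
          case sparkException: SparkException if sparkException.getCause != null =>
            // Generic Spark failures: fold the executor cause into a summary message.
            errorMessage = errorMessage + " : " + sparkException.getCause.getMessage
          case aex: AnalysisException =>
            // The behaviour added by this commit: log and rethrow unchanged,
            // so the original analysis error reaches the caller.
            System.err.println(aex.getMessage)
            throw aex
          case _ =>
            if (ex.getCause != null) {
              errorMessage = errorMessage + " : " + ex.getCause.getMessage
            }
        }
        // All other failures surface as a wrapped exception carrying the summary message.
        throw new Exception(errorMessage, ex)
    }
  }
}

Under this sketch, a failure raised as an AnalysisException (for example, a missing table detected while a command is analysed) propagates with Spark's own message, while any other exception is wrapped into the generic "DataLoad failure" text.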

