carbondata-commits mailing list archives

From chenliang...@apache.org
Subject [1/2] incubator-carbondata git commit: To unify definition for SparkContext, CarbonContext, HiveContext, SQLContext
Date Sun, 03 Jul 2016 03:51:13 GMT
Repository: incubator-carbondata
Updated Branches:
  refs/heads/master 2cdeb89e2 -> d16347e89


Unify the variable names used for SparkContext, CarbonContext, HiveContext and SQLContext
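
The renames in this diff converge on one canonical variable name per context type: sc for SparkContext, sqlContext for SQLContext, hiveContext for HiveContext and cc for CarbonContext. A minimal sketch of that convention (illustrative only, not part of this commit; it assumes the Spark 1.x API these classes belong to, and the store path is a placeholder):

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{CarbonContext, SQLContext}
import org.apache.spark.sql.hive.HiveContext

// One canonical name per context type, matching the renames below.
val sc: SparkContext = new SparkContext(new SparkConf().setAppName("carbon-example"))
val sqlContext: SQLContext = new SQLContext(sc)
val hiveContext: HiveContext = new HiveContext(sc)
val cc: CarbonContext = new CarbonContext(sc, "/tmp/carbon/store") // placeholder store path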


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/b0bc10b6
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/b0bc10b6
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/b0bc10b6

Branch: refs/heads/master
Commit: b0bc10b65f03fdc16ee241bde7c35627b31be80d
Parents: 2cdeb89
Author: chenliang613 <chenliang613@apache.org>
Authored: Sat Jul 2 15:35:51 2016 +0530
Committer: chenliang613 <chenliang613@apache.org>
Committed: Sun Jul 3 09:18:42 2016 +0530

----------------------------------------------------------------------
 .../examples/GenerateDictionaryExample.scala    |  8 ++++----
 .../scala/org/apache/spark/sql/CarbonEnv.scala  |  2 +-
 .../spark/sql/hive/CarbonMetastoreCatalog.scala | 11 ++++++-----
 .../spark/sql/hive/CarbonSQLDialect.scala       |  2 +-
 .../carbondata/spark/csv/CarbonTextFile.scala   | 14 +++++++-------
 .../spark/rdd/CarbonDataFrameRDD.scala          |  4 ++--
 .../spark/rdd/CarbonDataRDDFactory.scala        | 20 ++++++++++----------
 .../org/carbondata/spark/rdd/Compactor.scala    |  4 ++--
 .../spark/thriftserver/CarbonThriftServer.scala |  4 ++--
 ...estampDataTypeDirectDictionaryTestCase.scala |  2 +-
 ...TypeDirectDictionaryWithNoDictTestCase.scala |  2 +-
 .../TimestampDataTypeNullDataTest.scala         |  2 +-
 12 files changed, 38 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/b0bc10b6/examples/src/main/scala/org/carbondata/examples/GenerateDictionaryExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/carbondata/examples/GenerateDictionaryExample.scala b/examples/src/main/scala/org/carbondata/examples/GenerateDictionaryExample.scala
index 9356d47..5269d05 100644
--- a/examples/src/main/scala/org/carbondata/examples/GenerateDictionaryExample.scala
+++ b/examples/src/main/scala/org/carbondata/examples/GenerateDictionaryExample.scala
@@ -57,13 +57,13 @@ object GenerateDictionaryExample {
     printDictionary(cc, tableIdentifier, dictFolderPath)
   }
 
-  def printDictionary(carbonContext: CarbonContext, carbonTableIdentifier: CarbonTableIdentifier,
+  def printDictionary(cc: CarbonContext, carbonTableIdentifier: CarbonTableIdentifier,
                       dictFolderPath: String) {
     val dataBaseName = carbonTableIdentifier.getDatabaseName
     val tableName = carbonTableIdentifier.getTableName
-    val carbonRelation = CarbonEnv.getInstance(carbonContext).carbonCatalog.
+    val carbonRelation = CarbonEnv.getInstance(cc).carbonCatalog.
       lookupRelation1(Option(dataBaseName),
-        tableName) (carbonContext).asInstanceOf[CarbonRelation]
+        tableName) (cc).asInstanceOf[CarbonRelation]
     val carbonTable = carbonRelation.cubeMeta.carbonTable
     val dimensions = carbonTable.getDimensionByTableName(tableName.toLowerCase())
       .toArray.map(_.asInstanceOf[CarbonDimension])
@@ -77,7 +77,7 @@ object GenerateDictionaryExample {
       println(s"Key\t\t\tValue")
       val columnIdentifier = new DictionaryColumnUniqueIdentifier(carbonTableIdentifier,
         dimension.getColumnIdentifier, dimension.getDataType)
-      val dict = CarbonLoaderUtil.getDictionary(columnIdentifier, carbonContext.storePath)
+      val dict = CarbonLoaderUtil.getDictionary(columnIdentifier, cc.storePath)
       var index: Int = 1
       var distinctValue = dict.getDictionaryValueForKey(index)
       while (distinctValue != null) {

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/b0bc10b6/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
index 11ae76f..9f6b0b4 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
@@ -22,7 +22,7 @@ import org.apache.spark.sql.hive.{CarbonMetastoreCatalog, HiveContext}
 /**
  * Carbon Environment for unified context
  */
-case class CarbonEnv(carbonContext: HiveContext, carbonCatalog: CarbonMetastoreCatalog)
+case class CarbonEnv(hiveContext: HiveContext, carbonCatalog: CarbonMetastoreCatalog)
 
 object CarbonEnv {
   val className = classOf[CarbonEnv].getCanonicalName

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/b0bc10b6/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
index aeae761..d84f554 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
@@ -101,8 +101,9 @@ case class DictionaryMap(dictionaryMap: Map[String, Boolean]) {
   }
 }
 
-class CarbonMetastoreCatalog(hive: HiveContext, val storePath: String, client: ClientInterface)
-  extends HiveMetastoreCatalog(client, hive)
+class CarbonMetastoreCatalog(hiveContext: HiveContext, val storePath: String,
+    client: ClientInterface)
+  extends HiveMetastoreCatalog(client, hiveContext)
     with spark.Logging {
 
   @transient val LOGGER = LogServiceFactory
@@ -159,7 +160,7 @@ class CarbonMetastoreCatalog(hive: HiveContext, val storePath: String, client: C
     if (CarbonProperties.getInstance()
       .getProperty(CarbonCommonConstants.LOCK_TYPE, CarbonCommonConstants.LOCK_TYPE_DEFAULT)
       .equalsIgnoreCase(CarbonCommonConstants.CARBON_LOCK_TYPE_ZOOKEEPER)) {
-      val zookeeperUrl = hive.getConf("spark.deploy.zookeeper.url", "127.0.0.1:2181")
+      val zookeeperUrl = hiveContext.getConf("spark.deploy.zookeeper.url", "127.0.0.1:2181")
       ZookeeperInit.getInstance(zookeeperUrl)
     }
 
@@ -344,7 +345,7 @@ class CarbonMetastoreCatalog(hive: HiveContext, val storePath: String, client: C
   def getNodeList: Array[String] = {
 
     val arr =
-      hive.sparkContext.getExecutorMemoryStatus.map {
+      hiveContext.sparkContext.getExecutorMemoryStatus.map {
         kv =>
           kv._1.split(":")(0)
       }.toSeq
@@ -352,7 +353,7 @@ class CarbonMetastoreCatalog(hive: HiveContext, val storePath: String, client: C
     val selectedLocalIPList = localhostIPs.filter(arr.contains(_))
 
     val nodelist: List[String] = withoutDriverIP(arr.toList)(selectedLocalIPList.contains(_))
-    val masterMode = hive.sparkContext.getConf.get("spark.master")
+    val masterMode = hiveContext.sparkContext.getConf.get("spark.master")
     if (nodelist.nonEmpty) {
       // Specific for Yarn Mode
       if ("yarn-cluster".equals(masterMode) || "yarn-client".equals(masterMode)) {

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/b0bc10b6/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSQLDialect.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSQLDialect.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSQLDialect.scala
index d80703e..a33b63e 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSQLDialect.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSQLDialect.scala
@@ -23,7 +23,7 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 
 import org.carbondata.spark.exception.MalformedCarbonCommandException
 
-private[spark] class CarbonSQLDialect(context: HiveContext) extends ParserDialect {
+private[spark] class CarbonSQLDialect(hiveContext: HiveContext) extends ParserDialect {
 
   @transient
   protected val sqlParser = new CarbonSqlParser

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/b0bc10b6/integration/spark/src/main/scala/org/carbondata/spark/csv/CarbonTextFile.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/carbondata/spark/csv/CarbonTextFile.scala b/integration/spark/src/main/scala/org/carbondata/spark/csv/CarbonTextFile.scala
index 00895c0..d6cdaaa 100644
--- a/integration/spark/src/main/scala/org/carbondata/spark/csv/CarbonTextFile.scala
+++ b/integration/spark/src/main/scala/org/carbondata/spark/csv/CarbonTextFile.scala
@@ -37,26 +37,26 @@ import org.carbondata.spark.rdd.CarbonDataRDDFactory
  */
 private[csv] object CarbonTextFile {
 
-  private def newHadoopRDD(context: SparkContext, location: String) = {
-    val hadoopConfiguration = new Configuration(context.hadoopConfiguration)
+  private def newHadoopRDD(sc: SparkContext, location: String) = {
+    val hadoopConfiguration = new Configuration(sc.hadoopConfiguration)
     hadoopConfiguration.setStrings(FileInputFormat.INPUT_DIR, location)
     hadoopConfiguration.setBoolean(FileInputFormat.INPUT_DIR_RECURSIVE, true)
-    CarbonDataRDDFactory.configSplitMaxSize(context, location, hadoopConfiguration)
+    CarbonDataRDDFactory.configSplitMaxSize(sc, location, hadoopConfiguration)
     new NewHadoopRDD[LongWritable, Text](
-      context,
+      sc,
       classOf[TextInputFormat],
       classOf[LongWritable],
       classOf[Text],
       hadoopConfiguration).setName("newHadoopRDD-spark-csv")
   }
 
-  def withCharset(context: SparkContext, location: String, charset: String): RDD[String] = {
+  def withCharset(sc: SparkContext, location: String, charset: String): RDD[String] = {
     if (Charset.forName(charset) == TextFile.DEFAULT_CHARSET) {
-      newHadoopRDD(context, location).map(pair => pair._2.toString)
+      newHadoopRDD(sc, location).map(pair => pair._2.toString)
     } else {
       // can't pass a Charset object here cause its not serializable
       // TODO: maybe use mapPartitions instead?
-      newHadoopRDD(context, location).map(
+      newHadoopRDD(sc, location).map(
         pair => new String(pair._2.getBytes, 0, pair._2.getLength, charset))
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/b0bc10b6/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataFrameRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataFrameRDD.scala b/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataFrameRDD.scala
index b3e09b0..5a0436e 100644
--- a/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataFrameRDD.scala
+++ b/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataFrameRDD.scala
@@ -20,8 +20,8 @@ package org.carbondata.spark.rdd
 import org.apache.spark.sql.{CarbonContext, DataFrame, Row}
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 
-class CarbonDataFrameRDD(val sc: CarbonContext, logicalPlan: LogicalPlan)
-  extends DataFrame(sc, logicalPlan) {
+class CarbonDataFrameRDD(val cc: CarbonContext, logicalPlan: LogicalPlan)
+  extends DataFrame(cc, logicalPlan) {
 
   override def collect(): Array[Row] = {
 

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/b0bc10b6/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index c297c02..1b8bb38 100644
--- a/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -93,7 +93,7 @@ object CarbonDataRDDFactory extends Logging {
   }
 
   def mergeCarbonData(
-      sc: SQLContext,
+      sqlContext: SQLContext,
       carbonLoadModel: CarbonLoadModel,
       storeLocation: String,
       hdfsStoreLocation: String,
@@ -442,7 +442,7 @@ object CarbonDataRDDFactory extends Logging {
     }
   }
 
-  def loadCarbonData(sc: SQLContext,
+  def loadCarbonData(sqlContext: SQLContext,
       carbonLoadModel: CarbonLoadModel,
       storeLocation: String,
       hdfsStoreLocation: String,
@@ -485,7 +485,7 @@ object CarbonDataRDDFactory extends Logging {
 
         if (lock.lockWithRetries()) {
           logger.info("Acquired the compaction lock.")
-          startCompactionThreads(sc,
+          startCompactionThreads(sqlContext,
             carbonLoadModel,
             partitioner,
             hdfsStoreLocation,
@@ -566,9 +566,9 @@ object CarbonDataRDDFactory extends Logging {
       // reading the start time of data load.
       val loadStartTime = CarbonLoaderUtil.readCurrentTime()
       carbonLoadModel.setFactTimeStamp(loadStartTime)
-      val cubeCreationTime = CarbonEnv.getInstance(sc).carbonCatalog
+      val cubeCreationTime = CarbonEnv.getInstance(sqlContext).carbonCatalog
         .getCubeCreationTime(carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName)
-      val schemaLastUpdatedTime = CarbonEnv.getInstance(sc).carbonCatalog
+      val schemaLastUpdatedTime = CarbonEnv.getInstance(sqlContext).carbonCatalog
         .getSchemaLastUpdatedTime(carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName)
 
       // compaction handling
@@ -605,7 +605,7 @@ object CarbonDataRDDFactory extends Logging {
                   pathBuilder.substring(0, pathBuilder.size - 1)
                 }
                 (split.getPartition.getUniqueID, SplitUtils.getSplits(pathBuilder.toString(),
-                  sc.sparkContext
+                  sqlContext.sparkContext
                 ))
             }
           } else {
@@ -624,7 +624,7 @@ object CarbonDataRDDFactory extends Logging {
                 }
                 pathBuilder.append(split.getPartition.getUniqueID).append("/")
                 (split.getPartition.getUniqueID,
-                  SplitUtils.getSplits(pathBuilder.toString, sc.sparkContext))
+                  SplitUtils.getSplits(pathBuilder.toString, sqlContext.sparkContext))
             }
           }
 
@@ -637,13 +637,13 @@ object CarbonDataRDDFactory extends Logging {
            * 4)DummyLoadRDD output (host,Array[BlockDetails])as the parameter to CarbonDataLoadRDD
            *   which parititon by host
            */
-          val hadoopConfiguration = new Configuration(sc.sparkContext.hadoopConfiguration)
+          val hadoopConfiguration = new Configuration(sqlContext.sparkContext.hadoopConfiguration)
          // FileUtils will skip file which is no csv, and return all file path which split by ','
           val filePaths = carbonLoadModel.getFactFilePath
           hadoopConfiguration.set("mapreduce.input.fileinputformat.inputdir", filePaths)
           hadoopConfiguration.set("mapreduce.input.fileinputformat.input.dir.recursive",
"true")
 
-          configSplitMaxSize(sc.sparkContext, filePaths, hadoopConfiguration)
+          configSplitMaxSize(sqlContext.sparkContext, filePaths, hadoopConfiguration)
 
           val inputFormat = new org.apache.hadoop.mapreduce.lib.input.TextInputFormat
           inputFormat match {
@@ -700,7 +700,7 @@ object CarbonDataRDDFactory extends Logging {
         carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName,
         partitioner.partitionCount, currentLoadCount.toString)
       val status = new
-          CarbonDataLoadRDD(sc.sparkContext,
+          CarbonDataLoadRDD(sqlContext.sparkContext,
             new DataLoadResultImpl(),
             carbonLoadModel,
             storeLocation,

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/b0bc10b6/integration/spark/src/main/scala/org/carbondata/spark/rdd/Compactor.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/carbondata/spark/rdd/Compactor.scala b/integration/spark/src/main/scala/org/carbondata/spark/rdd/Compactor.scala
index 0c9d37b..d63e730 100644
--- a/integration/spark/src/main/scala/org/carbondata/spark/rdd/Compactor.scala
+++ b/integration/spark/src/main/scala/org/carbondata/spark/rdd/Compactor.scala
@@ -48,7 +48,7 @@ object Compactor {
     kettleHomePath: String,
     cubeCreationTime: Long,
     loadsToMerge: java.util.List[LoadMetadataDetails],
-    sc: SQLContext): Unit = {
+    sqlContext: SQLContext): Unit = {
 
     val startTime = System.nanoTime();
     val mergedLoadName = CarbonDataMergerUtil.getMergedLoadName(loadsToMerge)
@@ -85,7 +85,7 @@ object Compactor {
     )
 
     val mergeStatus = new CarbonMergerRDD(
-      sc.sparkContext,
+      sqlContext.sparkContext,
       new MergeResultImpl(),
       carbonLoadModel,
       carbonMergerMapping

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/b0bc10b6/integration/spark/src/main/scala/org/carbondata/spark/thriftserver/CarbonThriftServer.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/carbondata/spark/thriftserver/CarbonThriftServer.scala b/integration/spark/src/main/scala/org/carbondata/spark/thriftserver/CarbonThriftServer.scala
index f2c11b2..8d4c76d 100644
--- a/integration/spark/src/main/scala/org/carbondata/spark/thriftserver/CarbonThriftServer.scala
+++ b/integration/spark/src/main/scala/org/carbondata/spark/thriftserver/CarbonThriftServer.scala
@@ -47,9 +47,9 @@ object CarbonThriftServer {
         Thread.sleep(30000)
     }
 
-    val carbonContext = new CarbonContext(sc, args.head)
+    val cc = new CarbonContext(sc, args.head)
 
-    HiveThriftServer2.startWithContext(carbonContext)
+    HiveThriftServer2.startWithContext(cc)
   }
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/b0bc10b6/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryTestCase.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryTestCase.scala
index 606e04b..b8e2988 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryTestCase.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryTestCase.scala
@@ -40,7 +40,7 @@ import org.scalatest.BeforeAndAfterAll
   *
   */
 class TimestampDataTypeDirectDictionaryTest extends QueryTest with BeforeAndAfterAll {
-  var oc: HiveContext = _
+  var hiveContext: HiveContext = _
 
   override def beforeAll {
     try {

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/b0bc10b6/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryWithNoDictTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryWithNoDictTestCase.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryWithNoDictTestCase.scala
index dafe8ee..5a60460 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryWithNoDictTestCase.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryWithNoDictTestCase.scala
@@ -39,7 +39,7 @@ import org.carbondata.core.util.CarbonProperties
   *
   */
 class TimestampDataTypeDirectDictionaryWithNoDictTestCase extends QueryTest with BeforeAndAfterAll {
-  var oc: HiveContext = _
+  var hiveContext: HiveContext = _
 
   override def beforeAll {
     try {

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/b0bc10b6/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeNullDataTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeNullDataTest.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeNullDataTest.scala
index c8c1f81..be50dc4 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeNullDataTest.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeNullDataTest.scala
@@ -38,7 +38,7 @@ import org.scalatest.BeforeAndAfterAll
   *
   */
 class TimestampDataTypeNullDataTest extends QueryTest with BeforeAndAfterAll {
-  var oc: HiveContext = _
+  var hiveContext: HiveContext = _
 
   override def beforeAll {
     try {

