spark-reviews mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mengxr <...@git.apache.org>
Subject [GitHub] spark pull request: [WIP] [SPARK-4587] [mllib] ML model import/exp...
Date Sat, 31 Jan 2015 09:17:06 GMT
Github user mengxr commented on a diff in the pull request:

    https://github.com/apache/spark/pull/4233#discussion_r23887816
  
    --- Diff: mllib/src/main/scala/org/apache/spark/mllib/classification/LogisticRegression.scala
---
    @@ -68,6 +79,65 @@ class LogisticRegressionModel (
           case None => score
         }
       }
    +
    +  override def save(sc: SparkContext, path: String): Unit = {
    +    val sqlContext = new SQLContext(sc)
    +    import sqlContext._
    +
    +    // Create JSON metadata.
    +    val metadata = LogisticRegressionModel.Metadata(
    +      clazz = this.getClass.getName, version = Exportable.latestVersion)
    +    val metadataRDD: DataFrame = sc.parallelize(Seq(metadata))
    +    metadataRDD.toJSON.saveAsTextFile(path + "/metadata")
    +    // Create Parquet data.
    +    val data = LogisticRegressionModel.Data(weights, intercept, threshold)
    +    val dataRDD: DataFrame = sc.parallelize(Seq(data))
    +    dataRDD.saveAsParquetFile(path + "/data")
    +  }
    +}
    +
    +object LogisticRegressionModel extends Importable[LogisticRegressionModel] {
    +
    +  private case class Metadata(clazz: String, version: String)
    +
    +  private case class Data(weights: Vector, intercept: Double, threshold: Option[Double])
    +
    +  override def load(sc: SparkContext, path: String): LogisticRegressionModel = {
    +    val sqlContext = new SQLContext(sc)
    +    import sqlContext._
    +
    +    // Load JSON metadata.
    +    val metadataRDD = sqlContext.jsonFile(path + "/metadata")
    +    val metadataArray = metadataRDD.select("clazz", "version").take(1)
    +    assert(metadataArray.size == 1,
    +      s"Unable to load LogisticRegressionModel metadata from: ${path + "/metadata"}")
    +    metadataArray(0) match {
    --- End diff --
    
    The loading part could be more generic. For each model and each version, we need to maintain
a loader that reads saved models into the current version, or declares them incompatible. For example:

    
    ~~~
    object LogisticRegressionModel extends Importable[LogisticRegressionModel] {
      def load(sc, path): LogisticRegressionModel = {
        val metadata = ...
        val importer = Importers.get(version)
        importer.load(sc, path)
      }
      object Importers {
         def get(version: String): Importer[LogisticRegressionModel] = {
            version match { 
              case "1.0" => new V1()
              case "2.0" => ...
              case _ => throw new Exception(...)
            }
         }
        class V1 extends Importer[LogisticRegressionModel] {
          ...
        }
      }
    ~~~


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastructure@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org


Mime
View raw message