spark-reviews mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jkbradley <...@git.apache.org>
Subject [GitHub] spark pull request: [SPARK-13038] [PySpark] Add load/save to pipel...
Date Tue, 15 Mar 2016 00:19:08 GMT
Github user jkbradley commented on a diff in the pull request:

    https://github.com/apache/spark/pull/11683#discussion_r56098424
  
    --- Diff: python/pyspark/ml/base.py ---
    @@ -0,0 +1,119 @@
    +#
    +# Licensed to the Apache Software Foundation (ASF) under one or more
    +# contributor license agreements.  See the NOTICE file distributed with
    +# this work for additional information regarding copyright ownership.
    +# The ASF licenses this file to You under the Apache License, Version 2.0
    +# (the "License"); you may not use this file except in compliance with
    +# the License.  You may obtain a copy of the License at
    +#
    +#    http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing, software
    +# distributed under the License is distributed on an "AS IS" BASIS,
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +# See the License for the specific language governing permissions and
    +# limitations under the License.
    +#
    +
    +from abc import ABCMeta, abstractmethod
    +
    +from pyspark import since
    +from pyspark.ml.param import Params
    +from pyspark.mllib.common import inherit_doc
    +
    +
    +@inherit_doc
    +class Estimator(Params):
    +    """
    +    Abstract class for estimators that fit models to data.
    +    .. versionadded:: 1.3.0
    +    """
    +
    +    __metaclass__ = ABCMeta
    +
    +    @abstractmethod
    +    def _fit(self, dataset):
    +        """
    +        Fits a model to the input dataset. This is called by the
    +        default implementation of fit.
    +        :param dataset: input dataset, which is an instance of
    +                        :py:class:`pyspark.sql.DataFrame`
    +        :returns: fitted model
    +        """
    +        raise NotImplementedError()
    +
    +    @since("1.3.0")
    +    def fit(self, dataset, params=None):
    +        """
    +        Fits a model to the input dataset with optional parameters.
    +        :param dataset: input dataset, which is an instance of
    +                        :py:class:`pyspark.sql.DataFrame`
    +        :param params: an optional param map that overrides embedded
    +                       params. If a list/tuple of param maps is given,
    +                       this calls fit on each param map and returns a
    +                       list of models.
    +        :returns: fitted model(s)
    +        """
    +        if params is None:
    +            params = dict()
    +        if isinstance(params, (list, tuple)):
    +            return [self.fit(dataset, paramMap) for paramMap in params]
    +        elif isinstance(params, dict):
    +            if params:
    +                return self.copy(params)._fit(dataset)
    +            else:
    +                return self._fit(dataset)
    +        else:
    +            raise ValueError("Params must be either a param map or a list/tuple of param maps, "
    +                             "but got %s." % type(params))
    +
    +
    +@inherit_doc
    +class Transformer(Params):
    +    """
    +    Abstract class for transformers that transform one dataset into
    +    another.
    +    .. versionadded:: 1.3.0
    +    """
    +
    +    __metaclass__ = ABCMeta
    +
    +    @abstractmethod
    +    def _transform(self, dataset):
    +        """
    +        Transforms the input dataset.
    +        :param dataset: input dataset, which is an instance of
    +                        :py:class:`pyspark.sql.DataFrame`
    +        :returns: transformed dataset
    +        """
    +        raise NotImplementedError()
    +
    +    @since("1.3.0")
    +    def transform(self, dataset, params=None):
    +        """
    +        Transforms the input dataset with optional parameters.
    +        :param dataset: input dataset, which is an instance of
    +                        :py:class:`pyspark.sql.DataFrame`
    +        :param params: an optional param map that overrides embedded
    +                       params.
    +        :returns: transformed dataset
    +        """
    +        if params is None:
    +            params = dict()
    +        if isinstance(params, dict):
    +            if params:
    +                return self.copy(params,)._transform(dataset)
    --- End diff --
    
    remove the extra trailing comma after `params` (I'm not sure why it was in the original code.)


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastructure@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org


Mime
View raw message