flink-issues mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From thvasilo <...@git.apache.org>
Subject [GitHub] flink pull request: [Flink-1727]Decision tree
Date Mon, 22 Jun 2015 14:51:09 GMT
Github user thvasilo commented on a diff in the pull request:

    https://github.com/apache/flink/pull/710#discussion_r32942116
  
    --- Diff: flink-staging/flink-ml/src/main/scala/org/apache/flink/ml/classification/DecisionTree.scala
---
    @@ -0,0 +1,490 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.flink.ml.classification
    +
    +import java.lang.Double.MAX_VALUE
    +import java.util
    +
    +import org.apache.flink.api.common.functions.{RichFlatMapFunction, RichMapFunction}
    +import org.apache.flink.api.scala._
    +import org.apache.flink.configuration.Configuration
    +import org.apache.flink.ml.common.FlinkMLTools.ModuloKeyPartitioner
    +import org.apache.flink.ml.common._
    +import org.apache.flink.ml.math.{CategoricalHistogram, ContinuousHistogram, OnlineHistogram,
Vector}
    +import org.apache.flink.ml.pipeline.{FitOperation, PredictOperation, Predictor}
    +import org.apache.flink.ml.tree._
    +import org.apache.flink.util.Collector
    +
    +import scala.collection.mutable
    +
    +class DecisionTree extends Predictor[DecisionTree] {
    +
    +  import DecisionTree._
    +
    +  // Decision tree produced by fit(); remains None until the model has been trained.
    +  var treeOption: Option[DataSet[Tree]] = None
    +
    +  /** Sets the maximum allowed depth of the tree.
    +    * Currently only values up to 30 are allowed.
    +    *
    +    * @param depth maximum tree depth (at most 30)
    +    * @return itself
    +    */
    +  def setDepth(depth: Int): DecisionTree = {
    +    require(depth <= 30, "Maximum depth allowed: 30")
    +    parameters.add(Depth, depth)
    +    this
    +  }
    +
    +  /** Sets the minimum number of instances that must be present at a node
    +    * for its parent to split.
    +    *
    +    * @param minInstancesPerNode minimum instance count per node (at least 1)
    +    * @return itself
    +    */
    +  def setMinInstancePerNode(minInstancesPerNode: Int): DecisionTree = {
    +    require(minInstancesPerNode >= 1,
    +      "Every node must have at least one instance associated with it")
    +    parameters.add(MinInstancesPerNode, minInstancesPerNode)
    +    this
    +  }
    +
    +  /** Sets whether or not to prune the tree after building.
    +    *
    +    * @param prune true to prune the tree after it is built
    +    * @return itself
    +    */
    +  def setPruning(prune: Boolean): DecisionTree = {
    +    parameters.add(Pruning, prune)
    +    this
    +  }
    +
    +  /** Sets the maximum number of bins to be used for calculating splits.
    +    *
    +    * @param maxBins maximum number of histogram bins (at least 1)
    +    * @return itself
    +    */
    +  def setMaxBins(maxBins: Int): DecisionTree = {
    +    require(maxBins >= 1, "Maximum bins used must be at least one")
    +    parameters.add(MaxBins, maxBins)
    +    this
    +  }
    +
    +  /** Sets the splitting strategy. Only "Gini" and "Entropy" are supported.
    +    *
    +    * @param splitStrategy impurity measure, either "Gini" or "Entropy"
    +    * @return itself
    +    */
    +  def setSplitStrategy(splitStrategy: String): DecisionTree = {
    +    require(splitStrategy == "Gini" || splitStrategy == "Entropy",
    +      "Algorithm " + splitStrategy + " not supported")
    +    parameters.add(SplitStrategy, splitStrategy)
    +    this
    +  }
    +
    +  /** Sets the dimension of the data. Will be cross-checked with the data later.
    +    *
    +    * @param dimension number of features per instance (at least 1)
    +    * @return itself
    +    */
    +  def setDimension(dimension: Int): DecisionTree = {
    +    require(dimension >= 1, "Dimension cannot be less than one")
    +    parameters.add(Dimension, dimension)
    +    this
    +  }
    +
    +  /** Sets which fields are to be considered categorical.
    +    *
    +    * @param category array of indices of the categorical fields
    +    * @return itself
    +    */
    +  def setCategory(category: Array[Int]): DecisionTree = {
    +    parameters.add(Category, category)
    +    this
    +  }
    +
    +  /** Sets how many classes there are in the data (will be cross-checked with
    +    * the data later).
    +    *
    +    * @param numClasses number of distinct class labels (at least 2)
    +    * @return itself
    +    */
    +  def setClasses(numClasses: Int): DecisionTree = {
    +    require(numClasses > 1, "There must be at least two classes in the data")
    +    parameters.add(Classes, numClasses)
    +    this
    +  }
    +
    +  /** Computes the classification accuracy of a trained tree on a labeled data set.
    +    *
    +    * @param input labeled instances to evaluate
    +    * @param tree trained decision tree, broadcast to every mapper
    +    * @return percentage of correctly classified instances, in [0, 100]
    +    */
    +  def testAccuracy(input: DataSet[LabeledVector], tree: DataSet[Tree]): Double = {
    +
    +    val accuracyMapper = new RichMapFunction[LabeledVector, (Int, Int)] {
    +      var tree: Tree = _
    +
    +      override def open(parameters: Configuration): Unit = {
    +        // Fetch the broadcast tree once per task, before any map() calls.
    +        tree = getRuntimeContext.getBroadcastVariable(DECISION_TREE).get(0)
    +      }
    +
    +      // Emits (correct, total) per instance; summed up by the reduce below.
    +      override def map(labeledVector: LabeledVector): (Int, Int) = {
    +        // labelAddition looks like an internal label offset that is undone here to
    +        // recover the original label — NOTE(review): confirm against Tree.config.
    +        val label = tree.filter(labeledVector.vector)._2 - tree.config.labelAddition
    +        if (label == labeledVector.label) (1, 1)
    +        else (0, 1)
    +      }
    +    }
    +    // Aggregate to a single (correct, total) pair; "+ 0.0" forces double division.
    +    val result = input.map(accuracyMapper).withBroadcastSet(tree, DECISION_TREE).
    +      reduce((a, b) => (a._1 + b._1, a._2 + b._2)).
    +      collect().toArray.apply(0)
    +    100 * (result._1 + 0.0) / result._2
    +  }
    +}
    +
    +/** Companion object of Decision Tree.
    +  * Contains convenience functions and the parameter type definitions of the algorithm.
    +  *
    +  */
    +object DecisionTree {
    +  val DECISION_TREE = "decision_tree"
    +  val DECISION_TREE_CONFIG = "decision_tree_configuration"
    +  val DECISION_TREE_HISTS = "decision_tree_histograms"
    +
    +  def apply(): DecisionTree = {
    +    new DecisionTree()
    +  }
    +
    +  case object Depth extends Parameter[Int] {
    +    val defaultValue: Option[Int] = Some(30)
    +  }
    +
    +  case object SplitStrategy extends Parameter[String] {
    +    val defaultValue: Option[String] = Some("Gini")
    +  }
    +
    +  case object MinInstancesPerNode extends Parameter[Int] {
    +    val defaultValue: Option[Int] = Some(1)
    +  }
    +
    +  case object Pruning extends Parameter[Boolean] {
    +    val defaultValue: Option[Boolean] = Some(false)
    +  }
    +
    +  case object MaxBins extends Parameter[Int] {
    +    val defaultValue: Option[Int] = Some(100)
    +  }
    +
    +  case object Dimension extends Parameter[Int] {
    +    val defaultValue: Option[Int] = Some(2)
    +  }
    +
    +  case object Category extends Parameter[Array[Int]] {
    +    val defaultValue: Option[Array[Int]] = Some(Array.ofDim(0))
    +  }
    +
    +  case object Classes extends Parameter[Int] {
    +    val defaultValue: Option[Int] = Some(2)
    +  }
    +
    +
    +  // ========================================== Operations =========================================
    +
    +  /** [[org.apache.flink.ml.pipeline.PredictOperation]] for vector types. The result
type is a
    +    * [[LabeledVector]]
    +    *
    +    * @tparam T Subtype of [[Vector]]
    +    * @return
    +    */
    +  implicit def predictValues[T <: Vector] = {
    +    new PredictOperation[DecisionTree, T, LabeledVector] {
    +      override def predict(
    +                            instance: DecisionTree,
    +                            predictParameters: ParameterMap,
    +                            input: DataSet[T])
    +      : DataSet[LabeledVector] = {
    +
    +        instance.treeOption match {
    +          case Some(tree) => {
    +            input.map(new PredictionMapper[T]).withBroadcastSet(tree, DECISION_TREE)
    +          }
    +          case None => {
    +            throw new RuntimeException("The Decision Tree model has not been trained.
Call first" +
    +              " fit before calling the predict operation.")
    +          }
    +        }
    +      }
    +    }
    +  }
    +
    +  /** Mapper to calculate the value of the prediction function. This is a RichMapFunction,
because
    +    * we broadcast the tree to all mappers.
    +    */
    +  class PredictionMapper[T <: Vector] extends RichMapFunction[T, LabeledVector] {
    --- End diff --
    
    As with the above, it would be better to switch to mapWithBcVariable.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastructure@apache.org or file a JIRA ticket
with INFRA.
---

Mime
View raw message