spark-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From m...@apache.org
Subject spark git commit: [SPARK-11975][ML] Remove duplicate mllib example (DT/RF/GBT in Java/Python)
Date Mon, 30 Nov 2015 23:01:19 GMT
Repository: spark
Updated Branches:
  refs/heads/branch-1.6 1562ef10f -> a387cef3a


[SPARK-11975][ML] Remove duplicate mllib example (DT/RF/GBT in Java/Python)

Remove duplicate mllib example (DT/RF/GBT in Java/Python).
Since we have tutorial code for DT/RF/GBT classification/regression in Scala/Java/Python and
example applications for DT/RF/GBT in Scala, we mark these as duplicated and remove them.
mengxr

Author: Yanbo Liang <ybliang8@gmail.com>

Closes #9954 from yanboliang/SPARK-11975.

(cherry picked from commit de64b65f7cf2ac58c1abc310ba547637fdbb8557)
Signed-off-by: Xiangrui Meng <meng@databricks.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/a387cef3
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/a387cef3
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/a387cef3

Branch: refs/heads/branch-1.6
Commit: a387cef3a40d47a8ca7fa9c6aa2842318700df49
Parents: 1562ef1
Author: Yanbo Liang <ybliang8@gmail.com>
Authored: Mon Nov 30 15:01:08 2015 -0800
Committer: Xiangrui Meng <meng@databricks.com>
Committed: Mon Nov 30 15:01:16 2015 -0800

----------------------------------------------------------------------
 .../spark/examples/mllib/JavaDecisionTree.java  | 116 ---------------
 .../mllib/JavaGradientBoostedTreesRunner.java   | 126 ----------------
 .../examples/mllib/JavaRandomForestExample.java | 139 ------------------
 .../main/python/mllib/decision_tree_runner.py   | 144 -------------------
 .../main/python/mllib/gradient_boosted_trees.py |  77 ----------
 .../main/python/mllib/random_forest_example.py  |  90 ------------
 6 files changed, 692 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/a387cef3/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTree.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTree.java
b/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTree.java
deleted file mode 100644
index 1f82e3f..0000000
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTree.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.examples.mllib;
-
-import java.util.HashMap;
-
-import scala.Tuple2;
-
-import org.apache.spark.api.java.function.Function2;
-import org.apache.spark.api.java.JavaPairRDD;
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.api.java.function.Function;
-import org.apache.spark.api.java.function.PairFunction;
-import org.apache.spark.mllib.regression.LabeledPoint;
-import org.apache.spark.mllib.tree.DecisionTree;
-import org.apache.spark.mllib.tree.model.DecisionTreeModel;
-import org.apache.spark.mllib.util.MLUtils;
-import org.apache.spark.SparkConf;
-
-/**
- * Classification and regression using decision trees.
- */
-public final class JavaDecisionTree {
-
-  public static void main(String[] args) {
-    String datapath = "data/mllib/sample_libsvm_data.txt";
-    if (args.length == 1) {
-      datapath = args[0];
-    } else if (args.length > 1) {
-      System.err.println("Usage: JavaDecisionTree <libsvm format data file>");
-      System.exit(1);
-    }
-    SparkConf sparkConf = new SparkConf().setAppName("JavaDecisionTree");
-    JavaSparkContext sc = new JavaSparkContext(sparkConf);
-
-    JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(sc.sc(), datapath).toJavaRDD().cache();
-
-    // Compute the number of classes from the data.
-    Integer numClasses = data.map(new Function<LabeledPoint, Double>() {
-      @Override public Double call(LabeledPoint p) {
-        return p.label();
-      }
-    }).countByValue().size();
-
-    // Set parameters.
-    //  Empty categoricalFeaturesInfo indicates all features are continuous.
-    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
-    String impurity = "gini";
-    Integer maxDepth = 5;
-    Integer maxBins = 32;
-
-    // Train a DecisionTree model for classification.
-    final DecisionTreeModel model = DecisionTree.trainClassifier(data, numClasses,
-      categoricalFeaturesInfo, impurity, maxDepth, maxBins);
-
-    // Evaluate model on training instances and compute training error
-    JavaPairRDD<Double, Double> predictionAndLabel =
-      data.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
-        @Override public Tuple2<Double, Double> call(LabeledPoint p) {
-          return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
-        }
-      });
-    Double trainErr =
-      1.0 * predictionAndLabel.filter(new Function<Tuple2<Double, Double>, Boolean>()
{
-        @Override public Boolean call(Tuple2<Double, Double> pl) {
-          return !pl._1().equals(pl._2());
-        }
-      }).count() / data.count();
-    System.out.println("Training error: " + trainErr);
-    System.out.println("Learned classification tree model:\n" + model);
-
-    // Train a DecisionTree model for regression.
-    impurity = "variance";
-    final DecisionTreeModel regressionModel = DecisionTree.trainRegressor(data,
-        categoricalFeaturesInfo, impurity, maxDepth, maxBins);
-
-    // Evaluate model on training instances and compute training error
-    JavaPairRDD<Double, Double> regressorPredictionAndLabel =
-      data.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
-        @Override public Tuple2<Double, Double> call(LabeledPoint p) {
-          return new Tuple2<Double, Double>(regressionModel.predict(p.features()),
p.label());
-        }
-      });
-    Double trainMSE =
-      regressorPredictionAndLabel.map(new Function<Tuple2<Double, Double>, Double>()
{
-        @Override public Double call(Tuple2<Double, Double> pl) {
-          Double diff = pl._1() - pl._2();
-          return diff * diff;
-        }
-      }).reduce(new Function2<Double, Double, Double>() {
-        @Override public Double call(Double a, Double b) {
-          return a + b;
-        }
-      }) / data.count();
-    System.out.println("Training Mean Squared Error: " + trainMSE);
-    System.out.println("Learned regression tree model:\n" + regressionModel);
-
-    sc.stop();
-  }
-}

http://git-wip-us.apache.org/repos/asf/spark/blob/a387cef3/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostedTreesRunner.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostedTreesRunner.java
b/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostedTreesRunner.java
deleted file mode 100644
index a1844d5..0000000
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostedTreesRunner.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.examples.mllib;
-
-import scala.Tuple2;
-
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaPairRDD;
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.api.java.function.Function;
-import org.apache.spark.api.java.function.Function2;
-import org.apache.spark.api.java.function.PairFunction;
-import org.apache.spark.mllib.regression.LabeledPoint;
-import org.apache.spark.mllib.tree.GradientBoostedTrees;
-import org.apache.spark.mllib.tree.configuration.BoostingStrategy;
-import org.apache.spark.mllib.tree.model.GradientBoostedTreesModel;
-import org.apache.spark.mllib.util.MLUtils;
-
-/**
- * Classification and regression using gradient-boosted decision trees.
- */
-public final class JavaGradientBoostedTreesRunner {
-
-  private static void usage() {
-    System.err.println("Usage: JavaGradientBoostedTreesRunner <libsvm format data file>"
+
-        " <Classification/Regression>");
-    System.exit(-1);
-  }
-
-  public static void main(String[] args) {
-    String datapath = "data/mllib/sample_libsvm_data.txt";
-    String algo = "Classification";
-    if (args.length >= 1) {
-      datapath = args[0];
-    }
-    if (args.length >= 2) {
-      algo = args[1];
-    }
-    if (args.length > 2) {
-      usage();
-    }
-    SparkConf sparkConf = new SparkConf().setAppName("JavaGradientBoostedTreesRunner");
-    JavaSparkContext sc = new JavaSparkContext(sparkConf);
-
-    JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(sc.sc(), datapath).toJavaRDD().cache();
-
-    // Set parameters.
-    //  Note: All features are treated as continuous.
-    BoostingStrategy boostingStrategy = BoostingStrategy.defaultParams(algo);
-    boostingStrategy.setNumIterations(10);
-    boostingStrategy.treeStrategy().setMaxDepth(5);
-
-    if (algo.equals("Classification")) {
-      // Compute the number of classes from the data.
-      Integer numClasses = data.map(new Function<LabeledPoint, Double>() {
-        @Override public Double call(LabeledPoint p) {
-          return p.label();
-        }
-      }).countByValue().size();
-      boostingStrategy.treeStrategy().setNumClasses(numClasses);
-
-      // Train a GradientBoosting model for classification.
-      final GradientBoostedTreesModel model = GradientBoostedTrees.train(data, boostingStrategy);
-
-      // Evaluate model on training instances and compute training error
-      JavaPairRDD<Double, Double> predictionAndLabel =
-          data.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
-            @Override public Tuple2<Double, Double> call(LabeledPoint p) {
-              return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
-            }
-          });
-      Double trainErr =
-          1.0 * predictionAndLabel.filter(new Function<Tuple2<Double, Double>, Boolean>()
{
-            @Override public Boolean call(Tuple2<Double, Double> pl) {
-              return !pl._1().equals(pl._2());
-            }
-          }).count() / data.count();
-      System.out.println("Training error: " + trainErr);
-      System.out.println("Learned classification tree model:\n" + model);
-    } else if (algo.equals("Regression")) {
-      // Train a GradientBoosting model for classification.
-      final GradientBoostedTreesModel model = GradientBoostedTrees.train(data, boostingStrategy);
-
-      // Evaluate model on training instances and compute training error
-      JavaPairRDD<Double, Double> predictionAndLabel =
-          data.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
-            @Override public Tuple2<Double, Double> call(LabeledPoint p) {
-              return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
-            }
-          });
-      Double trainMSE =
-          predictionAndLabel.map(new Function<Tuple2<Double, Double>, Double>()
{
-            @Override public Double call(Tuple2<Double, Double> pl) {
-              Double diff = pl._1() - pl._2();
-              return diff * diff;
-            }
-          }).reduce(new Function2<Double, Double, Double>() {
-            @Override public Double call(Double a, Double b) {
-              return a + b;
-            }
-          }) / data.count();
-      System.out.println("Training Mean Squared Error: " + trainMSE);
-      System.out.println("Learned regression tree model:\n" + model);
-    } else {
-      usage();
-    }
-
-    sc.stop();
-  }
-}

http://git-wip-us.apache.org/repos/asf/spark/blob/a387cef3/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestExample.java
b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestExample.java
deleted file mode 100644
index 89a4e09..0000000
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestExample.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.examples.mllib;
-
-import scala.Tuple2;
-
-import java.util.HashMap;
-
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaPairRDD;
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.api.java.function.Function;
-import org.apache.spark.api.java.function.Function2;
-import org.apache.spark.api.java.function.PairFunction;
-import org.apache.spark.mllib.regression.LabeledPoint;
-import org.apache.spark.mllib.tree.RandomForest;
-import org.apache.spark.mllib.tree.model.RandomForestModel;
-import org.apache.spark.mllib.util.MLUtils;
-
-public final class JavaRandomForestExample {
-
-  /**
-   * Note: This example illustrates binary classification.
-   * For information on multiclass classification, please refer to the JavaDecisionTree.java
-   * example.
-   */
-  private static void testClassification(JavaRDD<LabeledPoint> trainingData,
-                                         JavaRDD<LabeledPoint> testData) {
-    // Train a RandomForest model.
-    //  Empty categoricalFeaturesInfo indicates all features are continuous.
-    Integer numClasses = 2;
-    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
-    Integer numTrees = 3; // Use more in practice.
-    String featureSubsetStrategy = "auto"; // Let the algorithm choose.
-    String impurity = "gini";
-    Integer maxDepth = 4;
-    Integer maxBins = 32;
-    Integer seed = 12345;
-
-    final RandomForestModel model = RandomForest.trainClassifier(trainingData, numClasses,
-        categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins,
-        seed);
-
-    // Evaluate model on test instances and compute test error
-    JavaPairRDD<Double, Double> predictionAndLabel =
-        testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
-          @Override
-          public Tuple2<Double, Double> call(LabeledPoint p) {
-            return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
-          }
-        });
-    Double testErr =
-        1.0 * predictionAndLabel.filter(new Function<Tuple2<Double, Double>, Boolean>()
{
-          @Override
-          public Boolean call(Tuple2<Double, Double> pl) {
-            return !pl._1().equals(pl._2());
-          }
-        }).count() / testData.count();
-    System.out.println("Test Error: " + testErr);
-    System.out.println("Learned classification forest model:\n" + model.toDebugString());
-  }
-
-  private static void testRegression(JavaRDD<LabeledPoint> trainingData,
-                                     JavaRDD<LabeledPoint> testData) {
-    // Train a RandomForest model.
-    //  Empty categoricalFeaturesInfo indicates all features are continuous.
-    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
-    Integer numTrees = 3; // Use more in practice.
-    String featureSubsetStrategy = "auto"; // Let the algorithm choose.
-    String impurity = "variance";
-    Integer maxDepth = 4;
-    Integer maxBins = 32;
-    Integer seed = 12345;
-
-    final RandomForestModel model = RandomForest.trainRegressor(trainingData,
-        categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins,
-        seed);
-
-    // Evaluate model on test instances and compute test error
-    JavaPairRDD<Double, Double> predictionAndLabel =
-        testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
-          @Override
-          public Tuple2<Double, Double> call(LabeledPoint p) {
-            return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
-          }
-        });
-    Double testMSE =
-        predictionAndLabel.map(new Function<Tuple2<Double, Double>, Double>()
{
-          @Override
-          public Double call(Tuple2<Double, Double> pl) {
-            Double diff = pl._1() - pl._2();
-            return diff * diff;
-          }
-        }).reduce(new Function2<Double, Double, Double>() {
-          @Override
-          public Double call(Double a, Double b) {
-            return a + b;
-          }
-        }) / testData.count();
-    System.out.println("Test Mean Squared Error: " + testMSE);
-    System.out.println("Learned regression forest model:\n" + model.toDebugString());
-  }
-
-  public static void main(String[] args) {
-    SparkConf sparkConf = new SparkConf().setAppName("JavaRandomForestExample");
-    JavaSparkContext sc = new JavaSparkContext(sparkConf);
-
-    // Load and parse the data file.
-    String datapath = "data/mllib/sample_libsvm_data.txt";
-    JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(sc.sc(), datapath).toJavaRDD();
-    // Split the data into training and test sets (30% held out for testing)
-    JavaRDD<LabeledPoint>[] splits = data.randomSplit(new double[]{0.7, 0.3});
-    JavaRDD<LabeledPoint> trainingData = splits[0];
-    JavaRDD<LabeledPoint> testData = splits[1];
-
-    System.out.println("\nRunning example of classification using RandomForest\n");
-    testClassification(trainingData, testData);
-
-    System.out.println("\nRunning example of regression using RandomForest\n");
-    testRegression(trainingData, testData);
-    sc.stop();
-  }
-}

http://git-wip-us.apache.org/repos/asf/spark/blob/a387cef3/examples/src/main/python/mllib/decision_tree_runner.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/mllib/decision_tree_runner.py b/examples/src/main/python/mllib/decision_tree_runner.py
deleted file mode 100755
index 513ed8f..0000000
--- a/examples/src/main/python/mllib/decision_tree_runner.py
+++ /dev/null
@@ -1,144 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Decision tree classification and regression using MLlib.
-
-This example requires NumPy (http://www.numpy.org/).
-"""
-from __future__ import print_function
-
-import numpy
-import os
-import sys
-
-from operator import add
-
-from pyspark import SparkContext
-from pyspark.mllib.regression import LabeledPoint
-from pyspark.mllib.tree import DecisionTree
-from pyspark.mllib.util import MLUtils
-
-
-def getAccuracy(dtModel, data):
-    """
-    Return accuracy of DecisionTreeModel on the given RDD[LabeledPoint].
-    """
-    seqOp = (lambda acc, x: acc + (x[0] == x[1]))
-    predictions = dtModel.predict(data.map(lambda x: x.features))
-    truth = data.map(lambda p: p.label)
-    trainCorrect = predictions.zip(truth).aggregate(0, seqOp, add)
-    if data.count() == 0:
-        return 0
-    return trainCorrect / (0.0 + data.count())
-
-
-def getMSE(dtModel, data):
-    """
-    Return mean squared error (MSE) of DecisionTreeModel on the given
-    RDD[LabeledPoint].
-    """
-    seqOp = (lambda acc, x: acc + numpy.square(x[0] - x[1]))
-    predictions = dtModel.predict(data.map(lambda x: x.features))
-    truth = data.map(lambda p: p.label)
-    trainMSE = predictions.zip(truth).aggregate(0, seqOp, add)
-    if data.count() == 0:
-        return 0
-    return trainMSE / (0.0 + data.count())
-
-
-def reindexClassLabels(data):
-    """
-    Re-index class labels in a dataset to the range {0,...,numClasses-1}.
-    If all labels in that range already appear at least once,
-     then the returned RDD is the same one (without a mapping).
-    Note: If a label simply does not appear in the data,
-          the index will not include it.
-          Be aware of this when reindexing subsampled data.
-    :param data: RDD of LabeledPoint where labels are integer values
-                 denoting labels for a classification problem.
-    :return: Pair (reindexedData, origToNewLabels) where
-             reindexedData is an RDD of LabeledPoint with labels in
-              the range {0,...,numClasses-1}, and
-             origToNewLabels is a dictionary mapping original labels
-              to new labels.
-    """
-    # classCounts: class --> # examples in class
-    classCounts = data.map(lambda x: x.label).countByValue()
-    numExamples = sum(classCounts.values())
-    sortedClasses = sorted(classCounts.keys())
-    numClasses = len(classCounts)
-    # origToNewLabels: class --> index in 0,...,numClasses-1
-    if (numClasses < 2):
-        print("Dataset for classification should have at least 2 classes."
-              " The given dataset had only %d classes." % numClasses, file=sys.stderr)
-        exit(1)
-    origToNewLabels = dict([(sortedClasses[i], i) for i in range(0, numClasses)])
-
-    print("numClasses = %d" % numClasses)
-    print("Per-class example fractions, counts:")
-    print("Class\tFrac\tCount")
-    for c in sortedClasses:
-        frac = classCounts[c] / (numExamples + 0.0)
-        print("%g\t%g\t%d" % (c, frac, classCounts[c]))
-
-    if (sortedClasses[0] == 0 and sortedClasses[-1] == numClasses - 1):
-        return (data, origToNewLabels)
-    else:
-        reindexedData = \
-            data.map(lambda x: LabeledPoint(origToNewLabels[x.label], x.features))
-        return (reindexedData, origToNewLabels)
-
-
-def usage():
-    print("Usage: decision_tree_runner [libsvm format data filepath]", file=sys.stderr)
-    exit(1)
-
-
-if __name__ == "__main__":
-    if len(sys.argv) > 2:
-        usage()
-    sc = SparkContext(appName="PythonDT")
-
-    # Load data.
-    dataPath = 'data/mllib/sample_libsvm_data.txt'
-    if len(sys.argv) == 2:
-        dataPath = sys.argv[1]
-    if not os.path.isfile(dataPath):
-        sc.stop()
-        usage()
-    points = MLUtils.loadLibSVMFile(sc, dataPath)
-
-    # Re-index class labels if needed.
-    (reindexedData, origToNewLabels) = reindexClassLabels(points)
-    numClasses = len(origToNewLabels)
-
-    # Train a classifier.
-    categoricalFeaturesInfo = {}  # no categorical features
-    model = DecisionTree.trainClassifier(reindexedData, numClasses=numClasses,
-                                         categoricalFeaturesInfo=categoricalFeaturesInfo)
-    # Print learned tree and stats.
-    print("Trained DecisionTree for classification:")
-    print("  Model numNodes: %d" % model.numNodes())
-    print("  Model depth: %d" % model.depth())
-    print("  Training accuracy: %g" % getAccuracy(model, reindexedData))
-    if model.numNodes() < 20:
-        print(model.toDebugString())
-    else:
-        print(model)
-
-    sc.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/a387cef3/examples/src/main/python/mllib/gradient_boosted_trees.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/mllib/gradient_boosted_trees.py b/examples/src/main/python/mllib/gradient_boosted_trees.py
deleted file mode 100644
index 781bd61..0000000
--- a/examples/src/main/python/mllib/gradient_boosted_trees.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Gradient boosted Trees classification and regression using MLlib.
-"""
-from __future__ import print_function
-
-import sys
-
-from pyspark.context import SparkContext
-from pyspark.mllib.tree import GradientBoostedTrees
-from pyspark.mllib.util import MLUtils
-
-
-def testClassification(trainingData, testData):
-    # Train a GradientBoostedTrees model.
-    #  Empty categoricalFeaturesInfo indicates all features are continuous.
-    model = GradientBoostedTrees.trainClassifier(trainingData, categoricalFeaturesInfo={},
-                                                 numIterations=30, maxDepth=4)
-    # Evaluate model on test instances and compute test error
-    predictions = model.predict(testData.map(lambda x: x.features))
-    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
-    testErr = labelsAndPredictions.filter(lambda v_p: v_p[0] != v_p[1]).count() \
-        / float(testData.count())
-    print('Test Error = ' + str(testErr))
-    print('Learned classification ensemble model:')
-    print(model.toDebugString())
-
-
-def testRegression(trainingData, testData):
-    # Train a GradientBoostedTrees model.
-    #  Empty categoricalFeaturesInfo indicates all features are continuous.
-    model = GradientBoostedTrees.trainRegressor(trainingData, categoricalFeaturesInfo={},
-                                                numIterations=30, maxDepth=4)
-    # Evaluate model on test instances and compute test error
-    predictions = model.predict(testData.map(lambda x: x.features))
-    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
-    testMSE = labelsAndPredictions.map(lambda vp: (vp[0] - vp[1]) * (vp[0] - vp[1])).sum()
\
-        / float(testData.count())
-    print('Test Mean Squared Error = ' + str(testMSE))
-    print('Learned regression ensemble model:')
-    print(model.toDebugString())
-
-
-if __name__ == "__main__":
-    if len(sys.argv) > 1:
-        print("Usage: gradient_boosted_trees", file=sys.stderr)
-        exit(1)
-    sc = SparkContext(appName="PythonGradientBoostedTrees")
-
-    # Load and parse the data file into an RDD of LabeledPoint.
-    data = MLUtils.loadLibSVMFile(sc, 'data/mllib/sample_libsvm_data.txt')
-    # Split the data into training and test sets (30% held out for testing)
-    (trainingData, testData) = data.randomSplit([0.7, 0.3])
-
-    print('\nRunning example of classification using GradientBoostedTrees\n')
-    testClassification(trainingData, testData)
-
-    print('\nRunning example of regression using GradientBoostedTrees\n')
-    testRegression(trainingData, testData)
-
-    sc.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/a387cef3/examples/src/main/python/mllib/random_forest_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/mllib/random_forest_example.py b/examples/src/main/python/mllib/random_forest_example.py
deleted file mode 100755
index 4cfdad8..0000000
--- a/examples/src/main/python/mllib/random_forest_example.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Random Forest classification and regression using MLlib.
-
-Note: This example illustrates binary classification.
-      For information on multiclass classification, please refer to the decision_tree_runner.py
-      example.
-"""
-from __future__ import print_function
-
-import sys
-
-from pyspark.context import SparkContext
-from pyspark.mllib.tree import RandomForest
-from pyspark.mllib.util import MLUtils
-
-
-def testClassification(trainingData, testData):
-    # Train a RandomForest model.
-    #  Empty categoricalFeaturesInfo indicates all features are continuous.
-    #  Note: Use larger numTrees in practice.
-    #  Setting featureSubsetStrategy="auto" lets the algorithm choose.
-    model = RandomForest.trainClassifier(trainingData, numClasses=2,
-                                         categoricalFeaturesInfo={},
-                                         numTrees=3, featureSubsetStrategy="auto",
-                                         impurity='gini', maxDepth=4, maxBins=32)
-
-    # Evaluate model on test instances and compute test error
-    predictions = model.predict(testData.map(lambda x: x.features))
-    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
-    testErr = labelsAndPredictions.filter(lambda v_p: v_p[0] != v_p[1]).count()\
-        / float(testData.count())
-    print('Test Error = ' + str(testErr))
-    print('Learned classification forest model:')
-    print(model.toDebugString())
-
-
-def testRegression(trainingData, testData):
-    # Train a RandomForest model.
-    #  Empty categoricalFeaturesInfo indicates all features are continuous.
-    #  Note: Use larger numTrees in practice.
-    #  Setting featureSubsetStrategy="auto" lets the algorithm choose.
-    model = RandomForest.trainRegressor(trainingData, categoricalFeaturesInfo={},
-                                        numTrees=3, featureSubsetStrategy="auto",
-                                        impurity='variance', maxDepth=4, maxBins=32)
-
-    # Evaluate model on test instances and compute test error
-    predictions = model.predict(testData.map(lambda x: x.features))
-    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
-    testMSE = labelsAndPredictions.map(lambda v_p1: (v_p1[0] - v_p1[1]) * (v_p1[0] - v_p1[1]))\
-        .sum() / float(testData.count())
-    print('Test Mean Squared Error = ' + str(testMSE))
-    print('Learned regression forest model:')
-    print(model.toDebugString())
-
-
-if __name__ == "__main__":
-    if len(sys.argv) > 1:
-        print("Usage: random_forest_example", file=sys.stderr)
-        exit(1)
-    sc = SparkContext(appName="PythonRandomForestExample")
-
-    # Load and parse the data file into an RDD of LabeledPoint.
-    data = MLUtils.loadLibSVMFile(sc, 'data/mllib/sample_libsvm_data.txt')
-    # Split the data into training and test sets (30% held out for testing)
-    (trainingData, testData) = data.randomSplit([0.7, 0.3])
-
-    print('\nRunning example of classification using RandomForest\n')
-    testClassification(trainingData, testData)
-
-    print('\nRunning example of regression using RandomForest\n')
-    testRegression(trainingData, testData)
-
-    sc.stop()


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org


Mime
View raw message