spark-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From r...@apache.org
Subject spark git commit: [SPARK-11947][SQL] Mark deprecated methods with "This will be removed in Spark 2.0."
Date Wed, 25 Nov 2015 02:58:58 GMT
Repository: spark
Updated Branches:
  refs/heads/master 25bbd3c16 -> 4d6bbbc03


[SPARK-11947][SQL] Mark deprecated methods with "This will be removed in Spark 2.0."

Also fixed some documentation as I saw them.

Author: Reynold Xin <rxin@databricks.com>

Closes #9930 from rxin/SPARK-11947.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/4d6bbbc0
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/4d6bbbc0
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/4d6bbbc0

Branch: refs/heads/master
Commit: 4d6bbbc03ddb6650b00eb638e4876a196014c19c
Parents: 25bbd3c
Author: Reynold Xin <rxin@databricks.com>
Authored: Tue Nov 24 18:58:55 2015 -0800
Committer: Reynold Xin <rxin@databricks.com>
Committed: Tue Nov 24 18:58:55 2015 -0800

----------------------------------------------------------------------
 project/MimaExcludes.scala                      |   3 +-
 .../scala/org/apache/spark/sql/Column.scala     |  20 ++--
 .../scala/org/apache/spark/sql/DataFrame.scala  |  72 +++++++----
 .../scala/org/apache/spark/sql/Dataset.scala    |   1 +
 .../scala/org/apache/spark/sql/SQLContext.scala |  88 +++++++-------
 .../org/apache/spark/sql/SQLImplicits.scala     |  25 +++-
 .../org/apache/spark/sql/SparkSQLParser.scala   | 118 -------------------
 .../spark/sql/execution/SparkSQLParser.scala    | 117 ++++++++++++++++++
 .../scala/org/apache/spark/sql/functions.scala  |  52 ++++----
 .../SimpleTextHadoopFsRelationSuite.scala       |   6 +-
 10 files changed, 282 insertions(+), 220 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/4d6bbbc0/project/MimaExcludes.scala
----------------------------------------------------------------------
diff --git a/project/MimaExcludes.scala b/project/MimaExcludes.scala
index bb45d1b..54a9ad9 100644
--- a/project/MimaExcludes.scala
+++ b/project/MimaExcludes.scala
@@ -108,7 +108,8 @@ object MimaExcludes {
         ProblemFilters.exclude[MissingClassProblem](
           "org.apache.spark.rdd.MapPartitionsWithPreparationRDD"),
         ProblemFilters.exclude[MissingClassProblem](
-          "org.apache.spark.rdd.MapPartitionsWithPreparationRDD$")
+          "org.apache.spark.rdd.MapPartitionsWithPreparationRDD$"),
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.SparkSQLParser")
       ) ++ Seq(
         // SPARK-11485
         ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.DataFrameHolder.df"),

http://git-wip-us.apache.org/repos/asf/spark/blob/4d6bbbc0/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Column.scala b/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
index 30c554a..b3cd9e1 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
@@ -42,7 +42,8 @@ private[sql] object Column {
 
 /**
  * A [[Column]] where an [[Encoder]] has been given for the expected input and return type.
- * @since 1.6.0
+ * To create a [[TypedColumn]], use the `as` function on a [[Column]].
+ *
  * @tparam T The input type expected for this expression.  Can be `Any` if the expression is type
  *           checked by the analyzer instead of the compiler (i.e. `expr("sum(...)")`).
  * @tparam U The output type of this column.
@@ -51,7 +52,8 @@ private[sql] object Column {
  */
 class TypedColumn[-T, U](
     expr: Expression,
-    private[sql] val encoder: ExpressionEncoder[U]) extends Column(expr) {
+    private[sql] val encoder: ExpressionEncoder[U])
+  extends Column(expr) {
 
   /**
    * Inserts the specific input type and schema into any expressions that are expected to operate
@@ -61,12 +63,11 @@ class TypedColumn[-T, U](
       inputEncoder: ExpressionEncoder[_],
       schema: Seq[Attribute]): TypedColumn[T, U] = {
     val boundEncoder = inputEncoder.bind(schema).asInstanceOf[ExpressionEncoder[Any]]
-    new TypedColumn[T, U] (expr transform {
-      case ta: TypedAggregateExpression if ta.aEncoder.isEmpty =>
-        ta.copy(
-          aEncoder = Some(boundEncoder),
-          children = schema)
-    }, encoder)
+    new TypedColumn[T, U](
+      expr transform { case ta: TypedAggregateExpression if ta.aEncoder.isEmpty =>
+        ta.copy(aEncoder = Some(boundEncoder), children = schema)
+      },
+      encoder)
   }
 }
 
@@ -691,8 +692,9 @@ class Column(protected[sql] val expr: Expression) extends Logging {
    *
    * @group expr_ops
    * @since 1.3.0
+   * @deprecated As of 1.5.0. Use isin. This will be removed in Spark 2.0.
    */
-  @deprecated("use isin", "1.5.0")
+  @deprecated("use isin. This will be removed in Spark 2.0.", "1.5.0")
   @scala.annotation.varargs
   def in(list: Any*): Column = isin(list : _*)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/4d6bbbc0/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
index 5586fc9..5eca1db 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -1713,9 +1713,9 @@ class DataFrame private[sql](
   ////////////////////////////////////////////////////////////////////////////
 
   /**
-   * @deprecated As of 1.3.0, replaced by `toDF()`.
+   * @deprecated As of 1.3.0, replaced by `toDF()`. This will be removed in Spark 2.0.
    */
-  @deprecated("use toDF", "1.3.0")
+  @deprecated("Use toDF. This will be removed in Spark 2.0.", "1.3.0")
   def toSchemaRDD: DataFrame = this
 
   /**
@@ -1725,9 +1725,9 @@ class DataFrame private[sql](
    * given name; if you pass `false`, it will throw if the table already
    * exists.
    * @group output
-   * @deprecated As of 1.340, replaced by `write().jdbc()`.
+   * @deprecated As of 1.4.0, replaced by `write().jdbc()`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.jdbc()", "1.4.0")
+  @deprecated("Use write.jdbc(). This will be removed in Spark 2.0.", "1.4.0")
   def createJDBCTable(url: String, table: String, allowExisting: Boolean): Unit = {
     val w = if (allowExisting) write.mode(SaveMode.Overwrite) else write
     w.jdbc(url, table, new Properties)
@@ -1744,9 +1744,9 @@ class DataFrame private[sql](
    * the RDD in order via the simple statement
    * `INSERT INTO table VALUES (?, ?, ..., ?)` should not fail.
    * @group output
-   * @deprecated As of 1.4.0, replaced by `write().jdbc()`.
+   * @deprecated As of 1.4.0, replaced by `write().jdbc()`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.jdbc()", "1.4.0")
+  @deprecated("Use write.jdbc(). This will be removed in Spark 2.0.", "1.4.0")
   def insertIntoJDBC(url: String, table: String, overwrite: Boolean): Unit = {
     val w = if (overwrite) write.mode(SaveMode.Overwrite) else write.mode(SaveMode.Append)
     w.jdbc(url, table, new Properties)
@@ -1757,9 +1757,9 @@ class DataFrame private[sql](
    * Files that are written out using this method can be read back in as a [[DataFrame]]
    * using the `parquetFile` function in [[SQLContext]].
    * @group output
-   * @deprecated As of 1.4.0, replaced by `write().parquet()`.
+   * @deprecated As of 1.4.0, replaced by `write().parquet()`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.parquet(path)", "1.4.0")
+  @deprecated("Use write.parquet(path). This will be removed in Spark 2.0.", "1.4.0")
   def saveAsParquetFile(path: String): Unit = {
     write.format("parquet").mode(SaveMode.ErrorIfExists).save(path)
   }
@@ -1782,8 +1782,9 @@ class DataFrame private[sql](
    *
    * @group output
    * @deprecated As of 1.4.0, replaced by `write().saveAsTable(tableName)`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.saveAsTable(tableName)", "1.4.0")
+  @deprecated("Use write.saveAsTable(tableName). This will be removed in Spark 2.0.", "1.4.0")
   def saveAsTable(tableName: String): Unit = {
     write.mode(SaveMode.ErrorIfExists).saveAsTable(tableName)
   }
@@ -1805,8 +1806,10 @@ class DataFrame private[sql](
    *
    * @group output
    * @deprecated As of 1.4.0, replaced by `write().mode(mode).saveAsTable(tableName)`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.mode(mode).saveAsTable(tableName)", "1.4.0")
+  @deprecated("Use write.mode(mode).saveAsTable(tableName). This will be removed in Spark 2.0.",
+    "1.4.0")
   def saveAsTable(tableName: String, mode: SaveMode): Unit = {
     write.mode(mode).saveAsTable(tableName)
   }
@@ -1829,8 +1832,10 @@ class DataFrame private[sql](
    *
    * @group output
    * @deprecated As of 1.4.0, replaced by `write().format(source).saveAsTable(tableName)`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.format(source).saveAsTable(tableName)", "1.4.0")
+  @deprecated("Use write.format(source).saveAsTable(tableName). This will be removed in Spark 2.0.",
+    "1.4.0")
   def saveAsTable(tableName: String, source: String): Unit = {
     write.format(source).saveAsTable(tableName)
   }
@@ -1853,8 +1858,10 @@ class DataFrame private[sql](
    *
    * @group output
    * @deprecated As of 1.4.0, replaced by `write().mode(mode).saveAsTable(tableName)`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.format(source).mode(mode).saveAsTable(tableName)", "1.4.0")
+  @deprecated("Use write.format(source).mode(mode).saveAsTable(tableName). " +
+    "This will be removed in Spark 2.0.", "1.4.0")
   def saveAsTable(tableName: String, source: String, mode: SaveMode): Unit = {
     write.format(source).mode(mode).saveAsTable(tableName)
   }
@@ -1877,9 +1884,10 @@ class DataFrame private[sql](
    * @group output
    * @deprecated As of 1.4.0, replaced by
    *            `write().format(source).mode(mode).options(options).saveAsTable(tableName)`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.format(source).mode(mode).options(options).saveAsTable(tableName)",
-    "1.4.0")
+  @deprecated("Use write.format(source).mode(mode).options(options).saveAsTable(tableName). " +
+    "This will be removed in Spark 2.0.", "1.4.0")
   def saveAsTable(
       tableName: String,
       source: String,
@@ -1907,9 +1915,10 @@ class DataFrame private[sql](
    * @group output
    * @deprecated As of 1.4.0, replaced by
    *            `write().format(source).mode(mode).options(options).saveAsTable(tableName)`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.format(source).mode(mode).options(options).saveAsTable(tableName)",
-    "1.4.0")
+  @deprecated("Use write.format(source).mode(mode).options(options).saveAsTable(tableName). " +
+    "This will be removed in Spark 2.0.", "1.4.0")
   def saveAsTable(
       tableName: String,
       source: String,
@@ -1923,9 +1932,9 @@ class DataFrame private[sql](
    * using the default data source configured by spark.sql.sources.default and
    * [[SaveMode.ErrorIfExists]] as the save mode.
    * @group output
-   * @deprecated As of 1.4.0, replaced by `write().save(path)`.
+   * @deprecated As of 1.4.0, replaced by `write().save(path)`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.save(path)", "1.4.0")
+  @deprecated("Use write.save(path). This will be removed in Spark 2.0.", "1.4.0")
   def save(path: String): Unit = {
     write.save(path)
   }
@@ -1935,8 +1944,9 @@ class DataFrame private[sql](
    * using the default data source configured by spark.sql.sources.default.
    * @group output
    * @deprecated As of 1.4.0, replaced by `write().mode(mode).save(path)`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.mode(mode).save(path)", "1.4.0")
+  @deprecated("Use write.mode(mode).save(path). This will be removed in Spark 2.0.", "1.4.0")
   def save(path: String, mode: SaveMode): Unit = {
     write.mode(mode).save(path)
   }
@@ -1946,8 +1956,9 @@ class DataFrame private[sql](
    * using [[SaveMode.ErrorIfExists]] as the save mode.
    * @group output
    * @deprecated As of 1.4.0, replaced by `write().format(source).save(path)`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.format(source).save(path)", "1.4.0")
+  @deprecated("Use write.format(source).save(path). This will be removed in Spark 2.0.", "1.4.0")
   def save(path: String, source: String): Unit = {
     write.format(source).save(path)
   }
@@ -1957,8 +1968,10 @@ class DataFrame private[sql](
    * [[SaveMode]] specified by mode.
    * @group output
    * @deprecated As of 1.4.0, replaced by `write().format(source).mode(mode).save(path)`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.format(source).mode(mode).save(path)", "1.4.0")
+  @deprecated("Use write.format(source).mode(mode).save(path). " +
+    "This will be removed in Spark 2.0.", "1.4.0")
   def save(path: String, source: String, mode: SaveMode): Unit = {
     write.format(source).mode(mode).save(path)
   }
@@ -1969,8 +1982,10 @@ class DataFrame private[sql](
    * @group output
    * @deprecated As of 1.4.0, replaced by
    *            `write().format(source).mode(mode).options(options).save(path)`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.format(source).mode(mode).options(options).save()", "1.4.0")
+  @deprecated("Use write.format(source).mode(mode).options(options).save(). " +
+    "This will be removed in Spark 2.0.", "1.4.0")
   def save(
       source: String,
       mode: SaveMode,
@@ -1985,8 +2000,10 @@ class DataFrame private[sql](
    * @group output
    * @deprecated As of 1.4.0, replaced by
    *            `write().format(source).mode(mode).options(options).save(path)`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.format(source).mode(mode).options(options).save()", "1.4.0")
+  @deprecated("Use write.format(source).mode(mode).options(options).save(). " +
+    "This will be removed in Spark 2.0.", "1.4.0")
   def save(
       source: String,
       mode: SaveMode,
@@ -1994,14 +2011,15 @@ class DataFrame private[sql](
     write.format(source).mode(mode).options(options).save()
   }
 
-
   /**
    * Adds the rows from this RDD to the specified table, optionally overwriting the existing data.
    * @group output
    * @deprecated As of 1.4.0, replaced by
    *            `write().mode(SaveMode.Append|SaveMode.Overwrite).saveAsTable(tableName)`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.mode(SaveMode.Append|SaveMode.Overwrite).saveAsTable(tableName)", "1.4.0")
+  @deprecated("Use write.mode(SaveMode.Append|SaveMode.Overwrite).saveAsTable(tableName). " +
+    "This will be removed in Spark 2.0.", "1.4.0")
   def insertInto(tableName: String, overwrite: Boolean): Unit = {
     write.mode(if (overwrite) SaveMode.Overwrite else SaveMode.Append).insertInto(tableName)
   }
@@ -2012,8 +2030,10 @@ class DataFrame private[sql](
    * @group output
    * @deprecated As of 1.4.0, replaced by
    *            `write().mode(SaveMode.Append).saveAsTable(tableName)`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use write.mode(SaveMode.Append).saveAsTable(tableName)", "1.4.0")
+  @deprecated("Use write.mode(SaveMode.Append).saveAsTable(tableName). " +
+    "This will be removed in Spark 2.0.", "1.4.0")
   def insertInto(tableName: String): Unit = {
     write.mode(SaveMode.Append).insertInto(tableName)
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/4d6bbbc0/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index 17e2611..dd84b8b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -32,6 +32,7 @@ import org.apache.spark.sql.execution.{Queryable, QueryExecution}
 import org.apache.spark.sql.types.StructType
 
 /**
+ * :: Experimental ::
  * A [[Dataset]] is a strongly typed collection of objects that can be transformed in parallel
  * using functional or relational operations.
  *

http://git-wip-us.apache.org/repos/asf/spark/blob/4d6bbbc0/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index 39471d2..46bf544 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -942,33 +942,33 @@ class SQLContext private[sql](
   ////////////////////////////////////////////////////////////////////////////
 
   /**
-   * @deprecated As of 1.3.0, replaced by `createDataFrame()`.
+   * @deprecated As of 1.3.0, replaced by `createDataFrame()`. This will be removed in Spark 2.0.
    */
-  @deprecated("use createDataFrame", "1.3.0")
+  @deprecated("Use createDataFrame. This will be removed in Spark 2.0.", "1.3.0")
   def applySchema(rowRDD: RDD[Row], schema: StructType): DataFrame = {
     createDataFrame(rowRDD, schema)
   }
 
   /**
-   * @deprecated As of 1.3.0, replaced by `createDataFrame()`.
+   * @deprecated As of 1.3.0, replaced by `createDataFrame()`. This will be removed in Spark 2.0.
    */
-  @deprecated("use createDataFrame", "1.3.0")
+  @deprecated("Use createDataFrame. This will be removed in Spark 2.0.", "1.3.0")
   def applySchema(rowRDD: JavaRDD[Row], schema: StructType): DataFrame = {
     createDataFrame(rowRDD, schema)
   }
 
   /**
-   * @deprecated As of 1.3.0, replaced by `createDataFrame()`.
+   * @deprecated As of 1.3.0, replaced by `createDataFrame()`. This will be removed in Spark 2.0.
    */
-  @deprecated("use createDataFrame", "1.3.0")
+  @deprecated("Use createDataFrame. This will be removed in Spark 2.0.", "1.3.0")
   def applySchema(rdd: RDD[_], beanClass: Class[_]): DataFrame = {
     createDataFrame(rdd, beanClass)
   }
 
   /**
-   * @deprecated As of 1.3.0, replaced by `createDataFrame()`.
+   * @deprecated As of 1.3.0, replaced by `createDataFrame()`. This will be removed in Spark 2.0.
    */
-  @deprecated("use createDataFrame", "1.3.0")
+  @deprecated("Use createDataFrame. This will be removed in Spark 2.0.", "1.3.0")
   def applySchema(rdd: JavaRDD[_], beanClass: Class[_]): DataFrame = {
     createDataFrame(rdd, beanClass)
   }
@@ -978,9 +978,9 @@ class SQLContext private[sql](
    * [[DataFrame]] if no paths are passed in.
    *
    * @group specificdata
-   * @deprecated As of 1.4.0, replaced by `read().parquet()`.
+   * @deprecated As of 1.4.0, replaced by `read().parquet()`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use read.parquet()", "1.4.0")
+  @deprecated("Use read.parquet(). This will be removed in Spark 2.0.", "1.4.0")
   @scala.annotation.varargs
   def parquetFile(paths: String*): DataFrame = {
     if (paths.isEmpty) {
@@ -995,9 +995,9 @@ class SQLContext private[sql](
    * It goes through the entire dataset once to determine the schema.
    *
    * @group specificdata
-   * @deprecated As of 1.4.0, replaced by `read().json()`.
+   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use read.json()", "1.4.0")
+  @deprecated("Use read.json(). This will be removed in Spark 2.0.", "1.4.0")
   def jsonFile(path: String): DataFrame = {
     read.json(path)
   }
@@ -1007,18 +1007,18 @@ class SQLContext private[sql](
    * returning the result as a [[DataFrame]].
    *
    * @group specificdata
-   * @deprecated As of 1.4.0, replaced by `read().json()`.
+   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use read.json()", "1.4.0")
+  @deprecated("Use read.json(). This will be removed in Spark 2.0.", "1.4.0")
   def jsonFile(path: String, schema: StructType): DataFrame = {
     read.schema(schema).json(path)
   }
 
   /**
    * @group specificdata
-   * @deprecated As of 1.4.0, replaced by `read().json()`.
+   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use read.json()", "1.4.0")
+  @deprecated("Use read.json(). This will be removed in Spark 2.0.", "1.4.0")
   def jsonFile(path: String, samplingRatio: Double): DataFrame = {
     read.option("samplingRatio", samplingRatio.toString).json(path)
   }
@@ -1029,9 +1029,9 @@ class SQLContext private[sql](
    * It goes through the entire dataset once to determine the schema.
    *
    * @group specificdata
-   * @deprecated As of 1.4.0, replaced by `read().json()`.
+   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use read.json()", "1.4.0")
+  @deprecated("Use read.json(). This will be removed in Spark 2.0.", "1.4.0")
   def jsonRDD(json: RDD[String]): DataFrame = read.json(json)
 
   /**
@@ -1040,9 +1040,9 @@ class SQLContext private[sql](
    * It goes through the entire dataset once to determine the schema.
    *
    * @group specificdata
-   * @deprecated As of 1.4.0, replaced by `read().json()`.
+   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use read.json()", "1.4.0")
+  @deprecated("Use read.json(). This will be removed in Spark 2.0.", "1.4.0")
   def jsonRDD(json: JavaRDD[String]): DataFrame = read.json(json)
 
   /**
@@ -1050,9 +1050,9 @@ class SQLContext private[sql](
    * returning the result as a [[DataFrame]].
    *
    * @group specificdata
-   * @deprecated As of 1.4.0, replaced by `read().json()`.
+   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use read.json()", "1.4.0")
+  @deprecated("Use read.json(). This will be removed in Spark 2.0.", "1.4.0")
   def jsonRDD(json: RDD[String], schema: StructType): DataFrame = {
     read.schema(schema).json(json)
   }
@@ -1062,9 +1062,9 @@ class SQLContext private[sql](
    * schema, returning the result as a [[DataFrame]].
    *
    * @group specificdata
-   * @deprecated As of 1.4.0, replaced by `read().json()`.
+   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use read.json()", "1.4.0")
+  @deprecated("Use read.json(). This will be removed in Spark 2.0.", "1.4.0")
   def jsonRDD(json: JavaRDD[String], schema: StructType): DataFrame = {
     read.schema(schema).json(json)
   }
@@ -1074,9 +1074,9 @@ class SQLContext private[sql](
    * schema, returning the result as a [[DataFrame]].
    *
    * @group specificdata
-   * @deprecated As of 1.4.0, replaced by `read().json()`.
+   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use read.json()", "1.4.0")
+  @deprecated("Use read.json(). This will be removed in Spark 2.0.", "1.4.0")
   def jsonRDD(json: RDD[String], samplingRatio: Double): DataFrame = {
     read.option("samplingRatio", samplingRatio.toString).json(json)
   }
@@ -1086,9 +1086,9 @@ class SQLContext private[sql](
    * schema, returning the result as a [[DataFrame]].
    *
    * @group specificdata
-   * @deprecated As of 1.4.0, replaced by `read().json()`.
+   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use read.json()", "1.4.0")
+  @deprecated("Use read.json(). This will be removed in Spark 2.0.", "1.4.0")
   def jsonRDD(json: JavaRDD[String], samplingRatio: Double): DataFrame = {
     read.option("samplingRatio", samplingRatio.toString).json(json)
   }
@@ -1098,9 +1098,9 @@ class SQLContext private[sql](
    * using the default data source configured by spark.sql.sources.default.
    *
    * @group genericdata
-   * @deprecated As of 1.4.0, replaced by `read().load(path)`.
+   * @deprecated As of 1.4.0, replaced by `read().load(path)`. This will be removed in Spark 2.0.
    */
-  @deprecated("Use read.load(path)", "1.4.0")
+  @deprecated("Use read.load(path). This will be removed in Spark 2.0.", "1.4.0")
   def load(path: String): DataFrame = {
     read.load(path)
   }
@@ -1110,8 +1110,9 @@ class SQLContext private[sql](
    *
    * @group genericdata
    * @deprecated As of 1.4.0, replaced by `read().format(source).load(path)`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use read.format(source).load(path)", "1.4.0")
+  @deprecated("Use read.format(source).load(path). This will be removed in Spark 2.0.", "1.4.0")
   def load(path: String, source: String): DataFrame = {
     read.format(source).load(path)
   }
@@ -1122,8 +1123,10 @@ class SQLContext private[sql](
    *
    * @group genericdata
    * @deprecated As of 1.4.0, replaced by `read().format(source).options(options).load()`.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use read.format(source).options(options).load()", "1.4.0")
+  @deprecated("Use read.format(source).options(options).load(). " +
+    "This will be removed in Spark 2.0.", "1.4.0")
   def load(source: String, options: java.util.Map[String, String]): DataFrame = {
     read.options(options).format(source).load()
   }
@@ -1135,7 +1138,8 @@ class SQLContext private[sql](
    * @group genericdata
    * @deprecated As of 1.4.0, replaced by `read().format(source).options(options).load()`.
    */
-  @deprecated("Use read.format(source).options(options).load()", "1.4.0")
+  @deprecated("Use read.format(source).options(options).load(). " +
+    "This will be removed in Spark 2.0.", "1.4.0")
   def load(source: String, options: Map[String, String]): DataFrame = {
     read.options(options).format(source).load()
   }
@@ -1148,7 +1152,8 @@ class SQLContext private[sql](
    * @deprecated As of 1.4.0, replaced by
    *            `read().format(source).schema(schema).options(options).load()`.
    */
-  @deprecated("Use read.format(source).schema(schema).options(options).load()", "1.4.0")
+  @deprecated("Use read.format(source).schema(schema).options(options).load(). " +
+    "This will be removed in Spark 2.0.", "1.4.0")
   def load(source: String, schema: StructType, options: java.util.Map[String, String]): DataFrame =
   {
     read.format(source).schema(schema).options(options).load()
@@ -1162,7 +1167,8 @@ class SQLContext private[sql](
    * @deprecated As of 1.4.0, replaced by
    *            `read().format(source).schema(schema).options(options).load()`.
    */
-  @deprecated("Use read.format(source).schema(schema).options(options).load()", "1.4.0")
+  @deprecated("Use read.format(source).schema(schema).options(options).load(). " +
+    "This will be removed in Spark 2.0.", "1.4.0")
   def load(source: String, schema: StructType, options: Map[String, String]): DataFrame = {
     read.format(source).schema(schema).options(options).load()
   }
@@ -1172,9 +1178,9 @@ class SQLContext private[sql](
    * url named table.
    *
    * @group specificdata
-   * @deprecated As of 1.4.0, replaced by `read().jdbc()`.
+   * @deprecated As of 1.4.0, replaced by `read().jdbc()`. This will be removed in Spark 2.0.
    */
-  @deprecated("use read.jdbc()", "1.4.0")
+  @deprecated("Use read.jdbc(). This will be removed in Spark 2.0.", "1.4.0")
   def jdbc(url: String, table: String): DataFrame = {
     read.jdbc(url, table, new Properties)
   }
@@ -1190,9 +1196,9 @@ class SQLContext private[sql](
    * @param numPartitions the number of partitions.  the range `minValue`-`maxValue` will be split
    *                      evenly into this many partitions
    * @group specificdata
-   * @deprecated As of 1.4.0, replaced by `read().jdbc()`.
+   * @deprecated As of 1.4.0, replaced by `read().jdbc()`. This will be removed in Spark 2.0.
    */
-  @deprecated("use read.jdbc()", "1.4.0")
+  @deprecated("Use read.jdbc(). This will be removed in Spark 2.0.", "1.4.0")
   def jdbc(
       url: String,
       table: String,
@@ -1210,9 +1216,9 @@ class SQLContext private[sql](
    * of the [[DataFrame]].
    *
    * @group specificdata
-   * @deprecated As of 1.4.0, replaced by `read().jdbc()`.
+   * @deprecated As of 1.4.0, replaced by `read().jdbc()`. This will be removed in Spark 2.0.
    */
-  @deprecated("use read.jdbc()", "1.4.0")
+  @deprecated("Use read.jdbc(). This will be removed in Spark 2.0.", "1.4.0")
   def jdbc(url: String, table: String, theParts: Array[String]): DataFrame = {
     read.jdbc(url, table, theParts, new Properties)
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/4d6bbbc0/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
index 25ffdcd..6735d02 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
@@ -30,19 +30,38 @@ import org.apache.spark.unsafe.types.UTF8String
 
 /**
  * A collection of implicit methods for converting common Scala objects into [[DataFrame]]s.
+ *
+ * @since 1.6.0
  */
 abstract class SQLImplicits {
+
   protected def _sqlContext: SQLContext
 
+  /** @since 1.6.0 */
   implicit def newProductEncoder[T <: Product : TypeTag]: Encoder[T] = ExpressionEncoder()
 
+  /** @since 1.6.0 */
   implicit def newIntEncoder: Encoder[Int] = ExpressionEncoder()
+
+  /** @since 1.6.0 */
   implicit def newLongEncoder: Encoder[Long] = ExpressionEncoder()
+
+  /** @since 1.6.0 */
   implicit def newDoubleEncoder: Encoder[Double] = ExpressionEncoder()
+
+  /** @since 1.6.0 */
   implicit def newFloatEncoder: Encoder[Float] = ExpressionEncoder()
+
+  /** @since 1.6.0 */
   implicit def newByteEncoder: Encoder[Byte] = ExpressionEncoder()
+
+  /** @since 1.6.0 */
   implicit def newShortEncoder: Encoder[Short] = ExpressionEncoder()
+
+  /** @since 1.6.0 */
   implicit def newBooleanEncoder: Encoder[Boolean] = ExpressionEncoder()
+
+  /** @since 1.6.0 */
   implicit def newStringEncoder: Encoder[String] = ExpressionEncoder()
 
   /**
@@ -84,9 +103,9 @@ abstract class SQLImplicits {
     DataFrameHolder(_sqlContext.createDataFrame(data))
   }
 
-  // Do NOT add more implicit conversions. They are likely to break source compatibility by
-  // making existing implicit conversions ambiguous. In particular, RDD[Double] is dangerous
-  // because of [[DoubleRDDFunctions]].
+  // Do NOT add more implicit conversions for primitive types.
+  // They are likely to break source compatibility by making existing implicit conversions
+  // ambiguous. In particular, RDD[Double] is dangerous because of [[DoubleRDDFunctions]].
 
   /**
    * Creates a single column DataFrame from an RDD[Int].

http://git-wip-us.apache.org/repos/asf/spark/blob/4d6bbbc0/sql/core/src/main/scala/org/apache/spark/sql/SparkSQLParser.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SparkSQLParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/SparkSQLParser.scala
deleted file mode 100644
index ea8fce6..0000000
--- a/sql/core/src/main/scala/org/apache/spark/sql/SparkSQLParser.scala
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.sql
-
-import scala.util.parsing.combinator.RegexParsers
-
-import org.apache.spark.sql.catalyst.AbstractSparkSQLParser
-import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
-import org.apache.spark.sql.catalyst.plans.logical.{DescribeFunction, LogicalPlan, ShowFunctions}
-import org.apache.spark.sql.execution._
-import org.apache.spark.sql.types.StringType
-
-
-/**
- * The top level Spark SQL parser. This parser recognizes syntaxes that are available for all SQL
- * dialects supported by Spark SQL, and delegates all the other syntaxes to the `fallback` parser.
- *
- * @param fallback A function that parses an input string to a logical plan
- */
-private[sql] class SparkSQLParser(fallback: String => LogicalPlan) extends AbstractSparkSQLParser {
-
-  // A parser for the key-value part of the "SET [key = [value ]]" syntax
-  private object SetCommandParser extends RegexParsers {
-    private val key: Parser[String] = "(?m)[^=]+".r
-
-    private val value: Parser[String] = "(?m).*$".r
-
-    private val output: Seq[Attribute] = Seq(AttributeReference("", StringType, nullable = false)())
-
-    private val pair: Parser[LogicalPlan] =
-      (key ~ ("=".r ~> value).?).? ^^ {
-        case None => SetCommand(None)
-        case Some(k ~ v) => SetCommand(Some(k.trim -> v.map(_.trim)))
-      }
-
-    def apply(input: String): LogicalPlan = parseAll(pair, input) match {
-      case Success(plan, _) => plan
-      case x => sys.error(x.toString)
-    }
-  }
-
-  protected val AS = Keyword("AS")
-  protected val CACHE = Keyword("CACHE")
-  protected val CLEAR = Keyword("CLEAR")
-  protected val DESCRIBE = Keyword("DESCRIBE")
-  protected val EXTENDED = Keyword("EXTENDED")
-  protected val FUNCTION = Keyword("FUNCTION")
-  protected val FUNCTIONS = Keyword("FUNCTIONS")
-  protected val IN = Keyword("IN")
-  protected val LAZY = Keyword("LAZY")
-  protected val SET = Keyword("SET")
-  protected val SHOW = Keyword("SHOW")
-  protected val TABLE = Keyword("TABLE")
-  protected val TABLES = Keyword("TABLES")
-  protected val UNCACHE = Keyword("UNCACHE")
-
-  override protected lazy val start: Parser[LogicalPlan] =
-    cache | uncache | set | show | desc | others
-
-  private lazy val cache: Parser[LogicalPlan] =
-    CACHE ~> LAZY.? ~ (TABLE ~> ident) ~ (AS ~> restInput).? ^^ {
-      case isLazy ~ tableName ~ plan =>
-        CacheTableCommand(tableName, plan.map(fallback), isLazy.isDefined)
-    }
-
-  private lazy val uncache: Parser[LogicalPlan] =
-    ( UNCACHE ~ TABLE ~> ident ^^ {
-        case tableName => UncacheTableCommand(tableName)
-      }
-    | CLEAR ~ CACHE ^^^ ClearCacheCommand
-    )
-
-  private lazy val set: Parser[LogicalPlan] =
-    SET ~> restInput ^^ {
-      case input => SetCommandParser(input)
-    }
-
-  // It can be the following patterns:
-  // SHOW FUNCTIONS;
-  // SHOW FUNCTIONS mydb.func1;
-  // SHOW FUNCTIONS func1;
-  // SHOW FUNCTIONS `mydb.a`.`func1.aa`;
-  private lazy val show: Parser[LogicalPlan] =
-    ( SHOW ~> TABLES ~ (IN ~> ident).? ^^ {
-        case _ ~ dbName => ShowTablesCommand(dbName)
-      }
-    | SHOW ~ FUNCTIONS ~> ((ident <~ ".").? ~ (ident | stringLit)).? ^^ {
-        case Some(f) => ShowFunctions(f._1, Some(f._2))
-        case None => ShowFunctions(None, None)
-      }
-    )
-
-  private lazy val desc: Parser[LogicalPlan] =
-    DESCRIBE ~ FUNCTION ~> EXTENDED.? ~ (ident | stringLit) ^^ {
-      case isExtended ~ functionName => DescribeFunction(functionName, isExtended.isDefined)
-    }
-
-  private lazy val others: Parser[LogicalPlan] =
-    wholeInput ^^ {
-      case input => fallback(input)
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/spark/blob/4d6bbbc0/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSQLParser.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSQLParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSQLParser.scala
new file mode 100644
index 0000000..b3e8d0d
--- /dev/null
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSQLParser.scala
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution
+
+import scala.util.parsing.combinator.RegexParsers
+
+import org.apache.spark.sql.catalyst.AbstractSparkSQLParser
+import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
+import org.apache.spark.sql.catalyst.plans.logical
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.types.StringType
+
+/**
+ * The top level Spark SQL parser. This parser recognizes syntaxes that are available for all SQL
+ * dialects supported by Spark SQL, and delegates all the other syntaxes to the `fallback` parser.
+ *
+ * @param fallback A function that parses an input string to a logical plan
+ */
+class SparkSQLParser(fallback: String => LogicalPlan) extends AbstractSparkSQLParser {
+
+  // A parser for the key-value part of the "SET [key = [value ]]" syntax
+  private object SetCommandParser extends RegexParsers {
+    private val key: Parser[String] = "(?m)[^=]+".r
+
+    private val value: Parser[String] = "(?m).*$".r
+
+    private val output: Seq[Attribute] = Seq(AttributeReference("", StringType, nullable = false)())
+
+    private val pair: Parser[LogicalPlan] =
+      (key ~ ("=".r ~> value).?).? ^^ {
+        case None => SetCommand(None)
+        case Some(k ~ v) => SetCommand(Some(k.trim -> v.map(_.trim)))
+      }
+
+    def apply(input: String): LogicalPlan = parseAll(pair, input) match {
+      case Success(plan, _) => plan
+      case x => sys.error(x.toString)
+    }
+  }
+
+  protected val AS = Keyword("AS")
+  protected val CACHE = Keyword("CACHE")
+  protected val CLEAR = Keyword("CLEAR")
+  protected val DESCRIBE = Keyword("DESCRIBE")
+  protected val EXTENDED = Keyword("EXTENDED")
+  protected val FUNCTION = Keyword("FUNCTION")
+  protected val FUNCTIONS = Keyword("FUNCTIONS")
+  protected val IN = Keyword("IN")
+  protected val LAZY = Keyword("LAZY")
+  protected val SET = Keyword("SET")
+  protected val SHOW = Keyword("SHOW")
+  protected val TABLE = Keyword("TABLE")
+  protected val TABLES = Keyword("TABLES")
+  protected val UNCACHE = Keyword("UNCACHE")
+
+  override protected lazy val start: Parser[LogicalPlan] =
+    cache | uncache | set | show | desc | others
+
+  private lazy val cache: Parser[LogicalPlan] =
+    CACHE ~> LAZY.? ~ (TABLE ~> ident) ~ (AS ~> restInput).? ^^ {
+      case isLazy ~ tableName ~ plan =>
+        CacheTableCommand(tableName, plan.map(fallback), isLazy.isDefined)
+    }
+
+  private lazy val uncache: Parser[LogicalPlan] =
+    ( UNCACHE ~ TABLE ~> ident ^^ {
+        case tableName => UncacheTableCommand(tableName)
+      }
+    | CLEAR ~ CACHE ^^^ ClearCacheCommand
+    )
+
+  private lazy val set: Parser[LogicalPlan] =
+    SET ~> restInput ^^ {
+      case input => SetCommandParser(input)
+    }
+
+  // It can be the following patterns:
+  // SHOW FUNCTIONS;
+  // SHOW FUNCTIONS mydb.func1;
+  // SHOW FUNCTIONS func1;
+  // SHOW FUNCTIONS `mydb.a`.`func1.aa`;
+  private lazy val show: Parser[LogicalPlan] =
+    ( SHOW ~> TABLES ~ (IN ~> ident).? ^^ {
+        case _ ~ dbName => ShowTablesCommand(dbName)
+      }
+    | SHOW ~ FUNCTIONS ~> ((ident <~ ".").? ~ (ident | stringLit)).? ^^ {
+        case Some(f) => logical.ShowFunctions(f._1, Some(f._2))
+        case None => logical.ShowFunctions(None, None)
+      }
+    )
+
+  private lazy val desc: Parser[LogicalPlan] =
+    DESCRIBE ~ FUNCTION ~> EXTENDED.? ~ (ident | stringLit) ^^ {
+      case isExtended ~ functionName => logical.DescribeFunction(functionName, isExtended.isDefined)
+    }
+
+  private lazy val others: Parser[LogicalPlan] =
+    wholeInput ^^ {
+      case input => fallback(input)
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/spark/blob/4d6bbbc0/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index 6137ce3..77dd5bc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -17,8 +17,6 @@
 
 package org.apache.spark.sql
 
-
-
 import scala.language.implicitConversions
 import scala.reflect.runtime.universe.{TypeTag, typeTag}
 import scala.util.Try
@@ -39,11 +37,11 @@ import org.apache.spark.util.Utils
  * "bridge" methods due to the use of covariant return types.
  *
  * {{{
- * In LegacyFunctions:
- * public abstract org.apache.spark.sql.Column avg(java.lang.String);
+ *   // In LegacyFunctions:
+ *   public abstract org.apache.spark.sql.Column avg(java.lang.String);
  *
- * In functions:
- * public static org.apache.spark.sql.TypedColumn<java.lang.Object, java.lang.Object> avg(...);
+ *   // In functions:
+ *   public static org.apache.spark.sql.TypedColumn<java.lang.Object, java.lang.Object> avg(...);
  * }}}
  *
  * This allows us to use the same functions both in typed [[Dataset]] operations and untyped
@@ -2528,8 +2526,9 @@ object functions extends LegacyFunctions {
     * @group udf_funcs
     * @since 1.3.0
     * @deprecated As of 1.5.0, since it's redundant with udf()
+    *             This will be removed in Spark 2.0.
     */
-  @deprecated("Use udf", "1.5.0")
+  @deprecated("Use udf. This will be removed in Spark 2.0.", "1.5.0")
   def callUDF(f: Function0[_], returnType: DataType): Column = withExpr {
     ScalaUDF(f, returnType, Seq())
   }
@@ -2541,8 +2540,9 @@ object functions extends LegacyFunctions {
     * @group udf_funcs
     * @since 1.3.0
     * @deprecated As of 1.5.0, since it's redundant with udf()
+    *             This will be removed in Spark 2.0.
     */
-  @deprecated("Use udf", "1.5.0")
+  @deprecated("Use udf. This will be removed in Spark 2.0.", "1.5.0")
   def callUDF(f: Function1[_, _], returnType: DataType, arg1: Column): Column = withExpr {
     ScalaUDF(f, returnType, Seq(arg1.expr))
   }
@@ -2554,8 +2554,9 @@ object functions extends LegacyFunctions {
     * @group udf_funcs
     * @since 1.3.0
     * @deprecated As of 1.5.0, since it's redundant with udf()
+    *             This will be removed in Spark 2.0.
     */
-  @deprecated("Use udf", "1.5.0")
+  @deprecated("Use udf. This will be removed in Spark 2.0.", "1.5.0")
   def callUDF(f: Function2[_, _, _], returnType: DataType, arg1: Column, arg2: Column): Column = withExpr {
     ScalaUDF(f, returnType, Seq(arg1.expr, arg2.expr))
   }
@@ -2567,8 +2568,9 @@ object functions extends LegacyFunctions {
     * @group udf_funcs
     * @since 1.3.0
     * @deprecated As of 1.5.0, since it's redundant with udf()
+    *             This will be removed in Spark 2.0.
     */
-  @deprecated("Use udf", "1.5.0")
+  @deprecated("Use udf. This will be removed in Spark 2.0.", "1.5.0")
   def callUDF(f: Function3[_, _, _, _], returnType: DataType, arg1: Column, arg2: Column, arg3: Column): Column = withExpr {
     ScalaUDF(f, returnType, Seq(arg1.expr, arg2.expr, arg3.expr))
   }
@@ -2580,8 +2582,9 @@ object functions extends LegacyFunctions {
     * @group udf_funcs
     * @since 1.3.0
     * @deprecated As of 1.5.0, since it's redundant with udf()
+    *             This will be removed in Spark 2.0.
     */
-  @deprecated("Use udf", "1.5.0")
+  @deprecated("Use udf. This will be removed in Spark 2.0.", "1.5.0")
   def callUDF(f: Function4[_, _, _, _, _], returnType: DataType, arg1: Column, arg2: Column, arg3: Column, arg4: Column): Column = withExpr {
     ScalaUDF(f, returnType, Seq(arg1.expr, arg2.expr, arg3.expr, arg4.expr))
   }
@@ -2593,8 +2596,9 @@ object functions extends LegacyFunctions {
     * @group udf_funcs
     * @since 1.3.0
     * @deprecated As of 1.5.0, since it's redundant with udf()
+    *             This will be removed in Spark 2.0.
     */
-  @deprecated("Use udf", "1.5.0")
+  @deprecated("Use udf. This will be removed in Spark 2.0.", "1.5.0")
   def callUDF(f: Function5[_, _, _, _, _, _], returnType: DataType, arg1: Column, arg2: Column, arg3: Column, arg4: Column, arg5: Column): Column = withExpr {
     ScalaUDF(f, returnType, Seq(arg1.expr, arg2.expr, arg3.expr, arg4.expr, arg5.expr))
   }
@@ -2606,8 +2610,9 @@ object functions extends LegacyFunctions {
     * @group udf_funcs
     * @since 1.3.0
     * @deprecated As of 1.5.0, since it's redundant with udf()
+    *             This will be removed in Spark 2.0.
     */
-  @deprecated("Use udf", "1.5.0")
+  @deprecated("Use udf. This will be removed in Spark 2.0.", "1.5.0")
   def callUDF(f: Function6[_, _, _, _, _, _, _], returnType: DataType, arg1: Column, arg2: Column, arg3: Column, arg4: Column, arg5: Column, arg6: Column): Column = withExpr {
     ScalaUDF(f, returnType, Seq(arg1.expr, arg2.expr, arg3.expr, arg4.expr, arg5.expr, arg6.expr))
   }
@@ -2619,8 +2624,9 @@ object functions extends LegacyFunctions {
     * @group udf_funcs
     * @since 1.3.0
     * @deprecated As of 1.5.0, since it's redundant with udf()
+    *             This will be removed in Spark 2.0.
     */
-  @deprecated("Use udf", "1.5.0")
+  @deprecated("Use udf. This will be removed in Spark 2.0.", "1.5.0")
   def callUDF(f: Function7[_, _, _, _, _, _, _, _], returnType: DataType, arg1: Column, arg2: Column, arg3: Column, arg4: Column, arg5: Column, arg6: Column, arg7: Column): Column = withExpr {
     ScalaUDF(f, returnType, Seq(arg1.expr, arg2.expr, arg3.expr, arg4.expr, arg5.expr, arg6.expr, arg7.expr))
   }
@@ -2632,8 +2638,9 @@ object functions extends LegacyFunctions {
     * @group udf_funcs
     * @since 1.3.0
     * @deprecated As of 1.5.0, since it's redundant with udf()
+    *             This will be removed in Spark 2.0.
     */
-  @deprecated("Use udf", "1.5.0")
+  @deprecated("Use udf. This will be removed in Spark 2.0.", "1.5.0")
   def callUDF(f: Function8[_, _, _, _, _, _, _, _, _], returnType: DataType, arg1: Column, arg2: Column, arg3: Column, arg4: Column, arg5: Column, arg6: Column, arg7: Column, arg8: Column): Column = withExpr {
     ScalaUDF(f, returnType, Seq(arg1.expr, arg2.expr, arg3.expr, arg4.expr, arg5.expr, arg6.expr, arg7.expr, arg8.expr))
   }
@@ -2644,9 +2651,10 @@ object functions extends LegacyFunctions {
     *
     * @group udf_funcs
     * @since 1.3.0
-    * @deprecated As of 1.5.0, since it's redundant with udf()
+    * @deprecated As of 1.5.0, since it's redundant with udf().
+    *             This will be removed in Spark 2.0.
     */
-  @deprecated("Use udf", "1.5.0")
+  @deprecated("Use udf. This will be removed in Spark 2.0.", "1.5.0")
   def callUDF(f: Function9[_, _, _, _, _, _, _, _, _, _], returnType: DataType, arg1: Column, arg2: Column, arg3: Column, arg4: Column, arg5: Column, arg6: Column, arg7: Column, arg8: Column, arg9: Column): Column = withExpr {
     ScalaUDF(f, returnType, Seq(arg1.expr, arg2.expr, arg3.expr, arg4.expr, arg5.expr, arg6.expr, arg7.expr, arg8.expr, arg9.expr))
   }
@@ -2657,9 +2665,10 @@ object functions extends LegacyFunctions {
     *
     * @group udf_funcs
     * @since 1.3.0
-    * @deprecated As of 1.5.0, since it's redundant with udf()
+    * @deprecated As of 1.5.0, since it's redundant with udf().
+    *             This will be removed in Spark 2.0.
     */
-  @deprecated("Use udf", "1.5.0")
+  @deprecated("Use udf. This will be removed in Spark 2.0.", "1.5.0")
   def callUDF(f: Function10[_, _, _, _, _, _, _, _, _, _, _], returnType: DataType, arg1: Column, arg2: Column, arg3: Column, arg4: Column, arg5: Column, arg6: Column, arg7: Column, arg8: Column, arg9: Column, arg10: Column): Column = withExpr {
     ScalaUDF(f, returnType, Seq(arg1.expr, arg2.expr, arg3.expr, arg4.expr, arg5.expr, arg6.expr, arg7.expr, arg8.expr, arg9.expr, arg10.expr))
   }
@@ -2700,9 +2709,10 @@ object functions extends LegacyFunctions {
    *
    * @group udf_funcs
    * @since 1.4.0
-   * @deprecated As of 1.5.0, since it was not coherent to have two functions callUdf and callUDF
+   * @deprecated As of 1.5.0, since it was not coherent to have two functions callUdf and callUDF.
+   *             This will be removed in Spark 2.0.
    */
-  @deprecated("Use callUDF", "1.5.0")
+  @deprecated("Use callUDF. This will be removed in Spark 2.0.", "1.5.0")
   def callUdf(udfName: String, cols: Column*): Column = withExpr {
     // Note: we avoid using closures here because on file systems that are case-insensitive, the
     // compiled class file for the closure here will conflict with the one in callUDF (upper case).

http://git-wip-us.apache.org/repos/asf/spark/blob/4d6bbbc0/sql/hive/src/test/scala/org/apache/spark/sql/sources/SimpleTextHadoopFsRelationSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/SimpleTextHadoopFsRelationSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/SimpleTextHadoopFsRelationSuite.scala
index 81af684..b554d13 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/SimpleTextHadoopFsRelationSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/SimpleTextHadoopFsRelationSuite.scala
@@ -80,7 +80,11 @@ class SimpleTextHadoopFsRelationSuite extends HadoopFsRelationTest with Predicat
 
   private var partitionedDF: DataFrame = _
 
-  private val partitionedDataSchema: StructType = StructType('a.int :: 'b.int :: 'c.string :: Nil)
+  private val partitionedDataSchema: StructType =
+    new StructType()
+      .add("a", IntegerType)
+      .add("b", IntegerType)
+      .add("c", StringType)
 
   protected override def beforeAll(): Unit = {
     this.tempPath = Utils.createTempDir()


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org


Mime
View raw message