spark-reviews mailing list archives

From HyukjinKwon <...@git.apache.org>
Subject [GitHub] spark pull request #16976: [SPARK-19610][SQL] Support parsing multiline CSV ...
Date Mon, 05 Jun 2017 02:55:59 GMT
Github user HyukjinKwon commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16976#discussion_r120034049
  
    --- Diff: sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala ---
    @@ -961,56 +968,121 @@ class CSVSuite extends QueryTest with SharedSQLContext with SQLTestUtils {
       }
     
       test("SPARK-18699 put malformed records in a `columnNameOfCorruptRecord` field") {
    -    val schema = new StructType().add("a", IntegerType).add("b", TimestampType)
    -    val df1 = spark
    -      .read
    -      .option("mode", "PERMISSIVE")
    -      .schema(schema)
    -      .csv(testFile(valueMalformedFile))
    -    checkAnswer(df1,
    -      Row(null, null) ::
    -      Row(1, java.sql.Date.valueOf("1983-08-04")) ::
    -      Nil)
    -
    -    // If `schema` has `columnNameOfCorruptRecord`, it should handle corrupt records
    -    val columnNameOfCorruptRecord = "_unparsed"
    -    val schemaWithCorrField1 = schema.add(columnNameOfCorruptRecord, StringType)
    -    val df2 = spark
    -      .read
    -      .option("mode", "PERMISSIVE")
    -      .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
    -      .schema(schemaWithCorrField1)
    -      .csv(testFile(valueMalformedFile))
    -    checkAnswer(df2,
    -      Row(null, null, "0,2013-111-11 12:13:14") ::
    -      Row(1, java.sql.Date.valueOf("1983-08-04"), null) ::
    -      Nil)
    -
    -    // We put a `columnNameOfCorruptRecord` field in the middle of a schema
    -    val schemaWithCorrField2 = new StructType()
    -      .add("a", IntegerType)
    -      .add(columnNameOfCorruptRecord, StringType)
    -      .add("b", TimestampType)
    -    val df3 = spark
    -      .read
    -      .option("mode", "PERMISSIVE")
    -      .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
    -      .schema(schemaWithCorrField2)
    -      .csv(testFile(valueMalformedFile))
    -    checkAnswer(df3,
    -      Row(null, "0,2013-111-11 12:13:14", null) ::
    -      Row(1, null, java.sql.Date.valueOf("1983-08-04")) ::
    -      Nil)
    -
    -    val errMsg = intercept[AnalysisException] {
    -      spark
    +    Seq(false, true).foreach { wholeFile =>
    +      val schema = new StructType().add("a", IntegerType).add("b", TimestampType)
    +      val df1 = spark
    +        .read
    +        .option("mode", "PERMISSIVE")
    +        .option("wholeFile", wholeFile)
    +        .schema(schema)
    +        .csv(testFile(valueMalformedFile))
    +      checkAnswer(df1,
    +        Row(null, null) ::
    +        Row(1, java.sql.Date.valueOf("1983-08-04")) ::
    +        Nil)
    +
    +      // If `schema` has `columnNameOfCorruptRecord`, it should handle corrupt records
    +      val columnNameOfCorruptRecord = "_unparsed"
    +      val schemaWithCorrField1 = schema.add(columnNameOfCorruptRecord, StringType)
    +      val df2 = spark
             .read
             .option("mode", "PERMISSIVE")
             .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
    -        .schema(schema.add(columnNameOfCorruptRecord, IntegerType))
    +        .option("wholeFile", wholeFile)
    +        .schema(schemaWithCorrField1)
             .csv(testFile(valueMalformedFile))
    -        .collect
    -    }.getMessage
    -    assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
    +      checkAnswer(df2,
    +        Row(null, null, "0,2013-111-11 12:13:14") ::
    +        Row(1, java.sql.Date.valueOf("1983-08-04"), null) ::
    +        Nil)
    +
    +      // We put a `columnNameOfCorruptRecord` field in the middle of a schema
    +      val schemaWithCorrField2 = new StructType()
    +        .add("a", IntegerType)
    +        .add(columnNameOfCorruptRecord, StringType)
    +        .add("b", TimestampType)
    +      val df3 = spark
    +        .read
    +        .option("mode", "PERMISSIVE")
    +        .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
    +        .option("wholeFile", wholeFile)
    +        .schema(schemaWithCorrField2)
    +        .csv(testFile(valueMalformedFile))
    +      checkAnswer(df3,
    +        Row(null, "0,2013-111-11 12:13:14", null) ::
    +        Row(1, null, java.sql.Date.valueOf("1983-08-04")) ::
    +        Nil)
    +
    +      val errMsg = intercept[AnalysisException] {
    +        spark
    +          .read
    +          .option("mode", "PERMISSIVE")
    +          .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
    +          .option("wholeFile", wholeFile)
    +          .schema(schema.add(columnNameOfCorruptRecord, IntegerType))
    +          .csv(testFile(valueMalformedFile))
    +          .collect
    +      }.getMessage
    +      assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
    +    }
    +  }
    +
    +  test("SPARK-19610: Parse normal multi-line CSV files") {
    +    val primitiveFieldAndType = Seq(
    +      """"
    +        |string","integer
    +        |
    +        |
    +        |","long
    +        |
    +        |","bigInteger",double,boolean,null""".stripMargin,
    +      """"this is a
    +        |simple
    +        |string.","
    +        |
    +        |10","
    +        |21474836470","92233720368547758070","
    +        |
    +        |1.7976931348623157E308",true,""".stripMargin)
    +
    +    withTempPath { path =>
    +      primitiveFieldAndType.toDF("value").coalesce(1).write.text(path.getAbsolutePath)
    +
    +      val df = spark.read
    +        .option("header", true)
    --- End diff ---
    
    Ah, sorry, I got you. So you mean, what if the other files still have headers?
    Those headers will be skipped as well; the Univocity parser handles this
    (did I understand correctly?)
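
    For reference, here is a minimal sketch of the behavior I mean (the directory
    layout, path, and schema below are hypothetical, only for illustration; `spark`
    is a SparkSession as in the test): with `header` enabled, the first line of
    every input file should be dropped, whether or not `wholeFile` is set.

        // Hypothetical layout: /tmp/csv holds two part files, each starting with
        // its own "a,b" header line followed by data rows.
        import org.apache.spark.sql.types._

        val schema = new StructType().add("a", IntegerType).add("b", StringType)
        val df = spark.read
          .option("header", true)     // drop the first line of each file
          .option("wholeFile", true)  // parse each file whole; quoted fields may span lines
          .schema(schema)
          .csv("/tmp/csv")

        // Expected: only the data rows come back; neither file's header line
        // appears as a record.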

