spark-reviews mailing list archives

From steveloughran <...@git.apache.org>
Subject [GitHub] spark pull request #19269: [SPARK-22026][SQL][WIP] data source v2 write path
Date Mon, 16 Oct 2017 11:39:20 GMT
Github user steveloughran commented on a diff in the pull request:

    https://github.com/apache/spark/pull/19269#discussion_r144822800
  
    --- Diff: sql/core/src/test/scala/org/apache/spark/sql/sources/v2/SimpleWritableDataSource.scala
---
    @@ -0,0 +1,254 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.spark.sql.sources.v2
    +
    +import java.io.{BufferedReader, InputStreamReader}
    +import java.text.SimpleDateFormat
    +import java.util.{Collections, Date, List => JList, Locale, Optional, UUID}
    +
    +import scala.collection.JavaConverters._
    +
    +import org.apache.hadoop.conf.Configuration
    +import org.apache.hadoop.fs.{FileSystem, Path}
    +
    +import org.apache.spark.SparkContext
    +import org.apache.spark.sql.{Row, SaveMode}
    +import org.apache.spark.sql.catalyst.InternalRow
    +import org.apache.spark.sql.sources.v2.reader.{DataReader, DataSourceV2Reader, ReadTask}
    +import org.apache.spark.sql.sources.v2.writer._
    +import org.apache.spark.sql.types.{DataType, StructType}
    +import org.apache.spark.util.SerializableConfiguration
    +
    +/**
    + * A HDFS based transactional writable data source.
    + * Each task writes data to `target/_temporary/jobId/$jobId-$partitionId-$attemptNumber`.
    + * Each job moves files from `target/_temporary/jobId/` to `target`.
    + */
    +class SimpleWritableDataSource extends DataSourceV2 with ReadSupport with WriteSupport {
    +
    +  private val schema = new StructType().add("i", "long").add("j", "long")
    +
    +  class Reader(path: String, conf: Configuration) extends DataSourceV2Reader {
    +    override def readSchema(): StructType = schema
    +
    +    override def createReadTasks(): JList[ReadTask[Row]] = {
    +      val dataPath = new Path(path)
    +      val fs = dataPath.getFileSystem(conf)
    +      if (fs.exists(dataPath)) {
    +        fs.listStatus(dataPath).filter(!_.getPath.getName.startsWith("_")).map { f =>
    +          val serializableConf = new SerializableConfiguration(conf)
    +          new SimpleCSVReadTask(f.getPath.toUri.toString, serializableConf): ReadTask[Row]
    +        }.toList.asJava
    +      } else {
    +        Collections.emptyList()
    +      }
    +    }
    +  }
    +
    +  class Writer(path: String, conf: Configuration) extends DataSourceV2Writer {
    +    // We can't get the real spark job id here, so we use a timestamp and random UUID to simulate
    +    // a unique job id.
    +    private val jobId = new SimpleDateFormat("yyyyMMddHHmmss", Locale.US).format(new Date()) +
    +      "-" + UUID.randomUUID()
    +
    +    override def createWriterFactory(): DataWriterFactory[Row] = {
    +      new SimpleCSVDataWriterFactory(path, jobId, new SerializableConfiguration(conf))
    +    }
    +
    +    override def commit(messages: Array[WriterCommitMessage]): Unit = {
    +      val finalPath = new Path(path)
    +      val jobPath = new Path(new Path(finalPath, "_temporary"), jobId)
    +      val fs = jobPath.getFileSystem(conf)
    +      try {
    +        for (file <- fs.listStatus(jobPath).map(_.getPath)) {
    +          fs.rename(file, new Path(finalPath, file.getName))
    --- End diff --
    
    Treat `rename()` returning false as a failure. It's an ugly mess because, say, HDFS returns false for a missing source file, while other filesystems return false for minor no-ops (src == dest). Because of HDFS, I'd have something like
    
    ```scala
    val dest = new Path(finalPath, file.getName)
    if (!fs.rename(file, dest)) {
      throw new IOException(s"failed to rename($file, $dest)")
    }
    ```
    
    That then helps us write the spec and tests for HADOOP-11452, and make public a rename which throws exceptions on failure.
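
    For illustration, here is a minimal self-contained sketch of that pattern as a reusable helper; the name `renameOrThrow` is hypothetical, not an existing Hadoop or Spark API:

    ```scala
    import java.io.IOException

    import org.apache.hadoop.fs.{FileSystem, Path}

    object RenameUtils {
      // Hypothetical helper: wraps FileSystem.rename() and turns its boolean
      // failure signal into an exception, so callers cannot silently ignore it.
      def renameOrThrow(fs: FileSystem, src: Path, dest: Path): Unit = {
        if (!fs.rename(src, dest)) {
          throw new IOException(s"failed to rename($src, $dest)")
        }
      }
    }
    ```

    The commit loop above would then call `RenameUtils.renameOrThrow(fs, file, new Path(finalPath, file.getName))`, so a failed or skipped rename fails the job instead of silently dropping a task's output.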


---
