beam-commits mailing list archives

From ieme...@apache.org
Subject [beam] branch spark-runner_structured-streaming updated: Rename SparkPipelineResult to SparkStructuredStreamingPipelineResult This is done to avoid a possible collision with the one in SparkRunner. However, a collision cannot happen at the moment because the class is package-private, so the rename is also done for consistency.
Date Wed, 24 Apr 2019 08:24:09 GMT
This is an automated email from the ASF dual-hosted git repository.

iemejia pushed a commit to branch spark-runner_structured-streaming
in repository https://gitbox.apache.org/repos/asf/beam.git


The following commit(s) were added to refs/heads/spark-runner_structured-streaming by this push:
     new dafcca8  Rename SparkPipelineResult to SparkStructuredStreamingPipelineResult This is done
to avoid a possible collision with the one in SparkRunner. However, a collision cannot happen at the
moment because the class is package-private, so the rename is also done for consistency.
dafcca8 is described below

commit dafcca8ee7764b6b1908f89f31b9e3138dca2daa
Author: Ismaël Mejía <iemejia@gmail.com>
AuthorDate: Wed Apr 24 10:22:04 2019 +0200

    Rename SparkPipelineResult to SparkStructuredStreamingPipelineResult
    This is done to avoid a possible collision with the one in SparkRunner.
    However, a collision cannot happen at the moment because the class is
    package-private, so the rename is also done for consistency.
---
 ...lt.java => SparkStructuredStreamingPipelineResult.java} |  2 +-
 .../SparkStructuredStreamingRunner.java                    | 14 ++++++++------
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkPipelineResult.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java
similarity index 96%
rename from runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkPipelineResult.java
rename to runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java
index 48c117d..d0198d4 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkPipelineResult.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingPipelineResult.java
@@ -24,7 +24,7 @@ import org.apache.beam.sdk.metrics.MetricResults;
 import org.joda.time.Duration;
 
 /** Represents a Spark pipeline execution result. */
-class SparkPipelineResult implements PipelineResult {
+class SparkStructuredStreamingPipelineResult implements PipelineResult {
 
   @Nullable // TODO: remove once method will be implemented
   @Override
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java
index acb5615..c63efe4 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/SparkStructuredStreamingRunner.java
@@ -37,17 +37,19 @@ import org.slf4j.LoggerFactory;
  * Beam pipeline with the default options of a single threaded spark instance in local mode, we
  * would do the following:
  *
- * <p>{@code Pipeline p = [logic for pipeline creation] SparkPipelineResult result =
- * (SparkPipelineResult) p.run(); }
+ * <p>{@code Pipeline p = [logic for pipeline creation] SparkStructuredStreamingPipelineResult
+ * result = (SparkStructuredStreamingPipelineResult) p.run(); }
  *
  * <p>To create a pipeline runner to run against a different spark cluster, with a custom master url
  * we would do the following:
  *
  * <p>{@code Pipeline p = [logic for pipeline creation] SparkStructuredStreamingPipelineOptions
  * options = SparkPipelineOptionsFactory.create(); options.setSparkMaster("spark://host:port");
- * SparkPipelineResult result = (SparkPipelineResult) p.run(); }
+ * SparkStructuredStreamingPipelineResult result = (SparkStructuredStreamingPipelineResult) p.run();
+ * }
  */
-public final class SparkStructuredStreamingRunner extends PipelineRunner<SparkPipelineResult> {
+public final class SparkStructuredStreamingRunner
+    extends PipelineRunner<SparkStructuredStreamingPipelineResult> {
 
   private static final Logger LOG = LoggerFactory.getLogger(SparkStructuredStreamingRunner.class);
 
@@ -111,12 +113,12 @@ public final class SparkStructuredStreamingRunner extends PipelineRunner<SparkPi
   }
 
   @Override
-  public SparkPipelineResult run(final Pipeline pipeline) {
+  public SparkStructuredStreamingPipelineResult run(final Pipeline pipeline) {
     TranslationContext translationContext = translatePipeline(pipeline);
     // TODO initialise other services: checkpointing, metrics system, listeners, ...
     // TODO pass testMode using pipelineOptions
     translationContext.startPipeline(true);
-    return new SparkPipelineResult();
+    return new SparkStructuredStreamingPipelineResult();
   }
 
   private TranslationContext translatePipeline(Pipeline pipeline) {
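
For readers who want to try the renamed classes, below is a minimal usage sketch based only on the
javadoc shown in the diff above. SparkStructuredStreamingPipelineOptions and setSparkMaster are
taken from that javadoc; the class name StructuredStreamingSketch, the Create.of input, and the
"local[*]" master URL are illustrative assumptions, and the runner on this branch is still a work in
progress, so the example may not run end to end.

import org.apache.beam.runners.spark.structuredstreaming.SparkStructuredStreamingPipelineOptions;
import org.apache.beam.runners.spark.structuredstreaming.SparkStructuredStreamingRunner;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.PipelineResult;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.Create;

public class StructuredStreamingSketch {
  public static void main(String[] args) {
    // Options interface and setSparkMaster come from the javadoc in the diff above;
    // "local[*]" is an illustrative master URL for a local Spark instance.
    SparkStructuredStreamingPipelineOptions options =
        PipelineOptionsFactory.fromArgs(args).as(SparkStructuredStreamingPipelineOptions.class);
    options.setRunner(SparkStructuredStreamingRunner.class);
    options.setSparkMaster("local[*]");

    Pipeline p = Pipeline.create(options);
    p.apply(Create.of("hello", "structured", "streaming"));

    // SparkStructuredStreamingPipelineResult is package-private (see the diff), so code outside
    // org.apache.beam.runners.spark.structuredstreaming holds the result through the public
    // PipelineResult interface rather than the cast shown in the javadoc. Note that some result
    // methods are still stubs on this branch (see the @Nullable TODO in the renamed class).
    PipelineResult result = p.run();
  }
}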

