beam-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jbono...@apache.org
Subject [2/2] incubator-beam git commit: [BEAM-267] Enable checkstyle in Spark runner
Date Sun, 08 May 2016 06:06:15 GMT
[BEAM-267] Enable checkstyle in Spark runner


Project: http://git-wip-us.apache.org/repos/asf/incubator-beam/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-beam/commit/0f3b0533
Tree: http://git-wip-us.apache.org/repos/asf/incubator-beam/tree/0f3b0533
Diff: http://git-wip-us.apache.org/repos/asf/incubator-beam/diff/0f3b0533

Branch: refs/heads/master
Commit: 0f3b053356f3321d08d4e2ee457a037df778bee4
Parents: ff825b0
Author: Jean-Baptiste Onofré <jbonofre@apache.org>
Authored: Fri May 6 17:47:46 2016 +0200
Committer: Jean-Baptiste Onofré <jbonofre@apache.org>
Committed: Sun May 8 07:59:26 2016 +0200

----------------------------------------------------------------------
 runners/spark/pom.xml                           |   2 -
 .../runners/spark/SparkPipelineOptions.java     |   3 +
 .../beam/runners/spark/SparkPipelineRunner.java |  13 +-
 .../runners/spark/SparkRunnerRegistrar.java     |   5 +-
 .../spark/aggregators/AggAccumParam.java        |   4 +
 .../spark/aggregators/NamedAggregators.java     |  64 +++--
 .../beam/runners/spark/coders/CoderHelpers.java |  13 +-
 .../runners/spark/coders/NullWritableCoder.java |  10 +-
 .../runners/spark/coders/WritableCoder.java     |  19 +-
 .../apache/beam/runners/spark/io/ConsoleIO.java |   7 +
 .../beam/runners/spark/io/CreateStream.java     |   6 +-
 .../apache/beam/runners/spark/io/KafkaIO.java   |  18 +-
 .../beam/runners/spark/io/hadoop/HadoopIO.java  |  27 +-
 .../spark/io/hadoop/ShardNameBuilder.java       |   7 +-
 .../io/hadoop/ShardNameTemplateHelper.java      |   7 +-
 .../io/hadoop/TemplatedAvroKeyOutputFormat.java |   9 +-
 .../TemplatedSequenceFileOutputFormat.java      |   7 +-
 .../io/hadoop/TemplatedTextOutputFormat.java    |   7 +-
 .../runners/spark/translation/DoFnFunction.java |  38 +--
 .../spark/translation/EvaluationContext.java    |  26 +-
 .../spark/translation/MultiDoFnFunction.java    |  42 +--
 .../spark/translation/SparkContextFactory.java  |   3 +
 .../translation/SparkPipelineEvaluator.java     |  13 +-
 .../translation/SparkPipelineTranslator.java    |   3 +-
 .../spark/translation/SparkProcessContext.java  |  72 ++---
 .../spark/translation/SparkRuntimeContext.java  |  51 ++--
 .../spark/translation/TransformEvaluator.java   |  11 +-
 .../spark/translation/TransformTranslator.java  | 271 +++++++++++--------
 .../streaming/StreamingEvaluationContext.java   |  19 +-
 .../streaming/StreamingTransformTranslator.java |  85 +++---
 .../StreamingWindowPipelineDetector.java        |   6 +-
 .../runners/spark/util/BroadcastHelper.java     |  12 +-
 .../beam/runners/spark/util/ByteArray.java      |   7 +-
 .../util/SinglePrimitiveOutputPTransform.java   |   3 +
 .../beam/runners/spark/EmptyInputTest.java      |   6 +
 .../beam/runners/spark/SimpleWordCountTest.java |  21 +-
 .../runners/spark/SparkRunnerRegistrarTest.java |  12 +-
 .../beam/runners/spark/io/AvroPipelineTest.java |   6 +-
 .../beam/runners/spark/io/NumShardsTest.java    |   3 +
 .../io/hadoop/HadoopFileFormatPipelineTest.java |  21 +-
 .../spark/io/hadoop/ShardNameBuilderTest.java   |   3 +
 .../spark/translation/CombineGloballyTest.java  |  11 +-
 .../spark/translation/CombinePerKeyTest.java    |   3 +
 .../spark/translation/DoFnOutputTest.java       |   3 +
 .../translation/MultiOutputWordCountTest.java   |   6 +
 .../spark/translation/SerializationTest.java    |  16 +-
 .../spark/translation/SideEffectsTest.java      |   6 +-
 .../translation/SparkPipelineOptionsTest.java   |   3 +
 .../translation/WindowedWordCountTest.java      |   3 +
 .../streaming/FlattenStreamingTest.java         |   2 +-
 .../streaming/KafkaStreamingTest.java           |   2 +-
 .../streaming/SimpleStreamingWordCountTest.java |   5 +-
 .../streaming/utils/EmbeddedKafkaCluster.java   |   4 +
 .../streaming/utils/PAssertStreaming.java       |   3 +-
 54 files changed, 634 insertions(+), 395 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/pom.xml
----------------------------------------------------------------------
diff --git a/runners/spark/pom.xml b/runners/spark/pom.xml
index e673246..5daf1e1 100644
--- a/runners/spark/pom.xml
+++ b/runners/spark/pom.xml
@@ -211,12 +211,10 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
       </plugin>
-      <!-- Checkstyle errors for now
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-checkstyle-plugin</artifactId>
       </plugin>
-      -->
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-source-plugin</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkPipelineOptions.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkPipelineOptions.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkPipelineOptions.java
index bdf832b..091382e 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkPipelineOptions.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkPipelineOptions.java
@@ -24,6 +24,9 @@ import org.apache.beam.sdk.options.Description;
 import org.apache.beam.sdk.options.PipelineOptions;
 import org.apache.beam.sdk.options.StreamingOptions;
 
+/**
+ * Spark runner pipeline options.
+ */
 public interface SparkPipelineOptions extends PipelineOptions, StreamingOptions,
                                               ApplicationNameOptions {
   @Description("The url of the spark master to connect to, (e.g. spark://host:port, local[4]).")

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkPipelineRunner.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkPipelineRunner.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkPipelineRunner.java
index 8635cfb..bae4e53 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkPipelineRunner.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkPipelineRunner.java
@@ -120,14 +120,14 @@ public final class SparkPipelineRunner extends PipelineRunner<EvaluationResult>
    */
   @SuppressWarnings("rawtypes")
   @Override
-  public <OT extends POutput, IT extends PInput> OT apply(
-      PTransform<IT, OT> transform, IT input) {
+  public <OutputT extends POutput, InputT extends PInput> OutputT apply(
+      PTransform<InputT, OutputT> transform, InputT input) {
 
     if (transform instanceof GroupByKey) {
-      return (OT) ((PCollection) input).apply(
+      return (OutputT) ((PCollection) input).apply(
           new GroupByKeyViaGroupByKeyOnly((GroupByKey) transform));
     } else if (transform instanceof Create.Values) {
-      return (OT) super.apply(
+      return (OutputT) super.apply(
         new SinglePrimitiveOutputPTransform((Create.Values) transform), input);
     } else {
       return super.apply(transform, input);
@@ -216,6 +216,9 @@ public final class SparkPipelineRunner extends PipelineRunner<EvaluationResult>
     return new StreamingEvaluationContext(jsc, pipeline, jssc, streamingOptions.getTimeout());
   }
 
+  /**
+   * Evaluator on the pipeline.
+   */
   public abstract static class Evaluator implements Pipeline.PipelineVisitor {
     protected static final Logger LOG = LoggerFactory.getLogger(Evaluator.class);
 
@@ -275,7 +278,7 @@ public final class SparkPipelineRunner extends PipelineRunner<EvaluationResult>
       doVisitTransform(node);
     }
 
-    protected abstract <PT extends PTransform<? super PInput, POutput>> void
+    protected abstract <TransformT extends PTransform<? super PInput, POutput>> void
         doVisitTransform(TransformTreeNode node);
 
     @Override

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunnerRegistrar.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunnerRegistrar.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunnerRegistrar.java
index 30142f9..9537ec6 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunnerRegistrar.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunnerRegistrar.java
@@ -18,13 +18,14 @@
 
 package org.apache.beam.runners.spark;
 
-import com.google.auto.service.AutoService;
-import com.google.common.collect.ImmutableList;
 import org.apache.beam.sdk.options.PipelineOptions;
 import org.apache.beam.sdk.options.PipelineOptionsRegistrar;
 import org.apache.beam.sdk.runners.PipelineRunner;
 import org.apache.beam.sdk.runners.PipelineRunnerRegistrar;
 
+import com.google.auto.service.AutoService;
+import com.google.common.collect.ImmutableList;
+
 /**
  * Contains the {@link PipelineRunnerRegistrar} and {@link PipelineOptionsRegistrar} for the
  * {@link SparkPipelineRunner}.

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/aggregators/AggAccumParam.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/aggregators/AggAccumParam.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/aggregators/AggAccumParam.java
index a75aeb3..9ce8b33 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/aggregators/AggAccumParam.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/aggregators/AggAccumParam.java
@@ -20,7 +20,11 @@ package org.apache.beam.runners.spark.aggregators;
 
 import org.apache.spark.AccumulatorParam;
 
+/**
+ * Aggregator accumulator param.
+ */
 public class AggAccumParam implements AccumulatorParam<NamedAggregators> {
+
   @Override
   public NamedAggregators addAccumulator(NamedAggregators current, NamedAggregators added) {
     return current.merge(added);

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/aggregators/NamedAggregators.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/aggregators/NamedAggregators.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/aggregators/NamedAggregators.java
index 64c473e..6ab6dc9 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/aggregators/NamedAggregators.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/aggregators/NamedAggregators.java
@@ -18,6 +18,13 @@
 
 package org.apache.beam.runners.spark.aggregators;
 
+import org.apache.beam.runners.spark.translation.SparkRuntimeContext;
+import org.apache.beam.sdk.coders.CannotProvideCoderException;
+import org.apache.beam.sdk.coders.Coder;
+import org.apache.beam.sdk.transforms.Combine;
+
+import com.google.common.collect.ImmutableList;
+
 import java.io.IOException;
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
@@ -25,12 +32,6 @@ import java.io.Serializable;
 import java.util.Map;
 import java.util.TreeMap;
 
-import com.google.common.collect.ImmutableList;
-import org.apache.beam.runners.spark.translation.SparkRuntimeContext;
-import org.apache.beam.sdk.coders.CannotProvideCoderException;
-import org.apache.beam.sdk.coders.Coder;
-import org.apache.beam.sdk.transforms.Combine;
-
 /**
  * This class wraps a map of named aggregators. Spark expects that all accumulators be declared
  * before a job is launched. Dataflow allows aggregators to be used and incremented on the fly.
@@ -96,8 +97,10 @@ public class NamedAggregators implements Serializable {
    * so require some casting.
    */
   @SuppressWarnings("unchecked")
-  private static <A, B, C> State<A, B, C> merge(State<?, ?, ?> s1, State<?, ?, ?> s2) {
-    return ((State<A, B, C>) s1).merge((State<A, B, C>) s2);
+  private static <InputT, InterT, OutputT> State<InputT, InterT, OutputT> merge(
+      State<?, ?, ?> s1,
+      State<?, ?, ?> s2) {
+    return ((State<InputT, InterT, OutputT>) s1).merge((State<InputT, InterT, OutputT>) s2);
   }
 
   @Override
@@ -110,38 +113,39 @@ public class NamedAggregators implements Serializable {
   }
 
   /**
-   * @param <IN>    Input data type
-   * @param <INTER> Intermediate data type (useful for averages)
-   * @param <OUT>   Output data type
+   * @param <InputT>    Input data type
+   * @param <InterT> Intermediate data type (useful for averages)
+   * @param <OutputT>   Output data type
    */
-  public interface State<IN, INTER, OUT> extends Serializable {
+  public interface State<InputT, InterT, OutputT> extends Serializable {
     /**
      * @param element new element to update state
      */
-    void update(IN element);
+    void update(InputT element);
 
-    State<IN, INTER, OUT> merge(State<IN, INTER, OUT> other);
+    State<InputT, InterT, OutputT> merge(State<InputT, InterT, OutputT> other);
 
-    INTER current();
+    InterT current();
 
-    OUT render();
+    OutputT render();
 
-    Combine.CombineFn<IN, INTER, OUT> getCombineFn();
+    Combine.CombineFn<InputT, InterT, OutputT> getCombineFn();
   }
 
   /**
    * =&gt; combineFunction in data flow.
    */
-  public static class CombineFunctionState<IN, INTER, OUT> implements State<IN, INTER, OUT> {
+  public static class CombineFunctionState<InputT, InterT, OutputT>
+      implements State<InputT, InterT, OutputT> {
 
-    private Combine.CombineFn<IN, INTER, OUT> combineFn;
-    private Coder<IN> inCoder;
+    private Combine.CombineFn<InputT, InterT, OutputT> combineFn;
+    private Coder<InputT> inCoder;
     private SparkRuntimeContext ctxt;
-    private transient INTER state;
+    private transient InterT state;
 
     public CombineFunctionState(
-        Combine.CombineFn<IN, INTER, OUT> combineFn,
-        Coder<IN> inCoder,
+        Combine.CombineFn<InputT, InterT, OutputT> combineFn,
+        Coder<InputT> inCoder,
         SparkRuntimeContext ctxt) {
       this.combineFn = combineFn;
       this.inCoder = inCoder;
@@ -150,28 +154,28 @@ public class NamedAggregators implements Serializable {
     }
 
     @Override
-    public void update(IN element) {
+    public void update(InputT element) {
       combineFn.addInput(state, element);
     }
 
     @Override
-    public State<IN, INTER, OUT> merge(State<IN, INTER, OUT> other) {
+    public State<InputT, InterT, OutputT> merge(State<InputT, InterT, OutputT> other) {
       this.state = combineFn.mergeAccumulators(ImmutableList.of(current(), other.current()));
       return this;
     }
 
     @Override
-    public INTER current() {
+    public InterT current() {
       return state;
     }
 
     @Override
-    public OUT render() {
+    public OutputT render() {
       return combineFn.extractOutput(state);
     }
 
     @Override
-    public Combine.CombineFn<IN, INTER, OUT> getCombineFn() {
+    public Combine.CombineFn<InputT, InterT, OutputT> getCombineFn() {
       return combineFn;
     }
 
@@ -190,8 +194,8 @@ public class NamedAggregators implements Serializable {
     @SuppressWarnings("unchecked")
     private void readObject(ObjectInputStream ois) throws IOException, ClassNotFoundException {
       ctxt = (SparkRuntimeContext) ois.readObject();
-      combineFn = (Combine.CombineFn<IN, INTER, OUT>) ois.readObject();
-      inCoder = (Coder<IN>) ois.readObject();
+      combineFn = (Combine.CombineFn<InputT, InterT, OutputT>) ois.readObject();
+      inCoder = (Coder<InputT>) ois.readObject();
       try {
         state = combineFn.getAccumulatorCoder(ctxt.getCoderRegistry(), inCoder)
             .decode(ois, Coder.Context.NESTED);

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/CoderHelpers.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/CoderHelpers.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/CoderHelpers.java
index 7dc6af6..07587fc 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/CoderHelpers.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/CoderHelpers.java
@@ -18,17 +18,20 @@
 
 package org.apache.beam.runners.spark.coders;
 
+import org.apache.beam.runners.spark.util.ByteArray;
+import org.apache.beam.sdk.coders.Coder;
+
+import com.google.common.collect.Iterables;
+
+import org.apache.spark.api.java.function.Function;
+import org.apache.spark.api.java.function.PairFunction;
+
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.util.LinkedList;
 import java.util.List;
 
-import com.google.common.collect.Iterables;
-import org.apache.beam.runners.spark.util.ByteArray;
-import org.apache.beam.sdk.coders.Coder;
-import org.apache.spark.api.java.function.Function;
-import org.apache.spark.api.java.function.PairFunction;
 import scala.Tuple2;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/NullWritableCoder.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/NullWritableCoder.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/NullWritableCoder.java
index 529d67b..7cff325 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/NullWritableCoder.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/NullWritableCoder.java
@@ -18,13 +18,17 @@
 
 package org.apache.beam.runners.spark.coders;
 
-import java.io.InputStream;
-import java.io.OutputStream;
+import org.apache.beam.sdk.coders.Coder;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
-import org.apache.beam.sdk.coders.Coder;
 import org.apache.hadoop.io.NullWritable;
 
+import java.io.InputStream;
+import java.io.OutputStream;
+
+/**
+ * Simple writable coder for Null.
+ */
 public final class NullWritableCoder extends WritableCoder<NullWritable> {
   private static final long serialVersionUID = 1L;
 

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java
index f2836fe..4719e46 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java
@@ -18,6 +18,16 @@
 
 package org.apache.beam.runners.spark.coders;
 
+import org.apache.beam.sdk.coders.Coder;
+import org.apache.beam.sdk.coders.CoderException;
+import org.apache.beam.sdk.coders.StandardCoder;
+import org.apache.beam.sdk.util.CloudObject;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Writable;
+
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -26,15 +36,6 @@ import java.io.OutputStream;
 import java.lang.reflect.InvocationTargetException;
 import java.util.List;
 
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import org.apache.beam.sdk.coders.Coder;
-import org.apache.beam.sdk.coders.CoderException;
-import org.apache.beam.sdk.coders.StandardCoder;
-import org.apache.beam.sdk.util.CloudObject;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.Writable;
-
 /**
  * A {@code WritableCoder} is a {@link Coder} for a Java class that implements {@link Writable}.
  *

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/io/ConsoleIO.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/ConsoleIO.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/ConsoleIO.java
index 41dc367..eefea77 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/ConsoleIO.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/ConsoleIO.java
@@ -29,6 +29,9 @@ public final class ConsoleIO {
   private ConsoleIO() {
   }
 
+  /**
+   * Write on the console.
+   */
   public static final class Write {
 
     private Write() {
@@ -42,6 +45,10 @@ public final class ConsoleIO {
       return new Unbound<>(num);
     }
 
+    /**
+     * {@link PTransform} writing {@link PCollection} on the console.
+     * @param <T> the type of the elements in the written {@link PCollection}
+     */
     public static class Unbound<T> extends PTransform<PCollection<T>, PDone> {
 
       private final int num;

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/io/CreateStream.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/CreateStream.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/CreateStream.java
index 917f8a0..e7a9971 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/CreateStream.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/CreateStream.java
@@ -17,12 +17,13 @@
  */
 package org.apache.beam.runners.spark.io;
 
-import com.google.common.base.Preconditions;
 import org.apache.beam.sdk.transforms.PTransform;
 import org.apache.beam.sdk.util.WindowingStrategy;
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.beam.sdk.values.PInput;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Create an input stream from Queue.
  *
@@ -44,6 +45,9 @@ public final class CreateStream<T> {
     return new QueuedValues<>(queuedValues);
   }
 
+  /**
+   * {@link PTransform} for queueing values.
+   */
   public static final class QueuedValues<T> extends PTransform<PInput, PCollection<T>> {
 
     private final Iterable<Iterable<T>> queuedValues;

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/io/KafkaIO.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/KafkaIO.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/KafkaIO.java
index 1592bec..a97d86e 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/KafkaIO.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/KafkaIO.java
@@ -17,17 +17,19 @@
  */
 package org.apache.beam.runners.spark.io;
 
-import java.util.Map;
-import java.util.Set;
-
-import com.google.common.base.Preconditions;
-import kafka.serializer.Decoder;
 import org.apache.beam.sdk.transforms.PTransform;
 import org.apache.beam.sdk.util.WindowingStrategy;
 import org.apache.beam.sdk.values.KV;
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.beam.sdk.values.PInput;
 
+import com.google.common.base.Preconditions;
+
+import java.util.Map;
+import java.util.Set;
+
+import kafka.serializer.Decoder;
+
 /**
  * Read stream from Kafka.
  */
@@ -36,6 +38,9 @@ public final class KafkaIO {
   private KafkaIO() {
   }
 
+  /**
+   * Read operation from Kafka topics.
+   */
   public static final class Read {
 
     private Read() {
@@ -62,6 +67,9 @@ public final class KafkaIO {
       return new Unbound<>(keyDecoder, valueDecoder, key, value, topics, kafkaParams);
     }
 
+    /**
+     * A {@link PTransform} reading from Kafka topics and providing {@link PCollection}.
+     */
     public static class Unbound<K, V> extends PTransform<PInput, PCollection<KV<K, V>>> {
 
       private final Class<? extends Decoder<K>> keyDecoderClass;

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/HadoopIO.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/HadoopIO.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/HadoopIO.java
index 5b50d3e..00c10d4 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/HadoopIO.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/HadoopIO.java
@@ -17,10 +17,6 @@
  */
 package org.apache.beam.runners.spark.io.hadoop;
 
-import java.util.HashMap;
-import java.util.Map;
-
-import com.google.common.base.Preconditions;
 import org.apache.beam.sdk.io.ShardNameTemplate;
 import org.apache.beam.sdk.transforms.PTransform;
 import org.apache.beam.sdk.util.WindowingStrategy;
@@ -28,14 +24,26 @@ import org.apache.beam.sdk.values.KV;
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.beam.sdk.values.PDone;
 import org.apache.beam.sdk.values.PInput;
+
+import com.google.common.base.Preconditions;
+
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Spark native HadoopIO.
+ */
 public final class HadoopIO {
 
   private HadoopIO() {
   }
 
+  /**
+   * Read operation from HDFS.
+   */
   public static final class Read {
 
     private Read() {
@@ -46,6 +54,11 @@ public final class HadoopIO {
       return new Bound<>(filepattern, format, key, value);
     }
 
+    /**
+     * A {@link PTransform} reading bounded collection of data from HDFS.
+     * @param <K> the type of the keys read from HDFS
+     * @param <V> the type of the values read from HDFS
+     */
     public static class Bound<K, V> extends PTransform<PInput, PCollection<KV<K, V>>> {
 
       private final String filepattern;
@@ -95,6 +108,9 @@ public final class HadoopIO {
 
   }
 
+  /**
+   * Write operation on HDFS.
+   */
   public static final class Write {
 
     private Write() {
@@ -105,6 +121,9 @@ public final class HadoopIO {
       return new Bound<>(filenamePrefix, format, key, value);
     }
 
+    /**
+     * A {@link PTransform} writing {@link PCollection} on HDFS.
+     */
     public static class Bound<K, V> extends PTransform<PCollection<KV<K, V>>, PDone> {
 
       /** The filename to write to. */

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameBuilder.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameBuilder.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameBuilder.java
index c768340..6b36427 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameBuilder.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameBuilder.java
@@ -18,11 +18,14 @@
 
 package org.apache.beam.runners.spark.io.hadoop;
 
+import org.apache.hadoop.fs.Path;
+
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.hadoop.fs.Path;
-
+/**
+ * Shard name builder.
+ */
 public final class ShardNameBuilder {
 
   private ShardNameBuilder() {

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateHelper.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateHelper.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateHelper.java
index 2267ccb..d06b016 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateHelper.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateHelper.java
@@ -20,8 +20,6 @@ package org.apache.beam.runners.spark.io.hadoop;
 
 import static org.apache.beam.runners.spark.io.hadoop.ShardNameBuilder.replaceShardNumber;
 
-import java.io.IOException;
-
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.TaskID;
@@ -30,6 +28,11 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+
+/**
+ * Shard name template helper.
+ */
 public final class ShardNameTemplateHelper {
 
   private static final Logger LOG = LoggerFactory.getLogger(ShardNameTemplateHelper.class);

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedAvroKeyOutputFormat.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedAvroKeyOutputFormat.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedAvroKeyOutputFormat.java
index b755928..f747e7b 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedAvroKeyOutputFormat.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedAvroKeyOutputFormat.java
@@ -18,14 +18,17 @@
 
 package org.apache.beam.runners.spark.io.hadoop;
 
-import java.io.IOException;
-import java.io.OutputStream;
-
 import org.apache.avro.mapreduce.AvroKeyOutputFormat;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * Templated Avro key output format.
+ */
 public class TemplatedAvroKeyOutputFormat<T> extends AvroKeyOutputFormat<T>
     implements ShardNameTemplateAware {
 

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedSequenceFileOutputFormat.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedSequenceFileOutputFormat.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedSequenceFileOutputFormat.java
index 35b6163..bd2ee4d 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedSequenceFileOutputFormat.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedSequenceFileOutputFormat.java
@@ -18,13 +18,16 @@
 
 package org.apache.beam.runners.spark.io.hadoop;
 
-import java.io.IOException;
-
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
 
+import java.io.IOException;
+
+/**
+ * Templated sequence file output format.
+ */
 public class TemplatedSequenceFileOutputFormat<K, V> extends SequenceFileOutputFormat<K, V>
     implements ShardNameTemplateAware {
 

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedTextOutputFormat.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedTextOutputFormat.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedTextOutputFormat.java
index 8f0c0d2..8725a95 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedTextOutputFormat.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/TemplatedTextOutputFormat.java
@@ -18,13 +18,16 @@
 
 package org.apache.beam.runners.spark.io.hadoop;
 
-import java.io.IOException;
-
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
 
+import java.io.IOException;
+
+/**
+ * Templated text output format.
+ */
 public class TemplatedTextOutputFormat<K, V> extends TextOutputFormat<K, V>
     implements ShardNameTemplateAware {
 

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/DoFnFunction.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/DoFnFunction.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/DoFnFunction.java
index fbc9e98..b5888bd 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/DoFnFunction.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/DoFnFunction.java
@@ -18,26 +18,28 @@
 
 package org.apache.beam.runners.spark.translation;
 
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
 import org.apache.beam.runners.spark.util.BroadcastHelper;
 import org.apache.beam.sdk.transforms.DoFn;
 import org.apache.beam.sdk.util.WindowedValue;
 import org.apache.beam.sdk.values.TupleTag;
+
 import org.apache.spark.api.java.function.FlatMapFunction;
 
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
 /**
  * Dataflow's Do functions correspond to Spark's FlatMap functions.
  *
- * @param <I> Input element type.
- * @param <O> Output element type.
+ * @param <InputT> Input element type.
+ * @param <OutputT> Output element type.
  */
-public class DoFnFunction<I, O> implements FlatMapFunction<Iterator<WindowedValue<I>>,
-    WindowedValue<O>> {
-  private final DoFn<I, O> mFunction;
+public class DoFnFunction<InputT, OutputT>
+    implements FlatMapFunction<Iterator<WindowedValue<InputT>>,
+    WindowedValue<OutputT>> {
+  private final DoFn<InputT, OutputT> mFunction;
   private final SparkRuntimeContext mRuntimeContext;
   private final Map<TupleTag<?>, BroadcastHelper<?>> mSideInputs;
 
@@ -46,7 +48,7 @@ public class DoFnFunction<I, O> implements FlatMapFunction<Iterator<WindowedValu
    * @param runtime    Runtime to apply function in.
    * @param sideInputs Side inputs used in DoFunction.
    */
-  public DoFnFunction(DoFn<I, O> fn,
+  public DoFnFunction(DoFn<InputT, OutputT> fn,
                SparkRuntimeContext runtime,
                Map<TupleTag<?>, BroadcastHelper<?>> sideInputs) {
     this.mFunction = fn;
@@ -55,7 +57,7 @@ public class DoFnFunction<I, O> implements FlatMapFunction<Iterator<WindowedValu
   }
 
   @Override
-  public Iterable<WindowedValue<O>> call(Iterator<WindowedValue<I>> iter) throws
+  public Iterable<WindowedValue<OutputT>> call(Iterator<WindowedValue<InputT>> iter) throws
       Exception {
     ProcCtxt ctxt = new ProcCtxt(mFunction, mRuntimeContext, mSideInputs);
     ctxt.setup();
@@ -63,23 +65,23 @@ public class DoFnFunction<I, O> implements FlatMapFunction<Iterator<WindowedValu
     return ctxt.getOutputIterable(iter, mFunction);
   }
 
-  private class ProcCtxt extends SparkProcessContext<I, O, WindowedValue<O>> {
+  private class ProcCtxt extends SparkProcessContext<InputT, OutputT, WindowedValue<OutputT>> {
 
-    private final List<WindowedValue<O>> outputs = new LinkedList<>();
+    private final List<WindowedValue<OutputT>> outputs = new LinkedList<>();
 
-    ProcCtxt(DoFn<I, O> fn, SparkRuntimeContext runtimeContext, Map<TupleTag<?>,
+    ProcCtxt(DoFn<InputT, OutputT> fn, SparkRuntimeContext runtimeContext, Map<TupleTag<?>,
         BroadcastHelper<?>> sideInputs) {
       super(fn, runtimeContext, sideInputs);
     }
 
     @Override
-    public synchronized void output(O o) {
+    public synchronized void output(OutputT o) {
       outputs.add(windowedValue != null ? windowedValue.withValue(o) :
           WindowedValue.valueInGlobalWindow(o));
     }
 
     @Override
-    public synchronized void output(WindowedValue<O> o) {
+    public synchronized void output(WindowedValue<OutputT> o) {
       outputs.add(o);
     }
 
@@ -89,7 +91,7 @@ public class DoFnFunction<I, O> implements FlatMapFunction<Iterator<WindowedValu
     }
 
     @Override
-    protected Iterator<WindowedValue<O>> getOutputIterator() {
+    protected Iterator<WindowedValue<OutputT>> getOutputIterator() {
       return outputs.iterator();
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/EvaluationContext.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/EvaluationContext.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/EvaluationContext.java
index 6d49bd3..d737f5e 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/EvaluationContext.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/EvaluationContext.java
@@ -20,14 +20,6 @@ package org.apache.beam.runners.spark.translation;
 
 import static com.google.common.base.Preconditions.checkArgument;
 
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import com.google.common.base.Function;
-import com.google.common.collect.Iterables;
 import org.apache.beam.runners.spark.EvaluationResult;
 import org.apache.beam.runners.spark.coders.CoderHelpers;
 import org.apache.beam.sdk.Pipeline;
@@ -46,9 +38,19 @@ import org.apache.beam.sdk.values.PCollectionView;
 import org.apache.beam.sdk.values.PInput;
 import org.apache.beam.sdk.values.POutput;
 import org.apache.beam.sdk.values.PValue;
+
+import com.google.common.base.Function;
+import com.google.common.collect.Iterables;
+
 import org.apache.spark.api.java.JavaRDDLike;
 import org.apache.spark.api.java.JavaSparkContext;
 
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 
 /**
 * Evaluation context allows us to define how pipeline instructions are evaluated.
@@ -151,19 +153,19 @@ public class EvaluationContext implements EvaluationResult {
     return currentTransform;
   }
 
-  protected <I extends PInput> I getInput(PTransform<I, ?> transform) {
+  protected <T extends PInput> T getInput(PTransform<T, ?> transform) {
     checkArgument(currentTransform != null && currentTransform.getTransform() == transform,
         "can only be called with current transform");
     @SuppressWarnings("unchecked")
-    I input = (I) currentTransform.getInput();
+    T input = (T) currentTransform.getInput();
     return input;
   }
 
-  protected <O extends POutput> O getOutput(PTransform<?, O> transform) {
+  protected <T extends POutput> T getOutput(PTransform<?, T> transform) {
     checkArgument(currentTransform != null && currentTransform.getTransform() == transform,
         "can only be called with current transform");
     @SuppressWarnings("unchecked")
-    O output = (O) currentTransform.getOutput();
+    T output = (T) currentTransform.getOutput();
     return output;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/MultiDoFnFunction.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/MultiDoFnFunction.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/MultiDoFnFunction.java
index 2641e31..daa767d 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/MultiDoFnFunction.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/MultiDoFnFunction.java
@@ -18,39 +18,42 @@
 
 package org.apache.beam.runners.spark.translation;
 
-import java.util.Iterator;
-import java.util.Map;
+import org.apache.beam.runners.spark.util.BroadcastHelper;
+import org.apache.beam.sdk.transforms.DoFn;
+import org.apache.beam.sdk.util.WindowedValue;
+import org.apache.beam.sdk.values.TupleTag;
 
 import com.google.common.base.Function;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.LinkedListMultimap;
 import com.google.common.collect.Multimap;
-import org.apache.beam.runners.spark.util.BroadcastHelper;
-import org.apache.beam.sdk.transforms.DoFn;
-import org.apache.beam.sdk.util.WindowedValue;
-import org.apache.beam.sdk.values.TupleTag;
+
 import org.apache.spark.api.java.function.PairFlatMapFunction;
 import org.joda.time.Instant;
+
+import java.util.Iterator;
+import java.util.Map;
+
 import scala.Tuple2;
 
 /**
  * DoFunctions ignore side outputs. MultiDoFunctions deal with side outputs by enriching the
  * underlying data with multiple TupleTags.
  *
- * @param <I> Input type for DoFunction.
- * @param <O> Output type for DoFunction.
+ * @param <InputT> Input type for DoFunction.
+ * @param <OutputT> Output type for DoFunction.
  */
-class MultiDoFnFunction<I, O>
-    implements PairFlatMapFunction<Iterator<WindowedValue<I>>, TupleTag<?>, WindowedValue<?>> {
-  private final DoFn<I, O> mFunction;
+class MultiDoFnFunction<InputT, OutputT>
+    implements PairFlatMapFunction<Iterator<WindowedValue<InputT>>, TupleTag<?>, WindowedValue<?>> {
+  private final DoFn<InputT, OutputT> mFunction;
   private final SparkRuntimeContext mRuntimeContext;
-  private final TupleTag<O> mMainOutputTag;
+  private final TupleTag<OutputT> mMainOutputTag;
   private final Map<TupleTag<?>, BroadcastHelper<?>> mSideInputs;
 
   MultiDoFnFunction(
-      DoFn<I, O> fn,
+      DoFn<InputT, OutputT> fn,
       SparkRuntimeContext runtimeContext,
-      TupleTag<O> mainOutputTag,
+      TupleTag<OutputT> mainOutputTag,
       Map<TupleTag<?>, BroadcastHelper<?>> sideInputs) {
     this.mFunction = fn;
     this.mRuntimeContext = runtimeContext;
@@ -60,29 +63,30 @@ class MultiDoFnFunction<I, O>
 
   @Override
   public Iterable<Tuple2<TupleTag<?>, WindowedValue<?>>>
-      call(Iterator<WindowedValue<I>> iter) throws Exception {
+      call(Iterator<WindowedValue<InputT>> iter) throws Exception {
     ProcCtxt ctxt = new ProcCtxt(mFunction, mRuntimeContext, mSideInputs);
     mFunction.startBundle(ctxt);
     ctxt.setup();
     return ctxt.getOutputIterable(iter, mFunction);
   }
 
-  private class ProcCtxt extends SparkProcessContext<I, O, Tuple2<TupleTag<?>, WindowedValue<?>>> {
+  private class ProcCtxt
+      extends SparkProcessContext<InputT, OutputT, Tuple2<TupleTag<?>, WindowedValue<?>>> {
 
     private final Multimap<TupleTag<?>, WindowedValue<?>> outputs = LinkedListMultimap.create();
 
-    ProcCtxt(DoFn<I, O> fn, SparkRuntimeContext runtimeContext, Map<TupleTag<?>,
+    ProcCtxt(DoFn<InputT, OutputT> fn, SparkRuntimeContext runtimeContext, Map<TupleTag<?>,
         BroadcastHelper<?>> sideInputs) {
       super(fn, runtimeContext, sideInputs);
     }
 
     @Override
-    public synchronized void output(O o) {
+    public synchronized void output(OutputT o) {
       outputs.put(mMainOutputTag, windowedValue.withValue(o));
     }
 
     @Override
-    public synchronized void output(WindowedValue<O> o) {
+    public synchronized void output(WindowedValue<OutputT> o) {
       outputs.put(mMainOutputTag, o);
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkContextFactory.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkContextFactory.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkContextFactory.java
index 2bc8a7b..225afb8 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkContextFactory.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkContextFactory.java
@@ -22,6 +22,9 @@ import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.serializer.KryoSerializer;
 
+/**
+ * The Spark context factory.
+ */
 public final class SparkContextFactory {
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkPipelineEvaluator.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkPipelineEvaluator.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkPipelineEvaluator.java
index 0f47af6..609c413 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkPipelineEvaluator.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkPipelineEvaluator.java
@@ -38,16 +38,17 @@ public final class SparkPipelineEvaluator extends SparkPipelineRunner.Evaluator
   }
 
   @Override
-  protected <PT extends PTransform<? super PInput, POutput>> void doVisitTransform(TransformTreeNode
+  protected <TransformT extends PTransform<? super PInput, POutput>>
+  void doVisitTransform(TransformTreeNode
       node) {
     @SuppressWarnings("unchecked")
-    PT transform = (PT) node.getTransform();
+    TransformT transform = (TransformT) node.getTransform();
     @SuppressWarnings("unchecked")
-    Class<PT> transformClass = (Class<PT>) (Class<?>) transform.getClass();
-    @SuppressWarnings("unchecked") TransformEvaluator<PT> evaluator =
-        (TransformEvaluator<PT>) translator.translate(transformClass);
+    Class<TransformT> transformClass = (Class<TransformT>) (Class<?>) transform.getClass();
+    @SuppressWarnings("unchecked") TransformEvaluator<TransformT> evaluator =
+        (TransformEvaluator<TransformT>) translator.translate(transformClass);
     LOG.info("Evaluating {}", transform);
-    AppliedPTransform<PInput, POutput, PT> appliedTransform =
+    AppliedPTransform<PInput, POutput, TransformT> appliedTransform =
         AppliedPTransform.of(node.getFullName(), node.getInput(), node.getOutput(), transform);
     ctxt.setCurrentTransform(appliedTransform);
     evaluator.evaluate(transform, ctxt);

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkPipelineTranslator.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkPipelineTranslator.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkPipelineTranslator.java
index 77849a9..997940b 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkPipelineTranslator.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkPipelineTranslator.java
@@ -26,5 +26,6 @@ public interface SparkPipelineTranslator {
 
   boolean hasTranslation(Class<? extends PTransform<?, ?>> clazz);
 
-  <PT extends PTransform<?, ?>> TransformEvaluator<PT> translate(Class<PT> clazz);
+  <TransformT extends PTransform<?, ?>> TransformEvaluator<TransformT>
+  translate(Class<TransformT> clazz);
 }

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkProcessContext.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkProcessContext.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkProcessContext.java
index 5d4ece6..4f90a12 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkProcessContext.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkProcessContext.java
@@ -18,13 +18,6 @@
 
 package org.apache.beam.runners.spark.translation;
 
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.Map;
-
-import com.google.common.collect.AbstractIterator;
-import com.google.common.collect.Iterables;
 import org.apache.beam.runners.spark.util.BroadcastHelper;
 import org.apache.beam.sdk.coders.Coder;
 import org.apache.beam.sdk.options.PipelineOptions;
@@ -40,21 +33,34 @@ import org.apache.beam.sdk.util.state.InMemoryStateInternals;
 import org.apache.beam.sdk.util.state.StateInternals;
 import org.apache.beam.sdk.values.PCollectionView;
 import org.apache.beam.sdk.values.TupleTag;
+
+import com.google.common.collect.AbstractIterator;
+import com.google.common.collect.Iterables;
+
 import org.joda.time.Instant;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public abstract class SparkProcessContext<I, O, V> extends DoFn<I, O>.ProcessContext {
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * Spark runner process context.
+ */
+public abstract class SparkProcessContext<InputT, OutputT, ValueT>
+    extends DoFn<InputT, OutputT>.ProcessContext {
 
   private static final Logger LOG = LoggerFactory.getLogger(SparkProcessContext.class);
 
-  private final DoFn<I, O> fn;
+  private final DoFn<InputT, OutputT> fn;
   private final SparkRuntimeContext mRuntimeContext;
   private final Map<TupleTag<?>, BroadcastHelper<?>> mSideInputs;
 
-  protected WindowedValue<I> windowedValue;
+  protected WindowedValue<InputT> windowedValue;
 
-  SparkProcessContext(DoFn<I, O> fn,
+  SparkProcessContext(DoFn<InputT, OutputT> fn,
       SparkRuntimeContext runtime,
       Map<TupleTag<?>, BroadcastHelper<?>> sideInputs) {
     fn.super();
@@ -82,9 +88,9 @@ public abstract class SparkProcessContext<I, O, V> extends DoFn<I, O>.ProcessCon
   }
 
   @Override
-  public abstract void output(O output);
+  public abstract void output(OutputT output);
 
-  public abstract void output(WindowedValue<O> output);
+  public abstract void output(WindowedValue<OutputT> output);
 
   @Override
   public <T> void sideOutput(TupleTag<T> tupleTag, T t) {
@@ -104,19 +110,20 @@ public abstract class SparkProcessContext<I, O, V> extends DoFn<I, O>.ProcessCon
   }
 
   @Override
-  public <AI, AO> Aggregator<AI, AO> createAggregatorInternal(
+  public <AggregatorInputT, AggregatorOutputT>
+  Aggregator<AggregatorInputT, AggregatorOutputT> createAggregatorInternal(
      String named,
-      Combine.CombineFn<AI, ?, AO> combineFn) {
+      Combine.CombineFn<AggregatorInputT, ?, AggregatorOutputT> combineFn) {
     return mRuntimeContext.createAggregator(named, combineFn);
   }
 
   @Override
-  public I element() {
+  public InputT element() {
     return windowedValue.getValue();
   }
 
   @Override
-  public void outputWithTimestamp(O output, Instant timestamp) {
+  public void outputWithTimestamp(OutputT output, Instant timestamp) {
     output(WindowedValue.of(output, timestamp,
         windowedValue.getWindows(), windowedValue.getPane()));
   }
@@ -141,8 +148,8 @@ public abstract class SparkProcessContext<I, O, V> extends DoFn<I, O>.ProcessCon
   }
 
   @Override
-  public WindowingInternals<I, O> windowingInternals() {
-    return new WindowingInternals<I, O>() {
+  public WindowingInternals<InputT, OutputT> windowingInternals() {
+    return new WindowingInternals<InputT, OutputT>() {
 
       @Override
       public Collection<? extends BoundedWindow> windows() {
@@ -150,7 +157,7 @@ public abstract class SparkProcessContext<I, O, V> extends DoFn<I, O>.ProcessCon
       }
 
       @Override
-      public void outputWindowedValue(O output, Instant timestamp, Collection<?
+      public void outputWindowedValue(OutputT output, Instant timestamp, Collection<?
           extends BoundedWindow> windows, PaneInfo paneInfo) {
         output(WindowedValue.of(output, timestamp, windows, paneInfo));
       }
@@ -190,33 +197,33 @@ public abstract class SparkProcessContext<I, O, V> extends DoFn<I, O>.ProcessCon
   }
 
   protected abstract void clearOutput();
-  protected abstract Iterator<V> getOutputIterator();
+  protected abstract Iterator<ValueT> getOutputIterator();
 
-  protected Iterable<V> getOutputIterable(final Iterator<WindowedValue<I>> iter,
-      final DoFn<I, O> doFn) {
-    return new Iterable<V>() {
+  protected Iterable<ValueT> getOutputIterable(final Iterator<WindowedValue<InputT>> iter,
+                                               final DoFn<InputT, OutputT> doFn) {
+    return new Iterable<ValueT>() {
       @Override
-      public Iterator<V> iterator() {
+      public Iterator<ValueT> iterator() {
         return new ProcCtxtIterator(iter, doFn);
       }
     };
   }
 
-  private class ProcCtxtIterator extends AbstractIterator<V> {
+  private class ProcCtxtIterator extends AbstractIterator<ValueT> {
 
-    private final Iterator<WindowedValue<I>> inputIterator;
-    private final DoFn<I, O> doFn;
-    private Iterator<V> outputIterator;
+    private final Iterator<WindowedValue<InputT>> inputIterator;
+    private final DoFn<InputT, OutputT> doFn;
+    private Iterator<ValueT> outputIterator;
     private boolean calledFinish;
 
-    ProcCtxtIterator(Iterator<WindowedValue<I>> iterator, DoFn<I, O> doFn) {
+    ProcCtxtIterator(Iterator<WindowedValue<InputT>> iterator, DoFn<InputT, OutputT> doFn) {
       this.inputIterator = iterator;
       this.doFn = doFn;
       this.outputIterator = getOutputIterator();
     }
 
     @Override
-    protected V computeNext() {
+    protected ValueT computeNext() {
      // Process each element from the (input) iterator, which produces zero, one or more
      // output elements (of type ValueT) in the output iterator. Note that the output
       // collection (and iterator) is reset between each call to processElement, so the
@@ -253,6 +260,9 @@ public abstract class SparkProcessContext<I, O, V> extends DoFn<I, O>.ProcessCon
     }
   }
 
+  /**
+   * Spark process runtime exception.
+   */
   public static class SparkProcessException extends RuntimeException {
     SparkProcessException(Throwable t) {
       super(t);

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkRuntimeContext.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkRuntimeContext.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkRuntimeContext.java
index ea125de..46f5b33 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkRuntimeContext.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/SparkRuntimeContext.java
@@ -18,15 +18,6 @@
 
 package org.apache.beam.runners.spark.translation;
 
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.collect.ImmutableList;
 import org.apache.beam.runners.spark.aggregators.AggAccumParam;
 import org.apache.beam.runners.spark.aggregators.NamedAggregators;
 import org.apache.beam.sdk.Pipeline;
@@ -41,9 +32,20 @@ import org.apache.beam.sdk.transforms.Max;
 import org.apache.beam.sdk.transforms.Min;
 import org.apache.beam.sdk.transforms.Sum;
 import org.apache.beam.sdk.values.TypeDescriptor;
+
+import com.google.common.collect.ImmutableList;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.spark.Accumulator;
 import org.apache.spark.api.java.JavaSparkContext;
 
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
 
 /**
  * The SparkRuntimeContext allows us to define useful features on the client side before our
@@ -122,22 +124,22 @@ public class SparkRuntimeContext implements Serializable {
    *
    * @param named     Name of aggregator.
    * @param combineFn Combine function used in aggregation.
-   * @param <IN>      Type of inputs to aggregator.
-   * @param <INTER>   Intermediate data type
-   * @param <OUT>     Type of aggregator outputs.
+   * @param <InputT>      Type of inputs to aggregator.
+   * @param <InterT>   Intermediate data type
+   * @param <OutputT>     Type of aggregator outputs.
    * @return Specified aggregator
    */
-  public synchronized <IN, INTER, OUT> Aggregator<IN, OUT> createAggregator(
+  public synchronized <InputT, InterT, OutputT> Aggregator<InputT, OutputT> createAggregator(
       String named,
-      Combine.CombineFn<? super IN, INTER, OUT> combineFn) {
+      Combine.CombineFn<? super InputT, InterT, OutputT> combineFn) {
     @SuppressWarnings("unchecked")
-    Aggregator<IN, OUT> aggregator = (Aggregator<IN, OUT>) aggregators.get(named);
+    Aggregator<InputT, OutputT> aggregator = (Aggregator<InputT, OutputT>) aggregators.get(named);
     if (aggregator == null) {
       @SuppressWarnings("unchecked")
-      NamedAggregators.CombineFunctionState<IN, INTER, OUT> state =
+      NamedAggregators.CombineFunctionState<InputT, InterT, OutputT> state =
           new NamedAggregators.CombineFunctionState<>(
-              (Combine.CombineFn<IN, INTER, OUT>) combineFn,
-              (Coder<IN>) getCoder(combineFn),
+              (Combine.CombineFn<InputT, InterT, OutputT>) combineFn,
+              (Coder<InputT>) getCoder(combineFn),
               this);
       accum.add(new NamedAggregators(named, state));
       aggregator = new SparkAggregator<>(named, state);
@@ -186,13 +188,14 @@ public class SparkRuntimeContext implements Serializable {
   /**
    * Initialize spark aggregators exactly once.
    *
-   * @param <IN> Type of element fed in to aggregator.
+   * @param <InputT> Type of element fed in to aggregator.
    */
-  private static class SparkAggregator<IN, OUT> implements Aggregator<IN, OUT>, Serializable {
+  private static class SparkAggregator<InputT, OutputT>
+      implements Aggregator<InputT, OutputT>, Serializable {
     private final String name;
-    private final NamedAggregators.State<IN, ?, OUT> state;
+    private final NamedAggregators.State<InputT, ?, OutputT> state;
 
-    SparkAggregator(String name, NamedAggregators.State<IN, ?, OUT> state) {
+    SparkAggregator(String name, NamedAggregators.State<InputT, ?, OutputT> state) {
       this.name = name;
       this.state = state;
     }
@@ -203,12 +206,12 @@ public class SparkRuntimeContext implements Serializable {
     }
 
     @Override
-    public void addValue(IN elem) {
+    public void addValue(InputT elem) {
       state.update(elem);
     }
 
     @Override
-    public Combine.CombineFn<IN, ?, OUT> getCombineFn() {
+    public Combine.CombineFn<InputT, ?, OutputT> getCombineFn() {
       return state.getCombineFn();
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TransformEvaluator.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TransformEvaluator.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TransformEvaluator.java
index 30ab076..c5c7128 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TransformEvaluator.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TransformEvaluator.java
@@ -18,10 +18,13 @@
 
 package org.apache.beam.runners.spark.translation;
 
-import java.io.Serializable;
-
 import org.apache.beam.sdk.transforms.PTransform;
 
-public interface TransformEvaluator<PT extends PTransform<?, ?>> extends Serializable {
-  void evaluate(PT transform, EvaluationContext context);
+import java.io.Serializable;
+
+/**
+ * Describes a {@link PTransform} evaluator.
+ */
+public interface TransformEvaluator<TransformT extends PTransform<?, ?>> extends Serializable {
+  void evaluate(TransformT transform, EvaluationContext context);
 }

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/0f3b0533/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TransformTranslator.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TransformTranslator.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TransformTranslator.java
index 0366856..b462d35 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TransformTranslator.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TransformTranslator.java
@@ -23,19 +23,6 @@ import static org.apache.beam.runners.spark.io.hadoop.ShardNameBuilder.getOutput
 import static org.apache.beam.runners.spark.io.hadoop.ShardNameBuilder.getOutputFileTemplate;
 import static org.apache.beam.runners.spark.io.hadoop.ShardNameBuilder.replaceShardCount;
 
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import org.apache.avro.mapred.AvroKey;
-import org.apache.avro.mapreduce.AvroJob;
-import org.apache.avro.mapreduce.AvroKeyInputFormat;
 import org.apache.beam.runners.spark.coders.CoderHelpers;
 import org.apache.beam.runners.spark.io.hadoop.HadoopIO;
 import org.apache.beam.runners.spark.io.hadoop.ShardNameTemplateHelper;
@@ -68,6 +55,14 @@ import org.apache.beam.sdk.values.PCollectionList;
 import org.apache.beam.sdk.values.PCollectionTuple;
 import org.apache.beam.sdk.values.PCollectionView;
 import org.apache.beam.sdk.values.TupleTag;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import org.apache.avro.mapred.AvroKey;
+import org.apache.avro.mapreduce.AvroJob;
+import org.apache.avro.mapreduce.AvroKeyInputFormat;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
@@ -81,6 +76,14 @@ import org.apache.spark.api.java.function.Function;
 import org.apache.spark.api.java.function.Function2;
 import org.apache.spark.api.java.function.PairFlatMapFunction;
 import org.apache.spark.api.java.function.PairFunction;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
 import scala.Tuple2;
 
 /**
@@ -91,6 +94,9 @@ public final class TransformTranslator {
   private TransformTranslator() {
   }
 
+  /**
+   * Getter of the field.
+   */
   public static class FieldGetter {
     private final Map<String, Field> fields;
 
@@ -157,14 +163,16 @@ public final class TransformTranslator {
 
   private static final FieldGetter GROUPED_FG = new FieldGetter(Combine.GroupedValues.class);
 
-  private static <K, VI, VO> TransformEvaluator<Combine.GroupedValues<K, VI, VO>> grouped() {
-    return new TransformEvaluator<Combine.GroupedValues<K, VI, VO>>() {
+  private static <K, InputT, OutputT> TransformEvaluator<Combine.GroupedValues<K, InputT, OutputT>>
+  grouped() {
+    return new TransformEvaluator<Combine.GroupedValues<K, InputT, OutputT>>() {
       @Override
-      public void evaluate(Combine.GroupedValues<K, VI, VO> transform, EvaluationContext context) {
-        Combine.KeyedCombineFn<K, VI, ?, VO> keyed = GROUPED_FG.get("fn", transform);
+      public void evaluate(Combine.GroupedValues<K, InputT, OutputT> transform,
+                           EvaluationContext context) {
+        Combine.KeyedCombineFn<K, InputT, ?, OutputT> keyed = GROUPED_FG.get("fn", transform);
         @SuppressWarnings("unchecked")
-        JavaRDDLike<WindowedValue<KV<K, Iterable<VI>>>, ?> inRDD =
-            (JavaRDDLike<WindowedValue<KV<K, Iterable<VI>>>, ?>) context.getInputRDD(transform);
+        JavaRDDLike<WindowedValue<KV<K, Iterable<InputT>>>, ?> inRDD =
+            (JavaRDDLike<WindowedValue<KV<K, Iterable<InputT>>>, ?>) context.getInputRDD(transform);
         context.setOutputRDD(transform,
             inRDD.map(new KVFunction<>(keyed)));
       }
@@ -173,19 +181,21 @@ public final class TransformTranslator {
 
   private static final FieldGetter COMBINE_GLOBALLY_FG = new FieldGetter(Combine.Globally.class);
 
-  private static <I, A, O> TransformEvaluator<Combine.Globally<I, O>> combineGlobally() {
-    return new TransformEvaluator<Combine.Globally<I, O>>() {
+  private static <InputT, AccumT, OutputT> TransformEvaluator<Combine.Globally<InputT, OutputT>>
+  combineGlobally() {
+    return new TransformEvaluator<Combine.Globally<InputT, OutputT>>() {
 
       @Override
-      public void evaluate(Combine.Globally<I, O> transform, EvaluationContext context) {
-        final Combine.CombineFn<I, A, O> globally = COMBINE_GLOBALLY_FG.get("fn", transform);
+      public void evaluate(Combine.Globally<InputT, OutputT> transform, EvaluationContext context) {
+        final Combine.CombineFn<InputT, AccumT, OutputT> globally =
+            COMBINE_GLOBALLY_FG.get("fn", transform);
 
         @SuppressWarnings("unchecked")
-        JavaRDDLike<WindowedValue<I>, ?> inRdd =
-            (JavaRDDLike<WindowedValue<I>, ?>) context.getInputRDD(transform);
+        JavaRDDLike<WindowedValue<InputT>, ?> inRdd =
+            (JavaRDDLike<WindowedValue<InputT>, ?>) context.getInputRDD(transform);
 
-        final Coder<I> iCoder = context.getInput(transform).getCoder();
-        final Coder<A> aCoder;
+        final Coder<InputT> iCoder = context.getInput(transform).getCoder();
+        final Coder<AccumT> aCoder;
         try {
           aCoder = globally.getAccumulatorCoder(
               context.getPipeline().getCoderRegistry(), iCoder);
@@ -196,86 +206,92 @@ public final class TransformTranslator {
         // Use coders to convert objects in the PCollection to byte arrays, so they
         // can be transferred over the network for the shuffle.
         JavaRDD<byte[]> inRddBytes = inRdd
-            .map(WindowingHelpers.<I>unwindowFunction())
+            .map(WindowingHelpers.<InputT>unwindowFunction())
             .map(CoderHelpers.toByteFunction(iCoder));
 
-        /*A*/ byte[] acc = inRddBytes.aggregate(
+        /*AccumT*/ byte[] acc = inRddBytes.aggregate(
             CoderHelpers.toByteArray(globally.createAccumulator(), aCoder),
-            new Function2</*A*/ byte[], /*I*/ byte[], /*A*/ byte[]>() {
+            new Function2</*AccumT*/ byte[], /*InputT*/ byte[], /*AccumT*/ byte[]>() {
               @Override
-              public /*A*/ byte[] call(/*A*/ byte[] ab, /*I*/ byte[] ib) throws Exception {
-                A a = CoderHelpers.fromByteArray(ab, aCoder);
-                I i = CoderHelpers.fromByteArray(ib, iCoder);
+              public /*AccumT*/ byte[] call(/*AccumT*/ byte[] ab, /*InputT*/ byte[] ib)
+                  throws Exception {
+                AccumT a = CoderHelpers.fromByteArray(ab, aCoder);
+                InputT i = CoderHelpers.fromByteArray(ib, iCoder);
                 return CoderHelpers.toByteArray(globally.addInput(a, i), aCoder);
               }
             },
-            new Function2</*A*/ byte[], /*A*/ byte[], /*A*/ byte[]>() {
+            new Function2</*AccumT*/ byte[], /*AccumT*/ byte[], /*AccumT*/ byte[]>() {
               @Override
-              public /*A*/ byte[] call(/*A*/ byte[] a1b, /*A*/ byte[] a2b) throws Exception {
-                A a1 = CoderHelpers.fromByteArray(a1b, aCoder);
-                A a2 = CoderHelpers.fromByteArray(a2b, aCoder);
+              public /*AccumT*/ byte[] call(/*AccumT*/ byte[] a1b, /*AccumT*/ byte[] a2b)
+                  throws Exception {
+                AccumT a1 = CoderHelpers.fromByteArray(a1b, aCoder);
+                AccumT a2 = CoderHelpers.fromByteArray(a2b, aCoder);
                 // don't use Guava's ImmutableList.of as values may be null
-                List<A> accumulators = Collections.unmodifiableList(Arrays.asList(a1, a2));
-                A merged = globally.mergeAccumulators(accumulators);
+                List<AccumT> accumulators = Collections.unmodifiableList(Arrays.asList(a1, a2));
+                AccumT merged = globally.mergeAccumulators(accumulators);
                 return CoderHelpers.toByteArray(merged, aCoder);
               }
             }
         );
-        O output = globally.extractOutput(CoderHelpers.fromByteArray(acc, aCoder));
+        OutputT output = globally.extractOutput(CoderHelpers.fromByteArray(acc, aCoder));
 
-        Coder<O> coder = context.getOutput(transform).getCoder();
+        Coder<OutputT> coder = context.getOutput(transform).getCoder();
         JavaRDD<byte[]> outRdd = context.getSparkContext().parallelize(
             // don't use Guava's ImmutableList.of as output may be null
             CoderHelpers.toByteArrays(Collections.singleton(output), coder));
         context.setOutputRDD(transform, outRdd.map(CoderHelpers.fromByteFunction(coder))
-            .map(WindowingHelpers.<O>windowFunction()));
+            .map(WindowingHelpers.<OutputT>windowFunction()));
       }
     };
   }
 
   private static final FieldGetter COMBINE_PERKEY_FG = new FieldGetter(Combine.PerKey.class);
 
-  private static <K, VI, VA, VO> TransformEvaluator<Combine.PerKey<K, VI, VO>> combinePerKey() {
-    return new TransformEvaluator<Combine.PerKey<K, VI, VO>>() {
+  private static <K, InputT, AccumT, OutputT>
+  TransformEvaluator<Combine.PerKey<K, InputT, OutputT>> combinePerKey() {
+    return new TransformEvaluator<Combine.PerKey<K, InputT, OutputT>>() {
       @Override
-      public void evaluate(Combine.PerKey<K, VI, VO> transform, EvaluationContext context) {
-        final Combine.KeyedCombineFn<K, VI, VA, VO> keyed =
+      public void evaluate(Combine.PerKey<K, InputT, OutputT>
+                               transform, EvaluationContext context) {
+        final Combine.KeyedCombineFn<K, InputT, AccumT, OutputT> keyed =
             COMBINE_PERKEY_FG.get("fn", transform);
         @SuppressWarnings("unchecked")
-        JavaRDDLike<WindowedValue<KV<K, VI>>, ?> inRdd =
-            (JavaRDDLike<WindowedValue<KV<K, VI>>, ?>) context.getInputRDD(transform);
+        JavaRDDLike<WindowedValue<KV<K, InputT>>, ?> inRdd =
+            (JavaRDDLike<WindowedValue<KV<K, InputT>>, ?>) context.getInputRDD(transform);
 
         @SuppressWarnings("unchecked")
-        KvCoder<K, VI> inputCoder = (KvCoder<K, VI>) context.getInput(transform).getCoder();
+        KvCoder<K, InputT> inputCoder = (KvCoder<K, InputT>)
+            context.getInput(transform).getCoder();
         Coder<K> keyCoder = inputCoder.getKeyCoder();
-        Coder<VI> viCoder = inputCoder.getValueCoder();
-        Coder<VA> vaCoder;
+        Coder<InputT> viCoder = inputCoder.getValueCoder();
+        Coder<AccumT> vaCoder;
         try {
           vaCoder = keyed.getAccumulatorCoder(
               context.getPipeline().getCoderRegistry(), keyCoder, viCoder);
         } catch (CannotProvideCoderException e) {
           throw new IllegalStateException("Could not determine coder for accumulator", e);
         }
-        Coder<KV<K, VI>> kviCoder = KvCoder.of(keyCoder, viCoder);
-        Coder<KV<K, VA>> kvaCoder = KvCoder.of(keyCoder, vaCoder);
+        Coder<KV<K, InputT>> kviCoder = KvCoder.of(keyCoder, viCoder);
+        Coder<KV<K, AccumT>> kvaCoder = KvCoder.of(keyCoder, vaCoder);
 
         // We need to duplicate K as both the key of the JavaPairRDD as well as inside the value,
         // since the functions passed to combineByKey don't receive the associated key of each
         // value, and we need to map back into methods in Combine.KeyedCombineFn, which each
-        // require the key in addition to the VI's and VA's being merged/accumulated. Once Spark
-        // provides a way to include keys in the arguments of combine/merge functions, we won't
-        // need to duplicate the keys anymore.
+        // require the key in addition to the InputT's and AccumT's being merged/accumulated.
+        // Once Spark provides a way to include keys in the arguments of combine/merge functions,
+        // we won't need to duplicate the keys anymore.
 
         // Key has to bw windowed in order to group by window as well
-        JavaPairRDD<WindowedValue<K>, WindowedValue<KV<K, VI>>> inRddDuplicatedKeyPair =
+        JavaPairRDD<WindowedValue<K>, WindowedValue<KV<K, InputT>>> inRddDuplicatedKeyPair =
             inRdd.flatMapToPair(
-                new PairFlatMapFunction<WindowedValue<KV<K, VI>>, WindowedValue<K>,
-                    WindowedValue<KV<K, VI>>>() {
+                new PairFlatMapFunction<WindowedValue<KV<K, InputT>>, WindowedValue<K>,
+                    WindowedValue<KV<K, InputT>>>() {
                   @Override
                   public Iterable<Tuple2<WindowedValue<K>,
-                      WindowedValue<KV<K, VI>>>> call(WindowedValue<KV<K, VI>> kv) {
+                      WindowedValue<KV<K, InputT>>>>
+                  call(WindowedValue<KV<K, InputT>> kv) {
                       List<Tuple2<WindowedValue<K>,
-                          WindowedValue<KV<K, VI>>>> tuple2s =
+                          WindowedValue<KV<K, InputT>>>> tuple2s =
                           Lists.newArrayListWithCapacity(kv.getWindows().size());
                       for (BoundedWindow boundedWindow: kv.getWindows()) {
                         WindowedValue<K> wk = WindowedValue.of(kv.getValue().getKey(),
@@ -289,10 +305,10 @@ public final class TransformTranslator {
         final WindowedValue.FullWindowedValueCoder<K> wkCoder =
                 WindowedValue.FullWindowedValueCoder.of(keyCoder,
                 context.getInput(transform).getWindowingStrategy().getWindowFn().windowCoder());
-        final WindowedValue.FullWindowedValueCoder<KV<K, VI>> wkviCoder =
+        final WindowedValue.FullWindowedValueCoder<KV<K, InputT>> wkviCoder =
                 WindowedValue.FullWindowedValueCoder.of(kviCoder,
                 context.getInput(transform).getWindowingStrategy().getWindowFn().windowCoder());
-        final WindowedValue.FullWindowedValueCoder<KV<K, VA>> wkvaCoder =
+        final WindowedValue.FullWindowedValueCoder<KV<K, AccumT>> wkvaCoder =
                 WindowedValue.FullWindowedValueCoder.of(kvaCoder,
                 context.getInput(transform).getWindowingStrategy().getWindowFn().windowCoder());
 
@@ -301,58 +317,69 @@ public final class TransformTranslator {
         JavaPairRDD<ByteArray, byte[]> inRddDuplicatedKeyPairBytes = inRddDuplicatedKeyPair
             .mapToPair(CoderHelpers.toByteFunction(wkCoder, wkviCoder));
 
-        // The output of combineByKey will be "VA" (accumulator) types rather than "VO" (final
-        // output types) since Combine.CombineFn only provides ways to merge VAs, and no way
-        // to merge VOs.
-        JavaPairRDD</*K*/ ByteArray, /*KV<K, VA>*/ byte[]> accumulatedBytes =
+        // The output of combineByKey will be "AccumT" (accumulator)
+        // types rather than "OutputT" (final output types) since Combine.CombineFn
+        // only provides ways to merge AccumTs, and no way to merge OutputTs.
+        JavaPairRDD</*K*/ ByteArray, /*KV<K, AccumT>*/ byte[]> accumulatedBytes =
             inRddDuplicatedKeyPairBytes.combineByKey(
-            new Function</*KV<K, VI>*/ byte[], /*KV<K, VA>*/ byte[]>() {
+            new Function</*KV<K, InputT>*/ byte[], /*KV<K, AccumT>*/ byte[]>() {
               @Override
-              public /*KV<K, VA>*/ byte[] call(/*KV<K, VI>*/ byte[] input) {
-                WindowedValue<KV<K, VI>> wkvi = CoderHelpers.fromByteArray(input, wkviCoder);
-                VA va = keyed.createAccumulator(wkvi.getValue().getKey());
+              public /*KV<K, AccumT>*/ byte[] call(/*KV<K, InputT>*/ byte[] input) {
+                WindowedValue<KV<K, InputT>> wkvi =
+                    CoderHelpers.fromByteArray(input, wkviCoder);
+                AccumT va = keyed.createAccumulator(wkvi.getValue().getKey());
                 va = keyed.addInput(wkvi.getValue().getKey(), va, wkvi.getValue().getValue());
-                WindowedValue<KV<K, VA>> wkva =
+                WindowedValue<KV<K, AccumT>> wkva =
                     WindowedValue.of(KV.of(wkvi.getValue().getKey(), va), wkvi.getTimestamp(),
                     wkvi.getWindows(), wkvi.getPane());
                 return CoderHelpers.toByteArray(wkva, wkvaCoder);
               }
             },
-            new Function2</*KV<K, VA>*/ byte[], /*KV<K, VI>*/ byte[], /*KV<K, VA>*/ byte[]>() {
+            new Function2</*KV<K, AccumT>*/ byte[],
+                /*KV<K, InputT>*/ byte[],
+                /*KV<K, AccumT>*/ byte[]>() {
               @Override
-              public /*KV<K, VA>*/ byte[] call(/*KV<K, VA>*/ byte[] acc,
-                  /*KV<K, VI>*/ byte[] input) {
-                WindowedValue<KV<K, VA>> wkva = CoderHelpers.fromByteArray(acc, wkvaCoder);
-                WindowedValue<KV<K, VI>> wkvi = CoderHelpers.fromByteArray(input, wkviCoder);
-                VA va = keyed.addInput(wkva.getValue().getKey(), wkva.getValue().getValue(),
+              public /*KV<K, AccumT>*/ byte[] call(/*KV<K, AccumT>*/ byte[] acc,
+                  /*KV<K, InputT>*/ byte[] input) {
+                WindowedValue<KV<K, AccumT>> wkva =
+                    CoderHelpers.fromByteArray(acc, wkvaCoder);
+                WindowedValue<KV<K, InputT>> wkvi =
+                    CoderHelpers.fromByteArray(input, wkviCoder);
+                AccumT va =
+                    keyed.addInput(wkva.getValue().getKey(), wkva.getValue().getValue(),
                     wkvi.getValue().getValue());
                 wkva = WindowedValue.of(KV.of(wkva.getValue().getKey(), va), wkva.getTimestamp(),
                     wkva.getWindows(), wkva.getPane());
                 return CoderHelpers.toByteArray(wkva, wkvaCoder);
               }
             },
-            new Function2</*KV<K, VA>*/ byte[], /*KV<K, VA>*/ byte[], /*KV<K, VA>*/ byte[]>() {
+            new Function2</*KV<K, AccumT>*/ byte[],
+                /*KV<K, AccumT>*/ byte[],
+                /*KV<K, AccumT>*/ byte[]>() {
               @Override
-              public /*KV<K, VA>*/ byte[] call(/*KV<K, VA>*/ byte[] acc1,
-                  /*KV<K, VA>*/ byte[] acc2) {
-                WindowedValue<KV<K, VA>> wkva1 = CoderHelpers.fromByteArray(acc1, wkvaCoder);
-                WindowedValue<KV<K, VA>> wkva2 = CoderHelpers.fromByteArray(acc2, wkvaCoder);
-                VA va = keyed.mergeAccumulators(wkva1.getValue().getKey(),
+              public /*KV<K, AccumT>*/ byte[] call(/*KV<K, AccumT>*/ byte[] acc1,
+                  /*KV<K, AccumT>*/ byte[] acc2) {
+                WindowedValue<KV<K, AccumT>> wkva1 =
+                    CoderHelpers.fromByteArray(acc1, wkvaCoder);
+                WindowedValue<KV<K, AccumT>> wkva2 =
+                    CoderHelpers.fromByteArray(acc2, wkvaCoder);
+                AccumT va = keyed.mergeAccumulators(wkva1.getValue().getKey(),
                     // don't use Guava's ImmutableList.of as values may be null
                     Collections.unmodifiableList(Arrays.asList(wkva1.getValue().getValue(),
                     wkva2.getValue().getValue())));
-                WindowedValue<KV<K, VA>> wkva = WindowedValue.of(KV.of(wkva1.getValue().getKey(),
+                WindowedValue<KV<K, AccumT>> wkva =
+                    WindowedValue.of(KV.of(wkva1.getValue().getKey(),
                     va), wkva1.getTimestamp(), wkva1.getWindows(), wkva1.getPane());
                 return CoderHelpers.toByteArray(wkva, wkvaCoder);
               }
             });
 
-        JavaPairRDD<WindowedValue<K>, WindowedValue<VO>> extracted = accumulatedBytes
+        JavaPairRDD<WindowedValue<K>, WindowedValue<OutputT>> extracted = accumulatedBytes
             .mapToPair(CoderHelpers.fromByteFunction(wkCoder, wkvaCoder))
             .mapValues(
-                new Function<WindowedValue<KV<K, VA>>, WindowedValue<VO>>() {
+                new Function<WindowedValue<KV<K, AccumT>>, WindowedValue<OutputT>>() {
                   @Override
-                  public WindowedValue<VO> call(WindowedValue<KV<K, VA>> acc) {
+                  public WindowedValue<OutputT> call(WindowedValue<KV<K, AccumT>> acc) {
                     return WindowedValue.of(keyed.extractOutput(acc.getValue().getKey(),
                         acc.getValue().getValue()), acc.getTimestamp(),
                         acc.getWindows(), acc.getPane());
@@ -361,12 +388,14 @@ public final class TransformTranslator {
 
         context.setOutputRDD(transform,
             fromPair(extracted)
-            .map(new Function<KV<WindowedValue<K>, WindowedValue<VO>>, WindowedValue<KV<K, VO>>>() {
+            .map(new Function<KV<WindowedValue<K>, WindowedValue<OutputT>>,
+                WindowedValue<KV<K, OutputT>>>() {
               @Override
-              public WindowedValue<KV<K, VO>> call(KV<WindowedValue<K>, WindowedValue<VO>> kwvo)
+              public WindowedValue<KV<K, OutputT>> call(KV<WindowedValue<K>,
+                  WindowedValue<OutputT>> kwvo)
                   throws Exception {
-                WindowedValue<VO> wvo = kwvo.getValue();
-                KV<K, VO> kvo = KV.of(kwvo.getKey().getValue(), wvo.getValue());
+                WindowedValue<OutputT> wvo = kwvo.getValue();
+                KV<K, OutputT> kvo = KV.of(kwvo.getKey().getValue(), wvo.getValue());
                 return WindowedValue.of(kvo, wvo.getTimestamp(), wvo.getWindows(), wvo.getPane());
               }
             }));
@@ -374,18 +403,20 @@ public final class TransformTranslator {
     };
   }
 
-  private static final class KVFunction<K, VI, VO>
-      implements Function<WindowedValue<KV<K, Iterable<VI>>>, WindowedValue<KV<K, VO>>> {
-    private final Combine.KeyedCombineFn<K, VI, ?, VO> keyed;
+  private static final class KVFunction<K, InputT, OutputT>
+      implements Function<WindowedValue<KV<K, Iterable<InputT>>>,
+      WindowedValue<KV<K, OutputT>>> {
+    private final Combine.KeyedCombineFn<K, InputT, ?, OutputT> keyed;
 
-     KVFunction(Combine.KeyedCombineFn<K, VI, ?, VO> keyed) {
+     KVFunction(Combine.KeyedCombineFn<K, InputT, ?, OutputT> keyed) {
       this.keyed = keyed;
     }
 
     @Override
-    public WindowedValue<KV<K, VO>> call(WindowedValue<KV<K, Iterable<VI>>> windowedKv)
+    public WindowedValue<KV<K, OutputT>> call(WindowedValue<KV<K,
+        Iterable<InputT>>> windowedKv)
         throws Exception {
-      KV<K, Iterable<VI>> kv = windowedKv.getValue();
+      KV<K, Iterable<InputT>> kv = windowedKv.getValue();
       return WindowedValue.of(KV.of(kv.getKey(), keyed.apply(kv.getKey(), kv.getValue())),
           windowedKv.getTimestamp(), windowedKv.getWindows(), windowedKv.getPane());
     }
@@ -409,17 +440,17 @@ public final class TransformTranslator {
     });
   }
 
-  private static <I, O> TransformEvaluator<ParDo.Bound<I, O>> parDo() {
-    return new TransformEvaluator<ParDo.Bound<I, O>>() {
+  private static <InputT, OutputT> TransformEvaluator<ParDo.Bound<InputT, OutputT>> parDo() {
+    return new TransformEvaluator<ParDo.Bound<InputT, OutputT>>() {
       @Override
-      public void evaluate(ParDo.Bound<I, O> transform, EvaluationContext context) {
-        DoFnFunction<I, O> dofn =
+      public void evaluate(ParDo.Bound<InputT, OutputT> transform, EvaluationContext context) {
+        DoFnFunction<InputT, OutputT> dofn =
             new DoFnFunction<>(transform.getFn(),
                 context.getRuntimeContext(),
                 getSideInputs(transform.getSideInputs(), context));
         @SuppressWarnings("unchecked")
-        JavaRDDLike<WindowedValue<I>, ?> inRDD =
-            (JavaRDDLike<WindowedValue<I>, ?>) context.getInputRDD(transform);
+        JavaRDDLike<WindowedValue<InputT>, ?> inRDD =
+            (JavaRDDLike<WindowedValue<InputT>, ?>) context.getInputRDD(transform);
         context.setOutputRDD(transform, inRDD.mapPartitions(dofn));
       }
     };
@@ -427,20 +458,20 @@ public final class TransformTranslator {
 
   private static final FieldGetter MULTIDO_FG = new FieldGetter(ParDo.BoundMulti.class);
 
-  private static <I, O> TransformEvaluator<ParDo.BoundMulti<I, O>> multiDo() {
-    return new TransformEvaluator<ParDo.BoundMulti<I, O>>() {
+  private static <InputT, OutputT> TransformEvaluator<ParDo.BoundMulti<InputT, OutputT>> multiDo() {
+    return new TransformEvaluator<ParDo.BoundMulti<InputT, OutputT>>() {
       @Override
-      public void evaluate(ParDo.BoundMulti<I, O> transform, EvaluationContext context) {
-        TupleTag<O> mainOutputTag = MULTIDO_FG.get("mainOutputTag", transform);
-        MultiDoFnFunction<I, O> multifn = new MultiDoFnFunction<>(
+      public void evaluate(ParDo.BoundMulti<InputT, OutputT> transform, EvaluationContext context) {
+        TupleTag<OutputT> mainOutputTag = MULTIDO_FG.get("mainOutputTag", transform);
+        MultiDoFnFunction<InputT, OutputT> multifn = new MultiDoFnFunction<>(
             transform.getFn(),
             context.getRuntimeContext(),
             mainOutputTag,
             getSideInputs(transform.getSideInputs(), context));
 
         @SuppressWarnings("unchecked")
-        JavaRDDLike<WindowedValue<I>, ?> inRDD =
-            (JavaRDDLike<WindowedValue<I>, ?>) context.getInputRDD(transform);
+        JavaRDDLike<WindowedValue<InputT>, ?> inRDD =
+            (JavaRDDLike<WindowedValue<InputT>, ?>) context.getInputRDD(transform);
         JavaPairRDD<TupleTag<?>, WindowedValue<?>> all = inRDD
             .mapPartitionsToPair(multifn)
             .cache();
@@ -716,10 +747,12 @@ public final class TransformTranslator {
     };
   }
 
-  private static <R, W> TransformEvaluator<View.CreatePCollectionView<R, W>> createPCollView() {
-    return new TransformEvaluator<View.CreatePCollectionView<R, W>>() {
+  private static <ReadT, WriteT> TransformEvaluator<View.CreatePCollectionView<ReadT, WriteT>>
+  createPCollView() {
+    return new TransformEvaluator<View.CreatePCollectionView<ReadT, WriteT>>() {
       @Override
-      public void evaluate(View.CreatePCollectionView<R, W> transform, EvaluationContext context) {
+      public void evaluate(View.CreatePCollectionView<ReadT, WriteT> transform,
+                           EvaluationContext context) {
         Iterable<? extends WindowedValue<?>> iter =
             context.getWindowedValues(context.getInput(transform));
         context.setPView(context.getOutput(transform), iter);
@@ -787,10 +820,11 @@ public final class TransformTranslator {
     EVALUATORS.put(Window.Bound.class, window());
   }
 
-  public static <PT extends PTransform<?, ?>> TransformEvaluator<PT>
-  getTransformEvaluator(Class<PT> clazz) {
+  public static <TransformT extends PTransform<?, ?>> TransformEvaluator<TransformT>
+  getTransformEvaluator(Class<TransformT> clazz) {
     @SuppressWarnings("unchecked")
-    TransformEvaluator<PT> transform = (TransformEvaluator<PT>) EVALUATORS.get(clazz);
+    TransformEvaluator<TransformT> transform =
+        (TransformEvaluator<TransformT>) EVALUATORS.get(clazz);
     if (transform == null) {
       throw new IllegalStateException("No TransformEvaluator registered for " + clazz);
     }
@@ -808,7 +842,8 @@ public final class TransformTranslator {
     }
 
     @Override
-    public <PT extends PTransform<?, ?>> TransformEvaluator<PT> translate(Class<PT> clazz) {
+    public <TransformT extends PTransform<?, ?>> TransformEvaluator<TransformT> translate(
+        Class<TransformT> clazz) {
       return getTransformEvaluator(clazz);
     }
   }



Mime
View raw message