flink-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From aljos...@apache.org
Subject [12/23] flink git commit: [FLINK-6107] Enable Javadoc checks in streaming checkstyle
Date Wed, 26 Apr 2017 10:07:34 GMT
[FLINK-6107] Enable Javadoc checks in streaming checkstyle


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/2581a7b8
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/2581a7b8
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/2581a7b8

Branch: refs/heads/master
Commit: 2581a7b88baf777c5cdd1cccbffb861ac1e454fa
Parents: 80d1c06
Author: Aljoscha Krettek <aljoscha.krettek@gmail.com>
Authored: Sun Mar 19 11:21:17 2017 +0100
Committer: Aljoscha Krettek <aljoscha.krettek@gmail.com>
Committed: Wed Apr 26 10:36:50 2017 +0200

----------------------------------------------------------------------
 .../MultiplexingStreamRecordSerializer.java     |   4 +
 .../streamrecord/StreamRecordSerializer.java    |   3 +-
 .../flink/streaming/api/TimeCharacteristic.java |  64 +++---
 .../streaming/api/checkpoint/Checkpointed.java  |   5 +-
 .../checkpoint/CheckpointedAsynchronously.java  |   3 +-
 .../api/checkpoint/CheckpointedFunction.java    |  15 +-
 .../api/checkpoint/CheckpointedRestoring.java   |   2 +-
 .../api/checkpoint/ListCheckpointed.java        |  11 +-
 .../api/collector/selector/DirectedOutput.java  |   5 +-
 .../selector/OutputSelectorWrapper.java         |  28 ---
 .../api/datastream/AllWindowedStream.java       |  78 +++----
 .../api/datastream/AsyncDataStream.java         |   8 +-
 .../api/datastream/CoGroupedStreams.java        |  23 +-
 .../api/datastream/ConnectedStreams.java        |   4 +-
 .../streaming/api/datastream/DataStream.java    | 101 ++++-----
 .../api/datastream/DataStreamSink.java          |  11 +-
 .../api/datastream/IterativeStream.java         |  15 +-
 .../streaming/api/datastream/JoinedStreams.java |  17 +-
 .../streaming/api/datastream/KeyedStream.java   |  28 ++-
 .../datastream/SingleOutputStreamOperator.java  |  56 ++---
 .../api/datastream/StreamProjection.java        |   6 +-
 .../api/datastream/WindowedStream.java          |  87 +++-----
 .../api/environment/CheckpointConfig.java       |  18 +-
 .../Flip6LocalStreamEnvironment.java            |   2 +-
 .../api/environment/LocalStreamEnvironment.java |   2 +-
 .../environment/RemoteStreamEnvironment.java    |  13 +-
 .../environment/StreamContextEnvironment.java   |   5 +
 .../environment/StreamExecutionEnvironment.java | 217 +++++++++----------
 .../api/environment/StreamPlanEnvironment.java  |   4 +
 .../functions/AscendingTimestampExtractor.java  |   2 +-
 .../api/functions/TimestampExtractor.java       |  12 +-
 .../aggregation/AggregationFunction.java        |   7 +-
 .../aggregation/ComparableAggregator.java       |   4 +
 .../api/functions/aggregation/Comparator.java   |   3 +
 .../functions/aggregation/SumAggregator.java    |   3 +
 .../api/functions/aggregation/SumFunction.java  |  17 +-
 .../api/functions/async/RichAsyncFunction.java  |   7 +-
 .../async/collector/AsyncCollector.java         |  11 +-
 .../api/functions/co/CoProcessFunction.java     |   4 +-
 .../api/functions/sink/RichSinkFunction.java    |   3 +
 .../api/functions/sink/SocketClientSink.java    |  10 +-
 .../api/functions/sink/WriteFormat.java         |   2 +-
 .../ContinuousFileMonitoringFunction.java       |   5 +-
 .../source/FileMonitoringFunction.java          |   6 +
 .../api/functions/source/FileReadFunction.java  |   3 +
 .../functions/source/FromElementsFunction.java  |  15 +-
 .../functions/source/FromIteratorFunction.java  |   3 +
 .../source/FromSplittableIteratorFunction.java  |   3 +
 .../source/InputFormatSourceFunction.java       |   3 +
 .../source/MessageAcknowledgingSourceBase.java  |  30 +--
 ...ltipleIdsMessageAcknowledgingSourceBase.java |   8 +-
 .../source/SocketTextStreamFunction.java        |  19 +-
 .../api/functions/source/SourceFunction.java    |   2 -
 .../source/TimestampedFileInputSplit.java       |  13 +-
 .../timestamps/AscendingTimestampExtractor.java |   2 +-
 .../AggregateApplyAllWindowFunction.java        |   2 +-
 .../windowing/AggregateApplyWindowFunction.java |   2 +-
 .../windowing/FoldApplyAllWindowFunction.java   |   6 +-
 .../FoldApplyProcessAllWindowFunction.java      |   7 +-
 .../FoldApplyProcessWindowFunction.java         |   7 +-
 .../windowing/FoldApplyWindowFunction.java      |   6 +-
 .../windowing/PassThroughAllWindowFunction.java |   5 +-
 .../windowing/PassThroughWindowFunction.java    |   5 +-
 .../windowing/ProcessAllWindowFunction.java     |   2 +-
 .../windowing/ProcessWindowFunction.java        |   2 +-
 .../windowing/ReduceApplyAllWindowFunction.java |   6 +-
 .../ReduceApplyProcessAllWindowFunction.java    |   7 +-
 .../ReduceApplyProcessWindowFunction.java       |   7 +-
 .../windowing/ReduceApplyWindowFunction.java    |   6 +-
 .../ReduceIterableAllWindowFunction.java        |  48 ----
 .../windowing/ReduceIterableWindowFunction.java |  48 ----
 .../windowing/RichAllWindowFunction.java        |   6 +-
 .../windowing/delta/CosineDistance.java         |   5 +-
 .../windowing/delta/EuclideanDistance.java      |   5 +-
 .../delta/ExtractionAwareDeltaFunction.java     |   4 +-
 .../delta/extractor/ArrayFromTuple.java         |   3 -
 .../delta/extractor/ConcatenatedExtract.java    |   3 -
 .../windowing/delta/extractor/Extractor.java    |   2 +-
 .../delta/extractor/FieldFromArray.java         |   6 +-
 .../delta/extractor/FieldFromTuple.java         |  11 +-
 .../delta/extractor/FieldsFromArray.java        |   4 +-
 .../delta/extractor/FieldsFromTuple.java        |   7 +-
 .../streaming/api/graph/JSONGenerator.java      |   3 +
 .../flink/streaming/api/graph/StreamConfig.java |   4 +
 .../flink/streaming/api/graph/StreamGraph.java  |   8 +-
 .../api/graph/StreamGraphGenerator.java         |  50 ++---
 .../api/graph/StreamGraphHasherV2.java          |   9 +-
 .../api/operators/AbstractStreamOperator.java   |  60 +++--
 .../operators/AbstractUdfStreamOperator.java    |   5 +-
 .../api/operators/HeapInternalTimerService.java |   2 +-
 .../api/operators/KeyedProcessOperator.java     |   4 +
 .../api/operators/ProcessOperator.java          |   4 +
 .../streaming/api/operators/StreamFilter.java   |   3 +
 .../streaming/api/operators/StreamFlatMap.java  |   3 +
 .../api/operators/StreamGroupedFold.java        |   4 +
 .../api/operators/StreamGroupedReduce.java      |   5 +
 .../streaming/api/operators/StreamMap.java      |   3 +
 .../streaming/api/operators/StreamOperator.java |  15 +-
 .../streaming/api/operators/StreamProject.java  |   5 +-
 .../streaming/api/operators/StreamSink.java     |   3 +
 .../api/operators/StreamSourceContexts.java     |  19 +-
 .../api/operators/StreamingRuntimeContext.java  |  11 +-
 .../api/operators/async/AsyncWaitOperator.java  |  22 +-
 .../streaming/api/operators/async/Emitter.java  |   8 +-
 .../async/queue/OrderedStreamElementQueue.java  |  10 +-
 .../async/queue/StreamElementQueueEntry.java    |   1 -
 .../async/queue/StreamRecordQueueEntry.java     |   4 +-
 .../queue/UnorderedStreamElementQueue.java      |  14 +-
 .../api/operators/co/CoProcessOperator.java     |   4 +
 .../api/operators/co/CoStreamFlatMap.java       |   4 +
 .../streaming/api/operators/co/CoStreamMap.java |   4 +
 .../operators/co/KeyedCoProcessOperator.java    |   4 +
 .../CoFeedbackTransformation.java               |  11 +-
 .../transformations/FeedbackTransformation.java |  19 +-
 .../transformations/OneInputTransformation.java |   2 +-
 .../PartitionTransformation.java                |   5 +-
 .../transformations/SelectTransformation.java   |   5 +-
 .../api/transformations/SinkTransformation.java |   2 +-
 .../transformations/SourceTransformation.java   |   2 +-
 .../transformations/SplitTransformation.java    |   7 +-
 .../transformations/StreamTransformation.java   |  56 +++--
 .../transformations/TwoInputTransformation.java |   2 +-
 .../transformations/UnionTransformation.java    |   8 +-
 .../streaming/api/watermark/Watermark.java      |  12 +-
 .../assigners/BaseAlignedWindowAssigner.java    |   5 +-
 .../assigners/EventTimeSessionWindows.java      |   5 +-
 .../api/windowing/assigners/GlobalWindows.java  |   7 +-
 .../assigners/MergingWindowAssigner.java        |   2 +-
 .../assigners/ProcessingTimeSessionWindows.java |   5 +-
 .../assigners/SlidingEventTimeWindows.java      |   3 +-
 .../assigners/SlidingProcessingTimeWindows.java |   5 +-
 .../assigners/TumblingEventTimeWindows.java     |   3 +-
 .../TumblingProcessingTimeWindows.java          |   5 +-
 .../api/windowing/assigners/WindowAssigner.java |   3 +-
 .../api/windowing/evictors/CountEvictor.java    |   2 +-
 .../api/windowing/evictors/DeltaEvictor.java    |   5 +-
 .../api/windowing/evictors/Evictor.java         |  12 +-
 .../api/windowing/evictors/TimeEvictor.java     |   2 +-
 .../streaming/api/windowing/time/Time.java      |   6 +-
 .../api/windowing/triggers/DeltaTrigger.java    |   3 +-
 .../windowing/triggers/EventTimeTrigger.java    |   3 +-
 .../api/windowing/triggers/PurgingTrigger.java  |   7 +-
 .../api/windowing/windows/GlobalWindow.java     |   9 +-
 .../api/windowing/windows/TimeWindow.java       |   2 +-
 .../streaming/api/windowing/windows/Window.java |   5 +-
 .../streaming/runtime/io/BarrierBuffer.java     |  45 ++--
 .../streaming/runtime/io/BarrierTracker.java    |  24 +-
 .../runtime/io/BlockingQueueBroker.java         |  10 +-
 .../streaming/runtime/io/BufferSpiller.java     |  40 ++--
 .../runtime/io/StreamInputProcessor.java        |  15 +-
 .../runtime/io/StreamRecordWriter.java          |  10 +-
 .../runtime/io/StreamTwoInputProcessor.java     |  15 +-
 .../streaming/runtime/io/StreamingReader.java   |  30 ---
 ...ractAlignedProcessingTimeWindowOperator.java |   3 +
 .../windowing/AbstractKeyedTimePanes.java       |   4 +-
 .../windowing/AccumulatingKeyedTimePanes.java   |   7 +-
 ...ccumulatingProcessingTimeWindowOperator.java |   3 +
 .../windowing/AggregatingKeyedTimePanes.java    |   7 +-
 ...AggregatingProcessingTimeWindowOperator.java |   3 +
 .../windowing/EvictingWindowOperator.java       |   6 +-
 .../runtime/operators/windowing/KeyMap.java     |  34 +--
 .../operators/windowing/TimestampedValue.java   |   8 +-
 .../operators/windowing/WindowOperator.java     |  15 +-
 .../functions/InternalWindowFunction.java       |   5 +
 .../ConfigurableStreamPartitioner.java          |   2 +-
 .../runtime/partitioner/GlobalPartitioner.java  |   2 +-
 .../runtime/partitioner/StreamPartitioner.java  |   3 +
 .../runtime/streamrecord/LatencyMarker.java     |   8 +-
 .../streamrecord/StreamElementSerializer.java   |   6 +-
 .../runtime/streamrecord/StreamRecord.java      |   6 +-
 .../streamstatus/StatusWatermarkValve.java      |  28 ++-
 .../runtime/streamstatus/StreamStatus.java      |  10 +-
 .../runtime/tasks/AsynchronousException.java    |   2 +-
 .../runtime/tasks/OneInputStreamTask.java       |   3 +
 .../streaming/runtime/tasks/OperatorChain.java  |   1 -
 .../runtime/tasks/ProcessingTimeCallback.java   |   8 +-
 .../runtime/tasks/SourceStreamTask.java         |   4 +-
 .../runtime/tasks/StreamIterationHead.java      |   4 +
 .../runtime/tasks/StreamIterationTail.java      |   4 +
 .../streaming/runtime/tasks/StreamTask.java     |  65 +++---
 .../tasks/SystemProcessingTimeService.java      |   7 +-
 .../streaming/runtime/tasks/TimerException.java |   2 +-
 .../runtime/tasks/TwoInputStreamTask.java       |   3 +
 .../streaming/runtime/tasks/package-info.java   |   4 +-
 .../TypeInformationSerializationSchema.java     |  14 +-
 .../streaming/util/typeutils/FieldAccessor.java |  13 +-
 .../api/operators/StreamProjectTest.java        |   2 +-
 .../windowing/EvictingWindowOperatorTest.java   |  11 +-
 .../operators/windowing/TimeWindowTest.java     |   2 +-
 .../tasks/OneInputStreamTaskTestHarness.java    |   2 +-
 .../tasks/SourceStreamTaskStoppingTest.java     |   2 +-
 .../runtime/tasks/SourceStreamTaskTest.java     |   2 +-
 .../runtime/tasks/StreamMockEnvironment.java    |   2 +-
 .../tasks/TwoInputStreamTaskTestHarness.java    |   2 +-
 .../streaming/util/EvenOddOutputSelector.java   |   2 +-
 .../apache/flink/streaming/util/NoOpIntMap.java |   2 +-
 .../flink/streaming/util/TestHarnessUtil.java   |   2 +-
 .../util/TwoInputStreamOperatorTestHarness.java |   2 +-
 tools/maven/strict-checkstyle.xml               |  42 ++--
 199 files changed, 1139 insertions(+), 1198 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/migration/streaming/runtime/streamrecord/MultiplexingStreamRecordSerializer.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/migration/streaming/runtime/streamrecord/MultiplexingStreamRecordSerializer.java b/flink-streaming-java/src/main/java/org/apache/flink/migration/streaming/runtime/streamrecord/MultiplexingStreamRecordSerializer.java
index a0f5a60..db9977b 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/migration/streaming/runtime/streamrecord/MultiplexingStreamRecordSerializer.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/migration/streaming/runtime/streamrecord/MultiplexingStreamRecordSerializer.java
@@ -29,6 +29,10 @@ import java.io.IOException;
 
 import static java.util.Objects.requireNonNull;
 
+/**
+ * Legacy multiplexing {@link TypeSerializer} for stream records, watermarks and other stream
+ * elements.
+ */
 public class MultiplexingStreamRecordSerializer<T> extends TypeSerializer<StreamElement> {
 
 

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/migration/streaming/runtime/streamrecord/StreamRecordSerializer.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/migration/streaming/runtime/streamrecord/StreamRecordSerializer.java b/flink-streaming-java/src/main/java/org/apache/flink/migration/streaming/runtime/streamrecord/StreamRecordSerializer.java
index 0235ab8..885ece5 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/migration/streaming/runtime/streamrecord/StreamRecordSerializer.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/migration/streaming/runtime/streamrecord/StreamRecordSerializer.java
@@ -31,8 +31,7 @@ import java.io.IOException;
  * Serializer for {@link StreamRecord}. This version ignores timestamps and only deals with
  * the element.
  *
- * <p>
- * {@link MultiplexingStreamRecordSerializer} is a version that deals with timestamps and also
+ * <p>{@link MultiplexingStreamRecordSerializer} is a version that deals with timestamps and also
  * multiplexes {@link org.apache.flink.streaming.api.watermark.Watermark Watermarks} in the same
  * stream with {@link StreamRecord StreamRecords}.
  *

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/TimeCharacteristic.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/TimeCharacteristic.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/TimeCharacteristic.java
index df2e4b9..9ad5879 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/TimeCharacteristic.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/TimeCharacteristic.java
@@ -33,10 +33,11 @@ public enum TimeCharacteristic {
 	 * to determine the current time of the data stream. Processing-time windows trigger based
 	 * on wall-clock time and include whatever elements happen to have arrived at the operator at
 	 * that point in time.
-	 * <p>
-	 * Using processing time for window operations results in general in quite non-deterministic results,
-	 * because the contents of the windows depends on the speed in which elements arrive. It is, however,
-	 * the cheapest method of forming windows and the method that introduces the least latency.
+	 *
+	 * <p>Using processing time for window operations results in general in quite non-deterministic
+	 * results, because the contents of the windows depends on the speed in which elements arrive.
+	 * It is, however, the cheapest method of forming windows and the method that introduces the
+	 * least latency.
 	 */
 	ProcessingTime,
 
@@ -45,41 +46,42 @@ public enum TimeCharacteristic {
 	 * when the element enters the Flink streaming data flow. Operations like windows group the
 	 * elements based on that time, meaning that processing speed within the streaming dataflow
 	 * does not affect windowing, but only the speed at which sources receive elements.
-	 * <p>
-	 * Ingestion time is often a good compromise between processing time and event time.
+	 *
+	 * <p>Ingestion time is often a good compromise between processing time and event time.
 	 * It does not need and special manual form of watermark generation, and events are typically
 	 * not too much out-or-order when they arrive at operators; in fact, out-of-orderness can 
-	 * only be introduced by streaming shuffles or split/join/union operations. The fact that elements
-	 * are not very much out-of-order means that the latency increase is moderate, compared to event
+	 * only be introduced by streaming shuffles or split/join/union operations. The fact that
+	 * elements are not very much out-of-order means that the latency increase is moderate,
+	 * compared to event
 	 * time.
 	 */
 	IngestionTime,
 
 	/**
 	 * Event time means that the time of each individual element in the stream (also called event)
-	 * is determined by the event's individual custom timestamp. These timestamps either exist in the
-	 * elements from before they entered the Flink streaming dataflow, or are user-assigned at the sources.
-	 * The big implication of this is that it allows for elements to arrive in the sources and in
-	 * all operators out of order, meaning that elements with earlier timestamps may arrive after
-	 * elements with later timestamps.
-	 * <p>
-	 * Operators that window or order data with respect to event time must buffer data until they can
-	 * be sure that all timestamps for a certain time interval have been received. This is handled by
-	 * the so called "time watermarks".
-	 * <p>
-	 * Operations based on event time are very predictable - the result of windowing operations
-	 * is typically identical no matter when the window is executed and how fast the streams operate.
-	 * At the same time, the buffering and tracking of event time is also costlier than operating
-	 * with processing time, and typically also introduces more latency. The amount of extra
-	 * cost depends mostly on how much out of order the elements arrive, i.e., how long the time span
-	 * between the arrival of early and late elements is. With respect to the "time watermarks", this
-	 * means that the cost typically depends on how early or late the watermarks can be generated
-	 * for their timestamp.
-	 * <p>
-	 * In relation to {@link #IngestionTime}, the event time is similar, but refers the the event's
-	 * original time, rather than the time assigned at the data source. Practically, that means that
-	 * event time has generally more meaning, but also that it takes longer to determine that all
-	 * elements for a certain time have arrived.
+	 * is determined by the event's individual custom timestamp. These timestamps either exist in
+	 * the elements from before they entered the Flink streaming dataflow, or are user-assigned at
+	 * the sources. The big implication of this is that it allows for elements to arrive in the
+	 * sources and in all operators out of order, meaning that elements with earlier timestamps may
+	 * arrive after elements with later timestamps.
+	 *
+	 * <p>Operators that window or order data with respect to event time must buffer data until they
+	 * can be sure that all timestamps for a certain time interval have been received. This is
+	 * handled by the so called "time watermarks".
+	 *
+	 * <p>Operations based on event time are very predictable - the result of windowing operations
+	 * is typically identical no matter when the window is executed and how fast the streams
+	 * operate. At the same time, the buffering and tracking of event time is also costlier than
+	 * operating with processing time, and typically also introduces more latency. The amount of
+	 * extra cost depends mostly on how much out of order the elements arrive, i.e., how long the
+	 * time span between the arrival of early and late elements is. With respect to the
+	 * "time watermarks", this means that the cost typically depends on how early or late the
+	 * watermarks can be generated for their timestamp.
+	 *
+	 * <p>In relation to {@link #IngestionTime}, the event time is similar, but refers the the
+	 * event's original time, rather than the time assigned at the data source. Practically, that
+	 * means that event time has generally more meaning, but also that it takes longer to determine
+	 * that all elements for a certain time have arrived.
 	 */
 	EventTime
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/Checkpointed.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/Checkpointed.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/Checkpointed.java
index dd93462..2479e0a 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/Checkpointed.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/Checkpointed.java
@@ -28,14 +28,11 @@ import java.io.Serializable;
  * and return a snapshot of their state, which will be checkpointed.
  * 
  * <h1>Deprecation and Replacement</h1>
- *
  * The short cut replacement for this interface is via {@link ListCheckpointed} and works
  * as shown in the example below. The {@code ListCheckpointed} interface returns a list of
  * elements (
- * 
- * 
  *
- * <pre>{@code
+ * <p><pre>{@code
  * public class ExampleFunction<T> implements MapFunction<T, T>, ListCheckpointed<Integer> {
  *
  *     private int count;

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedAsynchronously.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedAsynchronously.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedAsynchronously.java
index 4bafd90..4be522c 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedAsynchronously.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedAsynchronously.java
@@ -29,12 +29,11 @@ import java.io.Serializable;
  * this interface has not been implemented.
  * 
  * <h1>Deprecation and Replacement</h1>
- * 
  * The shortcut replacement for this interface is via {@link ListCheckpointed} and works
  * as shown in the example below. Please refer to the JavaDocs of {@link ListCheckpointed} for
  * a more detailed description of how to use the new interface.
  * 
- * <pre>{@code
+ * <p><pre>{@code
  * public class ExampleFunction<T> implements MapFunction<T, T>, ListCheckpointed<Integer> {
  * 
  *     private int count;

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedFunction.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedFunction.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedFunction.java
index 51ac5db..556e957 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedFunction.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedFunction.java
@@ -36,7 +36,6 @@ import org.apache.flink.runtime.state.FunctionSnapshotContext;
  * abstraction represented by this interface.
  * 
  * <h1>Initialization</h1>
- * 
  * The {@link CheckpointedFunction#initializeState(FunctionInitializationContext)} is called when
  * the parallel instance of the transformation function is created during distributed execution.
  * The method gives access to the {@link FunctionInitializationContext} which in turn gives access
@@ -44,13 +43,13 @@ import org.apache.flink.runtime.state.FunctionSnapshotContext;
  * 
  * <p>The {@code OperatorStateStore} and {@code KeyedStateStore} give access to the data structures
  * in which state should be stored for Flink to transparently manage and checkpoint it, such as
- * {@link org.apache.flink.api.common.state.ValueState} or {@link org.apache.flink.api.common.state.ListState}.
-
- * <p><i>Note:</i> The {@code KeyedStateStore} can only be used when the transformation supports
+ * {@link org.apache.flink.api.common.state.ValueState} or
+ * {@link org.apache.flink.api.common.state.ListState}.
+ *
+ * <p><b>Note:</b> The {@code KeyedStateStore} can only be used when the transformation supports
  * <i>keyed state</i>, i.e., when it is applied on a keyed stream (after a {@code keyBy(...)}).
  * 
  * <h1>Snapshot</h1>
- * 
  * The {@link CheckpointedFunction#snapshotState(FunctionSnapshotContext)} is called whenever a
  * checkpoint takes a state snapshot of the transformation function. Inside this method, functions typically
  * make sure that the checkpointed data structures (obtained in the initialization phase) are up
@@ -61,7 +60,6 @@ import org.apache.flink.runtime.state.FunctionSnapshotContext;
  * external systems.
  *
  * <h1>Example</h1>
- * 
  * The code example below illustrates how to use this interface for a function that keeps counts
  * of events per key and per parallel partition (parallel instance of the transformation function
  * during distributed execution).
@@ -69,7 +67,7 @@ import org.apache.flink.runtime.state.FunctionSnapshotContext;
  * adding up the counters of partitions that get merged on scale-down. Note that this is a
  * toy example, but should illustrate the basic skeleton for a stateful function.
  * 
- * <pre>{@code
+ * <p><pre>{@code
  * public class MyFunction<T> implements MapFunction<T, T>, CheckpointedFunction {
  *
  *     private ReducingState<Long> countPerKey;
@@ -112,18 +110,15 @@ import org.apache.flink.runtime.state.FunctionSnapshotContext;
  * <hr>
  * 
  * <h1><a name="shortcuts">Shortcuts</a></h1>
- * 
  * There are various ways that transformation functions can use state without implementing the
  * full-fledged {@code CheckpointedFunction} interface:
  * 
  * <h4>Operator State</h4>
- *
  * Checkpointing some state that is part of the function object itself is possible in a simpler way
  * by directly implementing the {@link ListCheckpointed} interface. 
  * That mechanism is similar to the previously used {@link Checkpointed} interface.
  * 
  * <h4>Keyed State</h4>
- * 
  * Access to keyed state is possible via the {@link RuntimeContext}'s methods:
  * <pre>{@code
  * public class CountPerKeyFunction<T> extends RichMapFunction<T, T> {

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedRestoring.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedRestoring.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedRestoring.java
index 1fd652f..cfaa505 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedRestoring.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedRestoring.java
@@ -35,7 +35,7 @@ public interface CheckpointedRestoring<T extends Serializable> {
 	 * Restores the state of the function or operator to that of a previous checkpoint.
 	 * This method is invoked when a function is executed as part of a recovery run.
 	 *
-	 * Note that restoreState() is called before open().
+	 * <p>Note that restoreState() is called before open().
 	 *
 	 * @param state The state to be restored.
 	 */

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/ListCheckpointed.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/ListCheckpointed.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/ListCheckpointed.java
index 84a9700..ab18c58 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/ListCheckpointed.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/ListCheckpointed.java
@@ -38,9 +38,8 @@ import java.util.List;
  * of the state objects, or have multiple named states.
  * 
  * <h2>State Redistribution</h2>
- * 
  * State redistribution happens when the parallelism of the operator is changed.
- * State redistribution of <i>operator state<i> (to which category the state handled by this
+ * State redistribution of <i>operator state</i> (to which category the state handled by this
  * interface belongs) always goes through a checkpoint, so it appears
  * to the transformation functions like a failure/recovery combination, where recovery happens
  * with a different parallelism.
@@ -59,7 +58,7 @@ import java.util.List;
  * +----+----+   +----+----+   +----+
  * </pre>
  * 
- * Recovering the checkpoint with <i>parallelism = 5</i> yields the following state assignment:
+ * <p>Recovering the checkpoint with <i>parallelism = 5</i> yields the following state assignment:
  * <pre>
  * func_1   func_2   func_3   func_4   func_5
  * +----+   +----+   +----+   +----+   +----+
@@ -76,10 +75,8 @@ import java.util.List;
  * </pre>
  * 
  * <h2>Example</h2>
- * 
  * The following example illustrates how to implement a {@code MapFunction} that counts all elements
- * passing through it, keeping the total count accurate under re-scaling  (changes or parallelism).
- * 
+ * passing through it, keeping the total count accurate under re-scaling  (changes or parallelism):
  * <pre>{@code
  * public class CountingFunction<T> implements MapFunction<T, Tuple2<T, Long>>, ListCheckpointed<Long> {
  * 
@@ -122,7 +119,7 @@ public interface ListCheckpointed<T extends Serializable> {
 	 * the {@link ListCheckpointed class docs} for an illustration how list-style state
 	 * redistribution works.
 	 * 
-	 * <p> As special case, the returned list may be null or empty (if the operator has no state)
+	 * <p>As special case, the returned list may be null or empty (if the operator has no state)
 	 * or it may contain a single element (if the operator state is indivisible).
 	 *
 	 * @param checkpointId The ID of the checkpoint - a unique and monotonously increasing value. 

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/collector/selector/DirectedOutput.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/collector/selector/DirectedOutput.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/collector/selector/DirectedOutput.java
index 6339506..a709d45 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/collector/selector/DirectedOutput.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/collector/selector/DirectedOutput.java
@@ -35,7 +35,10 @@ import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
 import org.apache.flink.util.OutputTag;
 import org.apache.flink.util.XORShiftRandom;
 
-
+/**
+ * Wrapping {@link Output} that forwards to other {@link Output Outputs} based on a list of
+ * {@link OutputSelector OutputSelectors}.
+ */
 public class DirectedOutput<OUT> implements Output<StreamRecord<OUT>> {
 	
 	protected final OutputSelector<OUT>[] outputSelectors;

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/collector/selector/OutputSelectorWrapper.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/collector/selector/OutputSelectorWrapper.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/collector/selector/OutputSelectorWrapper.java
deleted file mode 100644
index 24f6018..0000000
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/collector/selector/OutputSelectorWrapper.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.streaming.api.collector.selector;
-
-import java.io.Serializable;
-
-import org.apache.flink.annotation.PublicEvolving;
-
-@PublicEvolving
-public interface OutputSelectorWrapper<OUT> extends Serializable {
-
-	void sendOutputs(OUT record);
-}

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
index 50f0f85..7c0ebd2 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
@@ -75,15 +75,13 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * {@link org.apache.flink.streaming.api.windowing.assigners.WindowAssigner}. Window emission
  * is triggered based on a {@link org.apache.flink.streaming.api.windowing.triggers.Trigger}.
  *
- * <p>
- * If an {@link org.apache.flink.streaming.api.windowing.evictors.Evictor} is specified it will be
+ * <p>If an {@link org.apache.flink.streaming.api.windowing.evictors.Evictor} is specified it will be
  * used to evict elements from the window after
  * evaluation was triggered by the {@code Trigger} but before the actual evaluation of the window.
  * When using an evictor window performance will degrade significantly, since
  * pre-aggregation of window results cannot be used.
  *
- * <p>
- * Note that the {@code AllWindowedStream} is purely and API construct, during runtime
+ * <p>Note that the {@code AllWindowedStream} is purely an API construct; during runtime
  * the {@code AllWindowedStream} will be collapsed together with the
  * operation over the window into one single operation.
  *
@@ -93,10 +91,10 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
 @Public
 public class AllWindowedStream<T, W extends Window> {
 
-	/** The keyed data stream that is windowed by this stream */
+	/** The keyed data stream that is windowed by this stream. */
 	private final KeyedStream<T, Byte> input;
 
-	/** The window assigner */
+	/** The window assigner. */
 	private final WindowAssigner<? super T, W> windowAssigner;
 
 	/** The trigger that is used for window evaluation/emission. */
@@ -171,8 +169,7 @@ public class AllWindowedStream<T, W extends Window> {
 	/**
 	 * Sets the {@code Evictor} that should be used to evict elements from a window before emission.
 	 *
-	 * <p>
-	 * Note: When using an evictor window performance will degrade significantly, since
+	 * <p>Note: When using an evictor window performance will degrade significantly, since
 	 * incremental aggregation of window results cannot be used.
 	 */
 	@PublicEvolving
@@ -195,8 +192,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * of the window for each key individually. The output of the reduce function is interpreted
 	 * as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * This window will try and incrementally aggregate data as much as the window policies permit.
+	 * <p>This window will try and incrementally aggregate data as much as the window policies permit.
 	 * For example, tumbling time windows can aggregate the data, meaning that only one element per
 	 * key is stored. Sliding time windows will aggregate on the granularity of the slide interval,
 	 * so a few elements are stored per key (one per slide interval).
@@ -227,8 +223,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window for each key individually. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Arriving data is incrementally aggregated using the given reducer.
+	 * <p>Arriving data is incrementally aggregated using the given reducer.
 	 *
 	 * @param reduceFunction The reduce function that is used for incremental aggregation.
 	 * @param function The window function.
@@ -251,8 +246,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window for each key individually. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Arriving data is incrementally aggregated using the given reducer.
+	 * <p>Arriving data is incrementally aggregated using the given reducer.
 	 *
 	 * @param reduceFunction The reduce function that is used for incremental aggregation.
 	 * @param function The window function.
@@ -326,8 +320,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window for each key individually. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Arriving data is incrementally aggregated using the given reducer.
+	 * <p>Arriving data is incrementally aggregated using the given reducer.
 	 *
 	 * @param reduceFunction The reduce function that is used for incremental aggregation.
 	 * @param function The process window function.
@@ -349,8 +342,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window for each key individually. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Arriving data is incrementally aggregated using the given reducer.
+	 * <p>Arriving data is incrementally aggregated using the given reducer.
 	 *
 	 * @param reduceFunction The reduce function that is used for incremental aggregation.
 	 * @param function The process window function.
@@ -796,8 +788,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window for each key individually. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Arriving data is incrementally aggregated using the given fold function.
+	 * <p>Arriving data is incrementally aggregated using the given fold function.
 	 *
 	 * @param initialValue The initial value of the fold.
 	 * @param foldFunction The fold function that is used for incremental aggregation.
@@ -821,8 +812,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window for each key individually. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Arriving data is incrementally aggregated using the given fold function.
+	 * <p>Arriving data is incrementally aggregated using the given fold function.
 	 *
 	 * @param initialValue The initial value of the fold.
 	 * @param foldFunction The fold function that is used for incremental aggregation.
@@ -904,8 +894,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window for each key individually. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Arriving data is incrementally aggregated using the given fold function.
+	 * <p>Arriving data is incrementally aggregated using the given fold function.
 	 *
 	 * @param initialValue The initial value of the fold.
 	 * @param foldFunction The fold function that is used for incremental aggregation.
@@ -929,8 +918,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window for each key individually. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Arriving data is incrementally aggregated using the given fold function.
+	 * <p>Arriving data is incrementally aggregated using the given fold function.
 	 *
 	 * @param initialValue The initial value of the fold.
 	 * @param foldFunction The fold function that is used for incremental aggregation.
@@ -1016,8 +1004,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Not that this function requires that all data in the windows is buffered until the window
+	 * <p>Note that this function requires that all data in the windows is buffered until the window
 	 * is evaluated, as the function provides no means of incremental aggregation.
 	 *
 	 * @param function The window function.
@@ -1036,8 +1023,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Not that this function requires that all data in the windows is buffered until the window
+	 * <p>Note that this function requires that all data in the windows is buffered until the window
 	 * is evaluated, as the function provides no means of incremental aggregation.
 	 *
 	 * @param function The window function.
@@ -1054,8 +1040,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Not that this function requires that all data in the windows is buffered until the window
+	 * <p>Note that this function requires that all data in the windows is buffered until the window
 	 * is evaluated, as the function provides no means of incremental aggregation.
 	 *
 	 * @param function The process window function.
@@ -1075,8 +1060,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Not that this function requires that all data in the windows is buffered until the window
+	 * <p>Note that this function requires that all data in the windows is buffered until the window
 	 * is evaluated, as the function provides no means of incremental aggregation.
 	 *
 	 * @param function The process window function.
@@ -1146,8 +1130,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window for each key individually. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Arriving data is incrementally aggregated using the given reducer.
+	 * <p>Arriving data is incrementally aggregated using the given reducer.
 	 *
 	 * @param reduceFunction The reduce function that is used for incremental aggregation.
 	 * @param function The window function.
@@ -1169,8 +1152,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window for each key individually. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Arriving data is incrementally aggregated using the given reducer.
+	 * <p>Arriving data is incrementally aggregated using the given reducer.
 	 *
 	 * @param reduceFunction The reduce function that is used for incremental aggregation.
 	 * @param function The window function.
@@ -1246,8 +1228,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window for each key individually. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Arriving data is incrementally aggregated using the given fold function.
+	 * <p>Arriving data is incrementally aggregated using the given fold function.
 	 *
 	 * @param initialValue The initial value of the fold.
 	 * @param foldFunction The fold function that is used for incremental aggregation.
@@ -1270,8 +1251,7 @@ public class AllWindowedStream<T, W extends Window> {
 	 * evaluation of the window for each key individually. The output of the window function is
 	 * interpreted as a regular non-windowed stream.
 	 *
-	 * <p>
-	 * Arriving data is incrementally aggregated using the given fold function.
+	 * <p>Arriving data is incrementally aggregated using the given fold function.
 	 *
 	 * @param initialValue The initial value of the fold.
 	 * @param foldFunction The fold function that is used for incremental aggregation.
@@ -1364,10 +1344,8 @@ public class AllWindowedStream<T, W extends Window> {
 	 * Applies an aggregation that sums every window of the pojo data stream at
 	 * the given field for every window.
 	 *
-	 * <p>
-	 * A field expression is either
-	 * the name of a public field or a getter method with parentheses of the
-	 * stream's underlying type. A dot can be used to drill down into objects,
+	 * <p>A field expression is either the name of a public field or a getter method with
+	 * parentheses of the stream's underlying type. A dot can be used to drill down into objects,
 	 * as in {@code "field1.getInnerField2()" }.
 	 *
 	 * @param field The field to sum
@@ -1392,11 +1370,9 @@ public class AllWindowedStream<T, W extends Window> {
 	 * Applies an aggregation that gives the minimum value of the pojo data
 	 * stream at the given field expression for every window.
 	 *
-	 * <p>
-	 * A field
-	 * expression is either the name of a public field or a getter method with
-	 * parentheses of the {@link DataStream}S underlying type. A dot can be used
-	 * to drill down into objects, as in {@code "field1.getInnerField2()" }.
+	 * <p>A field expression is either the name of a public field or a getter method with
+	 * parentheses of the {@link DataStream}'s underlying type. A dot can be used to drill down into
+	 * objects, as in {@code "field1.getInnerField2()" }.
 	 *
 	 * @param field The field expression based on which the aggregation will be applied.
 	 * @return The transformed DataStream.

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AsyncDataStream.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AsyncDataStream.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AsyncDataStream.java
index 8132d28..8461d2c 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AsyncDataStream.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AsyncDataStream.java
@@ -28,8 +28,8 @@ import java.util.concurrent.TimeUnit;
 
 /**
  * A helper class to apply {@link AsyncFunction} to a data stream.
- * <p>
- * <pre>{@code
+ *
+ * <p><pre>{@code
  * DataStream<String> input = ...
  * AsyncFunction<String, Tuple<String, String>> asyncFunc = ...
  *
@@ -40,6 +40,10 @@ import java.util.concurrent.TimeUnit;
 
 @PublicEvolving
 public class AsyncDataStream {
+
+	/**
+	 * Output mode for asynchronous operations.
+	 */
 	public enum OutputMode { ORDERED, UNORDERED }
 
 	private static final int DEFAULT_QUEUE_CAPACITY = 100;

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
index 62d032d..5c0dbf9 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
@@ -47,17 +47,13 @@ import static java.util.Objects.requireNonNull;
  * {@code CoGroupedStreams} represents two {@link DataStream DataStreams} that have been co-grouped.
  * A streaming co-group operation is evaluated over elements in a window.
  *
- * <p>
- * To finalize co-group operation you also need to specify a {@link KeySelector} for
+ * <p>To finalize co-group operation you also need to specify a {@link KeySelector} for
  * both the first and second input and a {@link WindowAssigner}.
  *
- * <p>
- * Note: Right now, the groups are being built in memory so you need to ensure that they don't
+ * <p>Note: Right now, the groups are being built in memory so you need to ensure that they don't
  * get too big. Otherwise the JVM might crash.
  *
- * <p>
- * Example:
- *
+ * <p>Example:
  * <pre> {@code
  * DataStream<Tuple2<String, Integer>> one = ...;
  * DataStream<Tuple2<String, Integer>> two = ...;
@@ -72,14 +68,15 @@ import static java.util.Objects.requireNonNull;
 @Public
 public class CoGroupedStreams<T1, T2> {
 
-	/** The first input stream */
+	/** The first input stream. */
 	private final DataStream<T1> input1;
 
-	/** The second input stream */
+	/** The second input stream. */
 	private final DataStream<T2> input2;
 
 	/**
-	 * Creates new CoGroped data streams, which are the first step towards building a streaming co-group.
+	 * Creates new CoGrouped data streams, which are the first step towards building a streaming
+	 * co-group.
 	 * 
 	 * @param input1 The first data stream.
 	 * @param input2 The second data stream.
@@ -209,10 +206,10 @@ public class CoGroupedStreams<T1, T2> {
 		}
 
 		/**
-		 * Sets the {@code Evictor} that should be used to evict elements from a window before emission.
+		 * Sets the {@code Evictor} that should be used to evict elements from a window before
+		 * emission.
 		 *
-		 * <p>
-		 * Note: When using an evictor window performance will degrade significantly, since
+		 * <p>Note: When using an evictor window performance will degrade significantly, since
 		 * pre-aggregation of window results cannot be used.
 		 */
 		@PublicEvolving

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
index 9fe3a4c..e884140 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/ConnectedStreams.java
@@ -90,7 +90,7 @@ public class ConnectedStreams<IN1, IN2> {
 	}
 
 	/**
-	 * Gets the type of the first input
+	 * Gets the type of the first input.
 	 *
 	 * @return The type of the first input
 	 */
@@ -99,7 +99,7 @@ public class ConnectedStreams<IN1, IN2> {
 	}
 
 	/**
-	 * Gets the type of the second input
+	 * Gets the type of the second input.
 	 *
 	 * @return The type of the second input
 	 */

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
index fc70416..43a1aea 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
@@ -186,7 +186,7 @@ public class DataStream<T> {
 
 	/**
 	 * Returns the {@link StreamExecutionEnvironment} that was used to create this
-	 * {@link DataStream}
+	 * {@link DataStream}.
 	 *
 	 * @return The Execution Environment
 	 */
@@ -253,7 +253,6 @@ public class DataStream<T> {
 	}
 
 	/**
-	 *
 	 * It creates a new {@link KeyedStream} that uses the provided key for partitioning
 	 * its operator states. 
 	 *
@@ -304,8 +303,8 @@ public class DataStream<T> {
 	/**
 	 * Partitions a tuple DataStream on the specified key fields using a custom partitioner.
 	 * This method takes the key position to partition on, and a partitioner that accepts the key type.
-	 * <p>
-	 * Note: This method works only on single field keys.
+	 *
+	 * <p>Note: This method works only on single field keys.
 	 *
 	 * @param partitioner The partitioner to assign partitions to keys.
 	 * @param field The field index on which the DataStream is to partitioned.
@@ -319,8 +318,8 @@ public class DataStream<T> {
 	/**
 	 * Partitions a POJO DataStream on the specified key fields using a custom partitioner.
 	 * This method takes the key expression to partition on, and a partitioner that accepts the key type.
-	 * <p>
-	 * Note: This method works only on single field keys.
+	 *
+	 * <p>Note: This method works only on single field keys.
 	 *
 	 * @param partitioner The partitioner to assign partitions to keys.
 	 * @param field The expression for the field on which the DataStream is to partitioned.
@@ -336,8 +335,8 @@ public class DataStream<T> {
 	 * Partitions a DataStream on the key returned by the selector, using a custom partitioner.
 	 * This method takes the key selector to get the key to partition on, and a partitioner that
 	 * accepts the key type.
-	 * <p>
-	 * Note: This method works only on single field keys, i.e. the selector cannot return tuples
+	 *
+	 * <p>Note: This method works only on single field keys, i.e. the selector cannot return tuples
 	 * of fields.
 	 *
 	 * @param partitioner
@@ -453,16 +452,16 @@ public class DataStream<T> {
 	 * input of the iteration and treat the input and feedback streams as a
 	 * {@link ConnectedStreams} be calling
 	 * {@link IterativeStream#withFeedbackType(TypeInformation)}
-	 * <p>
-	 * A common usage pattern for streaming iterations is to use output
+	 *
+	 * <p>A common usage pattern for streaming iterations is to use output
 	 * splitting to send a part of the closing data stream to the head. Refer to
 	 * {@link #split(OutputSelector)} for more information.
-	 * <p>
-	 * The iteration edge will be partitioned the same way as the first input of
+	 *
+	 * <p>The iteration edge will be partitioned the same way as the first input of
 	 * the iteration head unless it is changed in the
 	 * {@link IterativeStream#closeWith(DataStream)} call.
-	 * <p>
-	 * By default a DataStream with iteration will never terminate, but the user
+	 *
+	 * <p>By default a DataStream with iteration will never terminate, but the user
 	 * can use the maxWaitTime parameter to set a max waiting time for the
 	 * iteration head. If no data received in the set time, the stream
 	 * terminates.
@@ -485,16 +484,16 @@ public class DataStream<T> {
 	 * input of the iteration and treat the input and feedback streams as a
 	 * {@link ConnectedStreams} be calling
 	 * {@link IterativeStream#withFeedbackType(TypeInformation)}
-	 * <p>
-	 * A common usage pattern for streaming iterations is to use output
+	 *
+	 * <p>A common usage pattern for streaming iterations is to use output
 	 * splitting to send a part of the closing data stream to the head. Refer to
 	 * {@link #split(OutputSelector)} for more information.
-	 * <p>
-	 * The iteration edge will be partitioned the same way as the first input of
+	 *
+	 * <p>The iteration edge will be partitioned the same way as the first input of
 	 * the iteration head unless it is changed in the
 	 * {@link IterativeStream#closeWith(DataStream)} call.
-	 * <p>
-	 * By default a DataStream with iteration will never terminate, but the user
+	 *
+	 * <p>By default a DataStream with iteration will never terminate, but the user
 	 * can use the maxWaitTime parameter to set a max waiting time for the
 	 * iteration head. If no data received in the set time, the stream
 	 * terminates.
@@ -634,8 +633,7 @@ public class DataStream<T> {
 	 * Initiates a Project transformation on a {@link Tuple} {@link DataStream}.<br>
 	 * <b>Note: Only Tuple DataStreams can be projected.</b>
 	 *
-	 * <p>
-	 * The transformation projects each Tuple of the DataSet onto a (sub)set of
+	 * <p>The transformation projects each Tuple of the DataSet onto a (sub)set of
 	 * fields.
 	 *
 	 * @param fieldIndexes
@@ -671,13 +669,11 @@ public class DataStream<T> {
 	/**
 	 * Windows this {@code DataStream} into tumbling time windows.
 	 *
-	 * <p>
-	 * This is a shortcut for either {@code .window(TumblingEventTimeWindows.of(size))} or
+	 * <p>This is a shortcut for either {@code .window(TumblingEventTimeWindows.of(size))} or
 	 * {@code .window(TumblingProcessingTimeWindows.of(size))} depending on the time characteristic
 	 * set using
 	 *
-	 * <p>
-	 * Note: This operation can be inherently non-parallel since all elements have to pass through
+	 * <p>Note: This operation can be inherently non-parallel since all elements have to pass through
 	 * the same operator instance. (Only for special cases, such as aligned time windows is
 	 * it possible to perform this operation in parallel).
 	 *
@@ -696,14 +692,12 @@ public class DataStream<T> {
 	/**
 	 * Windows this {@code DataStream} into sliding time windows.
 	 *
-	 * <p>
-	 * This is a shortcut for either {@code .window(SlidingEventTimeWindows.of(size, slide))} or
+	 * <p>This is a shortcut for either {@code .window(SlidingEventTimeWindows.of(size, slide))} or
 	 * {@code .window(SlidingProcessingTimeWindows.of(size, slide))} depending on the time characteristic
 	 * set using
 	 * {@link org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#setStreamTimeCharacteristic(org.apache.flink.streaming.api.TimeCharacteristic)}
 	 *
-	 * <p>
-	 * Note: This operation can be inherently non-parallel since all elements have to pass through
+	 * <p>Note: This operation can be inherently non-parallel since all elements have to pass through
 	 * the same operator instance. (Only for special cases, such as aligned time windows is
 	 * it possible to perform this operation in parallel).
 	 *
@@ -720,8 +714,7 @@ public class DataStream<T> {
 	/**
 	 * Windows this {@code DataStream} into tumbling count windows.
 	 *
-	 * <p>
-	 * Note: This operation can be inherently non-parallel since all elements have to pass through
+	 * <p>Note: This operation can be inherently non-parallel since all elements have to pass through
 	 * the same operator instance. (Only for special cases, such as aligned time windows is
 	 * it possible to perform this operation in parallel).
 	 *
@@ -734,8 +727,7 @@ public class DataStream<T> {
 	/**
 	 * Windows this {@code DataStream} into sliding count windows.
 	 *
-	 * <p>
-	 * Note: This operation can be inherently non-parallel since all elements have to pass through
+	 * <p>Note: This operation can be inherently non-parallel since all elements have to pass through
 	 * the same operator instance. (Only for special cases, such as aligned time windows is
 	 * it possible to perform this operation in parallel).
 	 *
@@ -754,13 +746,11 @@ public class DataStream<T> {
 	 * {@link org.apache.flink.streaming.api.windowing.assigners.WindowAssigner}. The grouping of
 	 * elements is done both by key and by window.
 	 *
-	 * <p>
-	 * A {@link org.apache.flink.streaming.api.windowing.triggers.Trigger} can be defined to specify
+	 * <p>A {@link org.apache.flink.streaming.api.windowing.triggers.Trigger} can be defined to specify
 	 * when windows are evaluated. However, {@code WindowAssigners} have a default {@code Trigger}
 	 * that is used if a {@code Trigger} is not specified.
 	 *
-	 * <p>
-	 * Note: This operation can be inherently non-parallel since all elements have to pass through
+	 * <p>Note: This operation can be inherently non-parallel since all elements have to pass through
 	 * the same operator instance. (Only for special cases, such as aligned time windows is
 	 * it possible to perform this operation in parallel).
 	 *
@@ -780,8 +770,7 @@ public class DataStream<T> {
 	 * Extracts a timestamp from an element and assigns it as the internal timestamp of that element.
 	 * The internal timestamps are, for example, used for event-time window operations.
 	 *
-	 * <p>
-	 * If you know that the timestamps are strictly increasing you can use an
+	 * <p>If you know that the timestamps are strictly increasing you can use an
 	 * {@link AscendingTimestampExtractor}. Otherwise,
 	 * you should provide a {@link TimestampExtractor} that also implements
 	 * {@link TimestampExtractor#getCurrentWatermark()} to keep track of watermarks.
@@ -902,9 +891,7 @@ public class DataStream<T> {
 	/**
 	 * Writes a DataStream to the standard output stream (stdout).
 	 *
-	 * <p>
-	 * For each element of the DataStream the result of
-	 * {@link Object#toString()} is written.
+	 * <p>For each element of the DataStream the result of {@link Object#toString()} is written.
 	 *
 	 * @return The closed DataStream.
 	 */
@@ -917,9 +904,7 @@ public class DataStream<T> {
 	/**
 	 * Writes a DataStream to the standard output stream (stderr).
 	 *
-	 * <p>
-	 * For each element of the DataStream the result of
-	 * {@link Object#toString()} is written.
+	 * <p>For each element of the DataStream the result of {@link Object#toString()} is written.
 	 *
 	 * @return The closed DataStream.
 	 */
@@ -932,9 +917,7 @@ public class DataStream<T> {
 	/**
 	 * Writes a DataStream to the file specified by path in text format.
 	 *
-	 * <p>
-	 * For every element of the DataStream the result of {@link Object#toString()}
-	 * is written.
+	 * <p>For every element of the DataStream the result of {@link Object#toString()} is written.
 	 *
 	 * @param path
 	 *            The path pointing to the location the text file is written to.
@@ -950,9 +933,7 @@ public class DataStream<T> {
 	/**
 	 * Writes a DataStream to the file specified by path in text format.
 	 *
-	 * <p>
-	 * For every element of the DataStream the result of {@link Object#toString()}
-	 * is written.
+	 * <p>For every element of the DataStream the result of {@link Object#toString()} is written.
 	 *
 	 * @param path
 	 *            The path pointing to the location the text file is written to
@@ -973,8 +954,7 @@ public class DataStream<T> {
 	/**
 	 * Writes a DataStream to the file specified by the path parameter.
 	 *
-	 * <p>
-	 * For every field of an element of the DataStream the result of {@link Object#toString()}
+	 * <p>For every field of an element of the DataStream the result of {@link Object#toString()}
 	 * is written. This method can only be used on data streams of tuples.
 	 *
 	 * @param path
@@ -991,8 +971,7 @@ public class DataStream<T> {
 	/**
 	 * Writes a DataStream to the file specified by the path parameter.
 	 *
-	 * <p>
-	 * For every field of an element of the DataStream the result of {@link Object#toString()}
+	 * <p>For every field of an element of the DataStream the result of {@link Object#toString()}
 	 * is written. This method can only be used on data streams of tuples.
 	 *
 	 * @param path
@@ -1012,8 +991,7 @@ public class DataStream<T> {
 	 * Writes a DataStream to the file specified by the path parameter. The
 	 * writing is performed periodically every millis milliseconds.
 	 *
-	 * <p>
-	 * For every field of an element of the DataStream the result of {@link Object#toString()}
+	 * <p>For every field of an element of the DataStream the result of {@link Object#toString()}
 	 * is written. This method can only be used on data streams of tuples.
 	 *
 	 * @param path
@@ -1073,9 +1051,10 @@ public class DataStream<T> {
 	/**
 	 * Writes the dataStream into an output, described by an OutputFormat.
 	 *
-	 * The output is not participating in Flink's checkpointing!
+	 * <p>The output is not participating in Flink's checkpointing!
 	 *
-	 * For writing to a file system periodically, the use of the "flink-connector-filesystem" is recommended.
+	 * <p>For writing to a file system periodically, the use of the "flink-connector-filesystem"
+	 * is recommended.
 	 *
 	 * @param format The output format
 	 * @return The closed DataStream
@@ -1121,7 +1100,7 @@ public class DataStream<T> {
 	}
 
 	/**
-	 * Internal function for setting the partitioner for the DataStream
+	 * Internal function for setting the partitioner for the DataStream.
 	 *
 	 * @param partitioner
 	 *            Partitioner to set.

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
index b0e78d7..d098c98 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStreamSink.java
@@ -80,19 +80,17 @@ public class DataStreamSink<T> {
 
 	/**
 	 * Sets a user provided hash for this operator. This will be used AS IS to create the JobVertexID.
-	 * <p/>
+	 *
 	 * <p>The user provided hash is an alternative to the generated hashes, that is considered when identifying an
 	 * operator through the default hash mechanics fails (e.g. because of changes between Flink versions).
-	 * <p/>
+	 *
 	 * <p><strong>Important</strong>: this should be used as a workaround or for trouble shooting. The provided hash
 	 * needs to be unique per transformation and job. Otherwise, job submission will fail. Furthermore, you cannot
 	 * assign user-specified hash to intermediate nodes in an operator chain and trying so will let your job fail.
 	 *
-	 * <p>
-	 * A use case for this is in migration between Flink versions or changing the jobs in a way that changes the
+	 * <p>A use case for this is in migration between Flink versions or changing the jobs in a way that changes the
 	 * automatically generated hashes. In this case, providing the previous hashes directly through this method (e.g.
 	 * obtained from old logs) can help to reestablish a lost mapping from states to their target operator.
-	 * <p/>
 	 *
 	 * @param uidHash The user provided hash for this operator. This will become the JobVertexID, which is shown in the
 	 *                 logs and web ui.
@@ -158,8 +156,7 @@ public class DataStreamSink<T> {
 	 * Turns off chaining for this operator so thread co-location will not be
 	 * used as an optimization.
 	 *
-	 * <p>
-	 * Chaining can be turned off for the whole
+	 * <p>Chaining can be turned off for the whole
 	 * job by {@link org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#disableOperatorChaining()}
 	 * however it is not advised for performance considerations.
 	 *

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/IterativeStream.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/IterativeStream.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/IterativeStream.java
index 0698725..c6426a6 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/IterativeStream.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/IterativeStream.java
@@ -53,8 +53,7 @@ public class IterativeStream<T> extends SingleOutputStreamOperator<T> {
 	 * Closes the iteration. This method defines the end of the iterative
 	 * program part that will be fed back to the start of the iteration.
 	 *
-	 * <p>
-	 * A common usage pattern for streaming iterations is to use output
+	 * <p>A common usage pattern for streaming iterations is to use output
 	 * splitting to send a part of the closing data stream to the head. Refer to
 	 * {@link DataStream#split(org.apache.flink.streaming.api.collector.selector.OutputSelector)}
 	 * for more information.
@@ -86,8 +85,7 @@ public class IterativeStream<T> extends SingleOutputStreamOperator<T> {
 	 * co-transformations on the input and feedback stream, as in a
 	 * {@link ConnectedStreams}.
 	 *
-	 * <p>
-	 * For type safety the user needs to define the feedback type
+	 * <p>For type safety the user needs to define the feedback type
 	 * 
 	 * @param feedbackTypeString
 	 *            String describing the type information of the feedback stream.
@@ -102,8 +100,7 @@ public class IterativeStream<T> extends SingleOutputStreamOperator<T> {
 	 * co-transformations on the input and feedback stream, as in a
 	 * {@link ConnectedStreams}.
 	 *
-	 * <p>
-	 * For type safety the user needs to define the feedback type
+	 * <p>For type safety the user needs to define the feedback type
 	 * 
 	 * @param feedbackTypeClass
 	 *            Class of the elements in the feedback stream.
@@ -118,8 +115,7 @@ public class IterativeStream<T> extends SingleOutputStreamOperator<T> {
 	 * co-transformations on the input and feedback stream, as in a
 	 * {@link ConnectedStreams}.
 	 *
-	 * <p>
-	 * For type safety the user needs to define the feedback type
+	 * <p>For type safety the user needs to define the feedback type
 	 * 
 	 * @param feedbackType
 	 *            The type information of the feedback stream.
@@ -135,8 +131,7 @@ public class IterativeStream<T> extends SingleOutputStreamOperator<T> {
 	 * iteration and the feedback of the iteration are connected as in a
 	 * {@link ConnectedStreams}.
 	 *
-	 * <p>
-	 * The user can distinguish between the two inputs using co-transformation,
+	 * <p>The user can distinguish between the two inputs using co-transformation,
 	 * thus eliminating the need for mapping the inputs and outputs to a common
 	 * type.
 	 * 

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
index ed1cbd7..40059cf 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
@@ -39,17 +39,13 @@ import static java.util.Objects.requireNonNull;
  *{@code JoinedStreams} represents two {@link DataStream DataStreams} that have been joined.
  * A streaming join operation is evaluated over elements in a window.
  *
- * <p>
- * To finalize the join operation you also need to specify a {@link KeySelector} for
+ * <p>To finalize the join operation you also need to specify a {@link KeySelector} for
  * both the first and second input and a {@link WindowAssigner}.
  *
- * <p>
- * Note: Right now, the the join is being evaluated in memory so you need to ensure that the number
+ * <p>Note: Right now, the join is being evaluated in memory so you need to ensure that the number
  * of elements per key does not get too high. Otherwise the JVM might crash.
  *
- * <p>
- * Example:
- *
+ * <p>Example:
  * <pre> {@code
  * DataStream<Tuple2<String, Integer>> one = ...;
 * DataStream<Tuple2<String, Integer>> two = ...;
@@ -64,10 +60,10 @@ import static java.util.Objects.requireNonNull;
 @Public
 public class JoinedStreams<T1, T2> {
 
-	/** The first input stream */
+	/** The first input stream. */
 	private final DataStream<T1> input1;
 
-	/** The second input stream */
+	/** The second input stream. */
 	private final DataStream<T2> input2;
 
 	/**
@@ -206,8 +202,7 @@ public class JoinedStreams<T1, T2> {
 		/**
 		 * Sets the {@code Evictor} that should be used to evict elements from a window before emission.
 		 *
-		 * <p>
-		 * Note: When using an evictor window performance will degrade significantly, since
+		 * <p>Note: When using an evictor window performance will degrade significantly, since
 		 * pre-aggregation of window results cannot be used.
 		 */
 		@PublicEvolving

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
index 860aac6..27545c2 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java
@@ -79,9 +79,8 @@ import java.util.UUID;
  * {@code DataStream} are also possible on a {@code KeyedStream}, with the exception of
  * partitioning methods such as shuffle, forward and keyBy.
  *
- * <p>
- * Reduce-style operations, such as {@link #reduce}, {@link #sum} and {@link #fold} work on elements
- * that have the same key.
+ * <p>Reduce-style operations, such as {@link #reduce}, {@link #sum} and {@link #fold} work on
+ * elements that have the same key.
  *
  * @param <T> The type of the elements in the Keyed Stream.
  * @param <KEY> The type of the key in the Keyed Stream.
@@ -89,10 +88,12 @@ import java.util.UUID;
 @Public
 public class KeyedStream<T, KEY> extends DataStream<T> {
 
-	/** The key selector that can get the key by which the stream if partitioned from the elements */
+	/**
+	 * The key selector that can get the key by which the stream is partitioned from the elements.
+	 */
 	private final KeySelector<T, KEY> keySelector;
 
-	/** The type of the key by which the stream is partitioned */
+	/** The type of the key by which the stream is partitioned. */
 	private final TypeInformation<KEY> keyType;
 	
 	/**
@@ -313,8 +314,7 @@ public class KeyedStream<T, KEY> extends DataStream<T> {
 	/**
 	 * Windows this {@code KeyedStream} into tumbling time windows.
 	 *
-	 * <p>
-	 * This is a shortcut for either {@code .window(TumblingEventTimeWindows.of(size))} or
+	 * <p>This is a shortcut for either {@code .window(TumblingEventTimeWindows.of(size))} or
 	 * {@code .window(TumblingProcessingTimeWindows.of(size))} depending on the time characteristic
 	 * set using
 	 * {@link org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#setStreamTimeCharacteristic(org.apache.flink.streaming.api.TimeCharacteristic)}
@@ -332,10 +332,9 @@ public class KeyedStream<T, KEY> extends DataStream<T> {
 	/**
 	 * Windows this {@code KeyedStream} into sliding time windows.
 	 *
-	 * <p>
-	 * This is a shortcut for either {@code .window(SlidingEventTimeWindows.of(size, slide))} or
-	 * {@code .window(SlidingProcessingTimeWindows.of(size, slide))} depending on the time characteristic
-	 * set using
+	 * <p>This is a shortcut for either {@code .window(SlidingEventTimeWindows.of(size, slide))} or
+	 * {@code .window(SlidingProcessingTimeWindows.of(size, slide))} depending on the time
+	 * characteristic set using
 	 * {@link org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#setStreamTimeCharacteristic(org.apache.flink.streaming.api.TimeCharacteristic)}
 	 *
 	 * @param size The size of the window.
@@ -374,10 +373,9 @@ public class KeyedStream<T, KEY> extends DataStream<T> {
 	 * over a key grouped stream. Elements are put into windows by a {@link WindowAssigner}. The
 	 * grouping of elements is done both by key and by window.
 	 *
-	 * <p>
-	 * A {@link org.apache.flink.streaming.api.windowing.triggers.Trigger} can be defined to specify
-	 * when windows are evaluated. However, {@code WindowAssigners} have a default {@code Trigger}
-	 * that is used if a {@code Trigger} is not specified.
+	 * <p>A {@link org.apache.flink.streaming.api.windowing.triggers.Trigger} can be defined to
+	 * specify when windows are evaluated. However, {@code WindowAssigners} have a default
+	 * {@code Trigger} that is used if a {@code Trigger} is not specified.
 	 *
 	 * @param assigner The {@code WindowAssigner} that assigns elements to windows.
 	 * @return The trigger windows data stream.

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
index 9c5d9f0..5f063de 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
@@ -101,23 +101,26 @@ public class SingleOutputStreamOperator<T> extends DataStream<T> {
 	}
 
 	/**
-	 * Sets an user provided hash for this operator. This will be used AS IS the create the JobVertexID.
-	 * <p/>
-	 * <p>The user provided hash is an alternative to the generated hashes, that is considered when identifying an
-	 * operator through the default hash mechanics fails (e.g. because of changes between Flink versions).
-	 * <p/>
-	 * <p><strong>Important</strong>: this should be used as a workaround or for trouble shooting. The provided hash
-	 * needs to be unique per transformation and job. Otherwise, job submission will fail. Furthermore, you cannot
-	 * assign user-specified hash to intermediate nodes in an operator chain and trying so will let your job fail.
+	 * Sets a user provided hash for this operator. This will be used AS IS to create the
+	 * JobVertexID.
+	 *
+	 * <p>The user provided hash is an alternative to the generated hashes, that is considered when
+	 * identifying an operator through the default hash mechanics fails (e.g. because of changes
+	 * between Flink versions).
 	 *
-	 * <p>
-	 * A use case for this is in migration between Flink versions or changing the jobs in a way that changes the
-	 * automatically generated hashes. In this case, providing the previous hashes directly through this method (e.g.
-	 * obtained from old logs) can help to reestablish a lost mapping from states to their target operator.
+	 * <p><strong>Important</strong>: this should be used as a workaround or for trouble shooting.
+	 * The provided hash needs to be unique per transformation and job. Otherwise, job submission
+	 * will fail. Furthermore, you cannot assign user-specified hash to intermediate nodes in an
+	 * operator chain and trying so will let your job fail.
+	 *
+	 * <p>A use case for this is in migration between Flink versions or changing the jobs in a way
+	 * that changes the automatically generated hashes. In this case, providing the previous hashes
+	 * directly through this method (e.g. obtained from old logs) can help to reestablish a lost
+	 * mapping from states to their target operator.
 	 * <p/>
 	 *
-	 * @param uidHash The user provided hash for this operator. This will become the JobVertexID, which is shown in the
-	 *                 logs and web ui.
+	 * @param uidHash The user provided hash for this operator. This will become the JobVertexID,
+	 *                  which is shown in the logs and web ui.
 	 * @return The operator with the user provided hash.
 	 */
 	@PublicEvolving
@@ -148,7 +151,7 @@ public class SingleOutputStreamOperator<T> extends DataStream<T> {
 	/**
 	 * Sets the maximum parallelism of this operator.
 	 *
-	 * The maximum parallelism specifies the upper bound for dynamic scaling. It also defines the
+	 * <p>The maximum parallelism specifies the upper bound for dynamic scaling. It also defines the
 	 * number of key groups used for partitioned state.
 	 *
 	 * @param maxParallelism Maximum parallelism
@@ -253,11 +256,12 @@ public class SingleOutputStreamOperator<T> extends DataStream<T> {
 	}
 
 	/**
-	 * Turns off chaining for this operator so thread co-location will not be
-	 * used as an optimization.
-	 * <p> Chaining can be turned off for the whole
-	 * job by {@link StreamExecutionEnvironment#disableOperatorChaining()}
-	 * however it is not advised for performance considerations.
+	 * Turns off chaining for this operator so thread co-location will not be used as an
+	 * optimization.
+	 *
+	 * <p>Chaining can be turned off for the whole job by
+	 * {@link StreamExecutionEnvironment#disableOperatorChaining()} however it is not advised for
+	 * performance considerations.
 	 * 
 	 * @return The operator with chaining disabled
 	 */
@@ -358,13 +362,11 @@ public class SingleOutputStreamOperator<T> extends DataStream<T> {
 	/**
 	 * Adds a type information hint about the return type of this operator. 
 	 * 
-	 * <p>
-	 * Type hints are important in cases where the Java compiler
-	 * throws away generic type information necessary for efficient execution.
+	 * <p>Type hints are important in cases where the Java compiler throws away generic type
+	 * information necessary for efficient execution.
 	 * 
-	 * <p>
-	 * This method takes a type information string that will be parsed. A type information string can contain the following
-	 * types:
+	 * <p>This method takes a type information string that will be parsed. A type information string
+	 * can contain the following types:
 	 *
 	 * <ul>
 	 * <li>Basic types such as <code>Integer</code>, <code>String</code>, etc.
@@ -383,7 +385,7 @@ public class SingleOutputStreamOperator<T> extends DataStream<T> {
 	 * <li>Enum types such as <code>Enum&lt;org.my.CustomEnum&gt;</code></li>
 	 * </ul>
 	 *
-	 * Example:
+	 * <p>Example:
 	 * <code>"Tuple2&lt;String,Tuple2&lt;Integer,org.my.MyJob$Pojo&lt;word=String&gt;&gt;&gt;"</code>
 	 *
 	 * @param typeInfoString

http://git-wip-us.apache.org/repos/asf/flink/blob/2581a7b8/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/StreamProjection.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/StreamProjection.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/StreamProjection.java
index 8537318..e1e56fd 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/StreamProjection.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/StreamProjection.java
@@ -49,6 +49,10 @@ import org.apache.flink.api.java.typeutils.TupleTypeInfo;
 import org.apache.flink.streaming.api.operators.StreamProject;
 import org.apache.flink.util.Preconditions;
 
+/**
+ * The result of {@link DataStream#project(int...)}. This can be used to add more fields to the
+ * projection.
+ */
 @PublicEvolving
 public class StreamProjection<IN> {
 
@@ -77,7 +81,7 @@ public class StreamProjection<IN> {
 
 	/**
 	 * Chooses a projectTupleX according to the length of
-	 * {@link org.apache.flink.streaming.api.datastream.StreamProjection#fieldIndexes}
+	 * {@link org.apache.flink.streaming.api.datastream.StreamProjection#fieldIndexes}.
 	 *
 	 * @return The projected DataStream.
 	 * @see org.apache.flink.api.java.operators.ProjectOperator.Projection


Mime
View raw message