kafka-commits mailing list archives

From guozh...@apache.org
Subject [kafka] branch trunk updated: KAFKA-6813: Remove deprecated APIs in KIP-182, Part I (#4919)
Date Tue, 08 May 2018 00:34:40 GMT
This is an automated email from the ASF dual-hosted git repository.

guozhang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 2b5a594  KAFKA-6813: Remove deprecated APIs in KIP-182, Part I (#4919)
2b5a594 is described below

commit 2b5a59406622affa1e333e073546f075f59b4ac9
Author: Guozhang Wang <wangguoz@gmail.com>
AuthorDate: Mon May 7 17:34:34 2018 -0700

    KAFKA-6813: Remove deprecated APIs in KIP-182, Part I (#4919)
    
    I'm breaking KAFKA-6813 into a couple of "smaller" PRs, and this is the first one. It focuses on:
    
    Removing deprecated APIs in KStream, KTable, KGroupedStream, KGroupedTable, SessionWindowedKStream, and TimeWindowedKStream.
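
    As a hedged sketch of what this removal means for callers (store and
    type names below are illustrative, not taken from this PR; the usual
    org.apache.kafka.streams imports are assumed), the overloads that took
    a store name or a StateStoreSupplier now take a Materialized instead:

        // before (deprecated since KIP-182, removed by this PR):
        // KTable<String, Long> counts = grouped.count("counts-store");

        // after: the store name travels inside Materialized
        KTable<String, Long> counts =
            grouped.count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store"));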
    
    I also found a few overlooked bugs while working on them:
    
    2.a) For KTable.filter / mapValues overloads without the additional parameter specifying the materialized store, we originally did not materialize the store at all. After KIP-182 we mistakenly diverged the semantics: KTable.mapValues still does not materialize, but KTable.filter now always materializes.
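
    For illustration (hypothetical KTable<String, String> named table; this
    is a sketch of the fixed semantics, not code from this PR), the two
    overloads behave the same way again:

        // neither overload names a store, so neither forces materialization:
        KTable<String, String> mapped   = table.mapValues(v -> v.toUpperCase());
        KTable<String, String> filtered = table.filter((k, v) -> !v.isEmpty());

        // materialization stays opt-in via the Materialized overloads:
        KTable<String, String> queryable =
            table.filter((k, v) -> !v.isEmpty(), Materialized.as("filtered-store"));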
    
    2.b) In XXStream/Table.reduce/count, we used to reuse the serdes, since their types are known up front (for reduce, the key and value types are the same as the input's; for count, the key type is the same as the input's and the value type is Long). This was lost in a past refactoring.
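
    In other words (sketch, assuming a KGroupedStream<String, Long> named
    grouped), neither call below should need explicit serdes:

        // count: the key serde is the input key serde, the value serde is Serdes.Long()
        KTable<String, Long> counts = grouped.count();

        // reduce: the result type equals the input value type, so both input
        // serdes can be reused
        KTable<String, Long> sums = grouped.reduce((agg, v) -> agg + v);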
    
    2.c) We were force-casting a Serde<V> to Serde<VR> for XXStream / Table.aggregate, whose return value type is NOT known. Such an enforced cast should not be applied; instead we should require users to provide the value serde if they believe the default one is not applicable.
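
    Sketch (illustrative types, assuming a KGroupedStream<String, String>
    named grouped): the result type VR below is Long while the input values
    are Strings, so no input serde can be reused, and a value serde has to
    be passed explicitly whenever the configured default does not match VR:

        KTable<String, Long> lengths = grouped.aggregate(
            () -> 0L,
            (key, value, agg) -> agg + value.length(),
            Materialized.with(Serdes.String(), Serdes.Long()));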
    
    2.d) Whenever we create a new MaterializedInternal we effectively increment the suffix index for the store / processor-node names. However, in some places this MaterializedInternal is only used for validation, so the resulting processor-node / store name suffixes are no longer consecutive.
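
    The symptom is visible via Topology#describe() (sketch; the concrete
    skipped index below is hypothetical):

        // generated names should carry consecutive suffixes; a validation-only
        // MaterializedInternal used to burn an index, e.g. leaving
        // KSTREAM-AGGREGATE-0000000002 followed by KSTREAM-AGGREGATE-0000000004
        System.out.println(builder.build().describe());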
    
    Reviewers: Matthias J. Sax <matthias@confluent.io>, Bill Bejeck <bill@confluent.io>
---
 .../kafka/streams/kstream/KGroupedStream.java      | 1416 ++------------------
 .../kafka/streams/kstream/KGroupedTable.java       |  610 +--------
 .../org/apache/kafka/streams/kstream/KStream.java  | 1197 ++---------------
 .../org/apache/kafka/streams/kstream/KTable.java   |  837 +-----------
 .../streams/kstream/SessionWindowedKStream.java    |   37 +-
 .../kafka/streams/kstream/TimeWindowedKStream.java |   47 +-
 .../streams/kstream/internals/AbstractStream.java  |    6 -
 .../internals/GroupedStreamAggregateBuilder.java   |    8 +
 .../kstream/internals/KGroupedStreamImpl.java      |  394 +-----
 .../kstream/internals/KGroupedTableImpl.java       |  154 +--
 .../streams/kstream/internals/KStreamImpl.java     |  255 +---
 .../streams/kstream/internals/KTableImpl.java      |  276 +---
 .../internals/SessionWindowedKStreamImpl.java      |  102 +-
 .../kstream/internals/TimeWindowedKStreamImpl.java |   72 +-
 .../KStreamAggregationDedupIntegrationTest.java    |   22 +-
 .../KStreamAggregationIntegrationTest.java         |   41 +-
 ...StreamsFineGrainedAutoResetIntegrationTest.java |    9 +-
 .../integration/RegexSourceIntegrationTest.java    |   19 +-
 .../kafka/streams/kstream/KStreamBuilderTest.java  |    8 +-
 .../internals/InternalStreamsBuilderTest.java      |    4 +-
 .../kstream/internals/KGroupedStreamImplTest.java  |  257 +---
 .../kstream/internals/KGroupedTableImplTest.java   |   38 +-
 .../streams/kstream/internals/KStreamImplTest.java |   19 +-
 .../internals/KStreamWindowAggregateTest.java      |   13 +-
 .../kstream/internals/KTableAggregateTest.java     |   20 +-
 .../kstream/internals/KTableFilterTest.java        |   43 +-
 .../streams/kstream/internals/KTableImplTest.java  |   28 +-
 .../internals/KTableKTableInnerJoinTest.java       |   24 +-
 .../internals/KTableKTableLeftJoinTest.java        |    5 +-
 .../internals/SessionWindowedKStreamImplTest.java  |   27 +-
 .../internals/TimeWindowedKStreamImplTest.java     |    5 +-
 .../apache/kafka/streams/perf/YahooBenchmark.java  |    9 +-
 .../internals/StreamsMetadataStateTest.java        |   10 +-
 33 files changed, 695 insertions(+), 5317 deletions(-)
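
For the windowed overloads removed in the diff below, the Windows / SessionWindows
argument moves to windowedBy(...); a hedged migration sketch (store name, window
size, and the KGroupedStream<String, String> named grouped are illustrative):

    // before (removed): grouped.count(TimeWindows.of(60_000L), "windowed-counts");
    KTable<Windowed<String>, Long> windowedCounts =
        grouped.windowedBy(TimeWindows.of(60_000L))
               .count(Materialized.as("windowed-counts"));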

diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/KGroupedStream.java b/streams/src/main/java/org/apache/kafka/streams/kstream/KGroupedStream.java
index d8589e2..53a2be7 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/KGroupedStream.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/KGroupedStream.java
@@ -17,15 +17,13 @@
 package org.apache.kafka.streams.kstream;
 
 import org.apache.kafka.common.annotation.InterfaceStability;
-import org.apache.kafka.common.serialization.Serde;
 import org.apache.kafka.common.utils.Bytes;
 import org.apache.kafka.streams.KafkaStreams;
 import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.Topology;
 import org.apache.kafka.streams.state.KeyValueStore;
 import org.apache.kafka.streams.state.QueryableStoreType;
-import org.apache.kafka.streams.state.SessionStore;
-import org.apache.kafka.streams.state.WindowStore;
 
 /**
  * {@code KGroupedStream} is an abstraction of a <i>grouped</i> record stream of {@link KeyValue} pairs.
@@ -48,49 +46,6 @@ public interface KGroupedStream<K, V> {
     /**
      * Count the number of records in this stream by the grouped key.
      * Records with {@code null} key or value are ignored.
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * that can be queried using the provided {@code queryableStoreName}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // counting words
-     * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
-     * String key = "some-word";
-     * Long countForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * Therefore, the store name must be a valid Kafka topic name and cannot contain characters other than ASCII
-     * alphanumerics, '.', '_' and '-'.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param queryableStoreName the name of the underlying {@link KTable} state store; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} then this will be equivalent to {@link KGroupedStream#count()}.
-     * @return a {@link KTable} that contains "update" records with unmodified keys and {@link Long} values that
-     * represent the latest (rolling) count (i.e., number of records) for each key
-     * @deprecated use {@link #count(Materialized) count(Materialized.as(queryableStoreName))}
-     */
-    @Deprecated
-    KTable<K, Long> count(final String queryableStoreName);
-
-    /**
-     * Count the number of records in this stream by the grouped key.
-     * Records with {@code null} key or value are ignored.
      * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view).
      * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
      * <p>
@@ -107,7 +62,8 @@ public interface KGroupedStream<K, V> {
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
      * and "-changelog" is a fixed suffix.
      * Note that the internal store name may not be queryable through Interactive Queries.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @return a {@link KTable} that contains "update" records with unmodified keys and {@link Long} values that
      * represent the latest (rolling) count (i.e., number of records) for each key
@@ -118,7 +74,7 @@ public interface KGroupedStream<K, V> {
      * Count the number of records in this stream by the grouped key.
      * Records with {@code null} key or value are ignored.
      * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * provided by the given {@code storeSupplier}.
+     * with the store name provided via {@code materialized}.
      * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
      * <p>
      * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
@@ -130,10 +86,9 @@ public interface KGroupedStream<K, V> {
      * <p>
      * To query the local {@link KeyValueStore} it must be obtained via
      * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
-     * Use {@link org.apache.kafka.streams.processor.StateStore#name()} to get the store name:
      * <pre>{@code
      * KafkaStreams streams = ... // counting words
-     * String queryableStoreName = storeSupplier.name();
+     * String queryableStoreName = "storeName"; // the store name should be the name of the store as defined by the Materialized instance
      * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
      * String key = "some-word";
      * Long countForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
@@ -141,39 +96,16 @@ public interface KGroupedStream<K, V> {
      * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
      * query the value of the key on a parallel running instance of your Kafka Streams application.
      *
-     * @param storeSupplier user defined state store supplier. Cannot be {@code null}.
-     * @return a {@link KTable} that contains "update" records with unmodified keys and {@link Long} values that
-     * represent the latest (rolling) count (i.e., number of records) for each key
-     * @deprecated use {@link #count(Materialized) count(Materialized.as(KeyValueByteStoreSupplier))}
-     */
-    @Deprecated
-    KTable<K, Long> count(final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier);
-
-    /**
-     * Count the number of records in this stream by the grouped key.
-     * Records with {@code null} key or value are ignored.
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * provided by the given {@code materialized}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
      * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
-     * <pre>{@code
-     * KafkaStreams streams = ... // counting words
-     * String queryableStoreName = "count-store"; // the queryableStoreName should be the name of the store as defined by the Materialized instance
-     * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
-     * String key = "some-word";
-     * Long countForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
+     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
+     * Therefore, the store name defined by the Materialized instance must be a valid Kafka topic name and cannot contain characters other than ASCII
+     * alphanumerics, '.', '_' and '-'.
+     * The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is
+     * user-specified in {@link StreamsConfig} via parameter
+     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is the
+     * store name provided via {@code Materialized}, and "-changelog" is a fixed suffix.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @param materialized  an instance of {@link Materialized} used to materialize a state store. Cannot be {@code null}.
      *                      Note: the valueSerde will be automatically set to {@link org.apache.kafka.common.serialization.Serdes#Long() Serdes#Long()}
@@ -183,248 +115,12 @@ public interface KGroupedStream<K, V> {
      */
     KTable<K, Long> count(final Materialized<K, Long, KeyValueStore<Bytes, byte[]>> materialized);
 
-    /**
-     * Count the number of records in this stream by the grouped key and the defined windows.
-     * Records with {@code null} key or value are ignored.
-     * The specified {@code windows} define either hopping time windows that can be overlapping or tumbling (c.f.
-     * {@link TimeWindows}) or they define landmark windows (c.f. {@link UnlimitedWindows}).
-     * The result is written into a local windowed {@link KeyValueStore} (which is basically an ever-updating
-     * materialized view) that can be queried using the provided {@code queryableStoreName}.
-     * Windows are retained until their retention time expires (c.f. {@link Windows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local windowed {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // counting words
-     * ReadOnlyWindowStore<String,Long> localWindowStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>windowStore());
-     * String key = "some-word";
-     * long fromTime = ...;
-     * long toTime = ...;
-     * WindowStoreIterator<Long> countForWordsForWindows = localWindowStore.fetch(key, fromTime, toTime); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * Therefore, the store name must be a valid Kafka topic name and cannot contain characters other than ASCII
-     * alphanumerics, '.', '_' and '-'.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param windows   the specification of the aggregation {@link Windows}
-     * @param queryableStoreName the name of the underlying {@link KTable} state store; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} then this will be equivalent to {@link KGroupedStream#count(Windows)}.
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys and {@link Long} values
-     * that represent the latest (rolling) count (i.e., number of records) for each key within a window.
-     * @deprecated use {@link #windowedBy(Windows) windowedBy(windows)} followed by
-     * {@link TimeWindowedKStream#count(Materialized) count(Materialized.as(queryableStoreName))}
-     */
-    @Deprecated
-    <W extends Window> KTable<Windowed<K>, Long> count(final Windows<W> windows,
-                                                       final String queryableStoreName);
-
-    /**
-     * Count the number of records in this stream by the grouped key and the defined windows.
-     * Records with {@code null} key or value are ignored.
-     * The specified {@code windows} define either hopping time windows that can be overlapping or tumbling (c.f.
-     * {@link TimeWindows}) or they define landmark windows (c.f. {@link UnlimitedWindows}).
-     * The result is written into a local windowed {@link KeyValueStore} (which is basically an ever-updating
-     * materialized view) that can be queried using the provided {@code queryableName}.
-     * Windows are retained until their retention time expires (c.f. {@link Windows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
-     * and "-changelog" is a fixed suffix.
-     * Note that the internal store name may not be queryable through Interactive Queries.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param windows   the specification of the aggregation {@link Windows}
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys and {@link Long} values
-     * that represent the latest (rolling) count (i.e., number of records) for each key within a window
-     * @deprecated use {@link #windowedBy(Windows) windowedBy(windows)} followed by {@link TimeWindowedKStream#count() count()}
-     */
-    @Deprecated
-    <W extends Window> KTable<Windowed<K>, Long> count(final Windows<W> windows);
-
-    /**
-     * Count the number of records in this stream by the grouped key and the defined windows.
-     * Records with {@code null} key or value are ignored.
-     * The specified {@code windows} define either hopping time windows that can be overlapping or tumbling (c.f.
-     * {@link TimeWindows}) or they define landmark windows (c.f. {@link UnlimitedWindows}).
-     * The result is written into a local windowed {@link KeyValueStore} (which is basically an ever-updating
-     * materialized view) provided by the given {@code storeSupplier}.
-     * Windows are retained until their retention time expires (c.f. {@link Windows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local windowed {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
-     * Use {@link org.apache.kafka.streams.processor.StateStoreSupplier#name()} to get the store name:
-     * <pre>{@code
-     * KafkaStreams streams = ... // counting words
-     * String queryableStoreName = storeSupplier.name();
-     * ReadOnlyWindowStore<String,Long> localWindowStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>windowStore());
-     * String key = "some-word";
-     * long fromTime = ...;
-     * long toTime = ...;
-     * WindowStoreIterator<Long> countForWordsForWindows = localWindowStore.fetch(key, fromTime, toTime); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     *
-     * @param windows       the specification of the aggregation {@link Windows}
-     * @param storeSupplier user defined state store supplier. Cannot be {@code null}.
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys and {@link Long} values
-     * that represent the latest (rolling) count (i.e., number of records) for each key within a window
-     * @deprecated use {@link #windowedBy(Windows) windowedBy(windows)} followed by
-     * {@link TimeWindowedKStream#count(Materialized) count(Materialized.as(KeyValueByteStoreSupplier))}
-     */
-    @Deprecated
-    <W extends Window> KTable<Windowed<K>, Long> count(final Windows<W> windows,
-                                                       final org.apache.kafka.streams.processor.StateStoreSupplier<WindowStore> storeSupplier);
-
-
-    /**
-     * Count the number of records in this stream by the grouped key into {@link SessionWindows}.
-     * Records with {@code null} key or value are ignored.
-     * The result is written into a local {@link SessionStore} (which is basically an ever-updating
-     * materialized view) that can be queried using the provided {@code queryableStoreName}.
-     * SessionWindows are retained until their retention time expires (c.f. {@link SessionWindows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local windowed {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
-     * <pre>{@code
-     * KafkaStreams streams = ... // compute sum
-     * ReadOnlySessionStore<String,Long> localWindowStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>sessionStore());
-     * String key = "some-key";
-     * KeyValueIterator<Windowed<String>, Long> sumForKeyForWindows = localWindowStore.fetch(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     *
-     * @param sessionWindows the specification of the aggregation {@link SessionWindows}
-     * @param queryableStoreName  the name of the state store created from this operation; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} then this will be equivalent to {@link KGroupedStream#count(SessionWindows)}.
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys and {@link Long} values
-     * that represent the latest (rolling) count (i.e., number of records) for each key within a window
-     * @deprecated use {@link #windowedBy(SessionWindows) windowedBy(sessionWindows)} followed by
-     * {@link SessionWindowedKStream#count(Materialized) count(Materialized.as(queryableStoreName))}
-     */
-    @Deprecated
-    KTable<Windowed<K>, Long> count(final SessionWindows sessionWindows, final String queryableStoreName);
-
-    /**
-     * Count the number of records in this stream by the grouped key into {@link SessionWindows}.
-     * Records with {@code null} key or value are ignored.
-     * The result is written into a local {@link SessionStore} (which is basically an ever-updating
-     * materialized view) that can be queried using the provided {@code queryableStoreName}.
-     * SessionWindows are retained until their retention time expires (c.f. {@link SessionWindows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     *
-     * @param sessionWindows the specification of the aggregation {@link SessionWindows}
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys and {@link Long} values
-     * that represent the latest (rolling) count (i.e., number of records) for each key within a window
-     * @deprecated use {@link #windowedBy(SessionWindows) windowedBy(sessionWindows)} followed by
-     * {@link SessionWindowedKStream#count() count()}
-     */
-    @Deprecated
-    KTable<Windowed<K>, Long> count(final SessionWindows sessionWindows);
-
-    /**
-     * Count the number of records in this stream by the grouped key into {@link SessionWindows}.
-     * Records with {@code null} key or value are ignored.
-     * The result is written into a local {@link SessionStore} (which is basically an ever-updating materialized view)
-     * provided by the given {@code storeSupplier}.
-     * SessionWindows are retained until their retention time expires (c.f. {@link SessionWindows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local windowed {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
-     * Use {@link org.apache.kafka.streams.processor.StateStoreSupplier#name()} to get the store name:
-     * <pre>{@code
-     * KafkaStreams streams = ... // compute sum
-     * String queryableStoreName = storeSupplier.name();
-     * ReadOnlySessionStore<String,Long> localWindowStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>sessionStore());
-     * String key = "some-key";
-     * KeyValueIterator<Windowed<String>, Long> sumForKeyForWindows = localWindowStore.fetch(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     *
-     * @param sessionWindows the specification of the aggregation {@link SessionWindows}
-     * @param storeSupplier  user defined state store supplier. Cannot be {@code null}.
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys and {@link Long} values
-     * that represent the latest (rolling) count (i.e., number of records) for each key within a window
-     * @deprecated use {@link #windowedBy(SessionWindows) windowedBy(sessionWindows)} followed by
-     * {@link SessionWindowedKStream#count(Materialized) count(Materialized.as(KeyValueByteStoreSupplier))}
-     */
-    @Deprecated
-    KTable<Windowed<K>, Long> count(final SessionWindows sessionWindows,
-                                    final org.apache.kafka.streams.processor.StateStoreSupplier<SessionStore> storeSupplier);
 
     /**
      * Combine the values of records in this stream by the grouped key.
      * Records with {@code null} key or value are ignored.
      * Combining implies that the type of the aggregate result is the same as the type of the input value
      * (c.f. {@link #aggregate(Initializer, Aggregator)}).
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * that can be queried using the provided {@code queryableStoreName}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
      * <p>
      * The specified {@link Reducer} is applied for each input record and computes a new aggregate using the current
      * aggregate and the record's value.
@@ -438,13 +134,16 @@ public interface KGroupedStream<K, V> {
      * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
      * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
      * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
+     *
      * <p>
      * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
      * The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is
      * user-specified in {@link StreamsConfig} via parameter
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
      * and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     * Note that the internal store name may not be queryable through Interactive Queries.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @param reducer   a {@link Reducer} that computes a new aggregate result. Cannot be {@code null}.
      * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
@@ -452,13 +151,14 @@ public interface KGroupedStream<K, V> {
      */
     KTable<K, V> reduce(final Reducer<V> reducer);
 
+
     /**
-     * Combine the values of records in this stream by the grouped key.
+     * Combine the values of records in this stream by the grouped key.
      * Records with {@code null} key or value are ignored.
      * Combining implies that the type of the aggregate result is the same as the type of the input value
-     * (c.f. {@link #aggregate(Initializer, Aggregator, Serde, String)}).
+     * (c.f. {@link #aggregate(Initializer, Aggregator, Materialized)}).
      * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * that can be queried using the provided {@code queryableStoreName}.
+     * with the store name provided via {@code materialized}.
      * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
      * <p>
      * The specified {@link Reducer} is applied for each input record and computes a new aggregate using the current
@@ -474,7 +174,8 @@ public interface KGroupedStream<K, V> {
      * <p>
      * If there is no current aggregate the {@link Reducer} is not applied and the new aggregate will be the record's
      * value as-is.
-     * Thus, {@code reduce(Reducer, String)} can be used to compute aggregate functions like sum, min, or max.
+     * Thus, {@code reduce(Reducer, Materialized)} can be used to compute aggregate functions like sum, min, or
+     * max.
      * <p>
      * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
      * the same key.
@@ -484,61 +185,51 @@ public interface KGroupedStream<K, V> {
      * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
      * <p>
      * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
+     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
      * <pre>{@code
      * KafkaStreams streams = ... // compute sum
+     * String queryableStoreName = "storeName" // the store name should be the name of the store as defined by the Materialized instance
      * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
      * String key = "some-key";
      * Long sumForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
      * }</pre>
      * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
      * query the value of the key on a parallel running instance of your Kafka Streams application.
+     *
      * <p>
      * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * Therefore, the store name must be a valid Kafka topic name and cannot contain characters other than ASCII
-     * alphanumerics, '.', '_' and '-'.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
+     * The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is
      * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
+     * and "-changelog" is a fixed suffix.
+     * Note that the internal store name may not be queryable through Interactive Queries.
      *
-     * @param reducer               a {@link Reducer} that computes a new aggregate result. Cannot be {@code null}.
-     * @param queryableStoreName    the name of the underlying {@link KTable} state store; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} then this will be equivalent to {@link KGroupedStream#reduce(Reducer)} ()}.
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
+     * @param reducer       a {@link Reducer} that computes a new aggregate result. Cannot be {@code null}.
+     * @param materialized  an instance of {@link Materialized} used to materialize a state store. Cannot be {@code null}.
      * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
      * latest (rolling) aggregate for each key
-     * @deprecated  use {@link #reduce(Reducer, Materialized) reduce(reducer, Materialized.as(queryableStoreName))}
      */
-    @Deprecated
     KTable<K, V> reduce(final Reducer<V> reducer,
-                        final String queryableStoreName);
-
+                        final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized);
 
     /**
-     * Combine the value of records in this stream by the grouped key.
+     * Aggregate the values of records in this stream by the grouped key.
      * Records with {@code null} key or value are ignored.
-     * Combining implies that the type of the aggregate result is the same as the type of the input value
-     * (c.f. {@link #aggregate(Initializer, Aggregator, org.apache.kafka.streams.processor.StateStoreSupplier)}).
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * provided by the given {@code storeSupplier}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
+     * Aggregating is a generalization of {@link #reduce(Reducer) combining via reduce(...)} as it, for example,
+     * allows the result to have a different type than the input values.
      * <p>
-     * The specified {@link Reducer} is applied for each input record and computes a new aggregate using the current
-     * aggregate (first argument) and the record's value (second argument):
-     * <pre>{@code
-     * // At the example of a Reducer<Long>
-     * new Reducer<Long>() {
-     *   public Long apply(Long aggValue, Long currValue) {
-     *     return aggValue + currValue;
-     *   }
-     * }
-     * }</pre>
+     * The specified {@link Initializer} is applied once directly before the first input record is processed to
+     * provide an initial intermediate aggregation result that is used to process the first record.
+     * The specified {@link Aggregator} is applied for each input record and computes a new aggregate using the current
+     * aggregate (or for the very first record using the intermediate aggregation result provided via the
+     * {@link Initializer}) and the record's value.
+     * Thus, {@code aggregate(Initializer, Aggregator)} can be used to compute aggregate functions like
+     * count (c.f. {@link #count()}).
      * <p>
-     * If there is no current aggregate the {@link Reducer} is not applied and the new aggregate will be the record's
-     * value as-is.
-     * Thus, {@code reduce(Reducer, org.apache.kafka.streams.processor.StateStoreSupplier)} can be used to compute
-     * aggregate functions like sum, min, or max.
+     * The default value serde from config will be used for serializing the result.
+     * If a different serde is required then you should use {@link #aggregate(Initializer, Aggregator, Materialized)}.
      * <p>
      * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
      * the same key.
@@ -546,440 +237,33 @@ public interface KGroupedStream<K, V> {
      * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
      * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
      * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
+     *
      * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
-     * Use {@link org.apache.kafka.streams.processor.StateStoreSupplier#name()} to get the store name:
-     * <pre>{@code
-     * KafkaStreams streams = ... // compute sum
-     * String queryableStoreName = storeSupplier.name();
-     * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
-     * String key = "some-key";
-     * Long sumForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
+     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
+     * The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is
+     * user-specified in {@link StreamsConfig} via parameter
+     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
+     * and "-changelog" is a fixed suffix.
+     * Note that the internal store name may not be queryable through Interactive Queries.
      *
-     * @param reducer       a {@link Reducer} that computes a new aggregate result. Cannot be {@code null}.
-     * @param storeSupplier user defined state store supplier. Cannot be {@code null}.
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
+     * @param initializer   an {@link Initializer} that computes an initial intermediate aggregation result
+     * @param aggregator    an {@link Aggregator} that computes a new aggregate result
+     * @param <VR>          the value type of the resulting {@link KTable}
      * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
      * latest (rolling) aggregate for each key
-     * @deprecated use {@link #reduce(Reducer, Materialized) reduce(reducer, Materialized.as(KeyValueByteStoreSupplier))}
      */
-    @Deprecated
-    KTable<K, V> reduce(final Reducer<V> reducer,
-                        final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier);
+    <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
+                                 final Aggregator<? super K, ? super V, VR> aggregator);
 
     /**
-     * Combine the value of records in this stream by the grouped key.
+     * Aggregate the values of records in this stream by the grouped key.
      * Records with {@code null} key or value are ignored.
-     * Combining implies that the type of the aggregate result is the same as the type of the input value
-     * (c.f. {@link #aggregate(Initializer, Aggregator, Materialized)}).
+     * Aggregating is a generalization of {@link #reduce(Reducer) combining via reduce(...)} as it, for example,
+     * allows the result to have a different type than the input values.
      * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * provided by the given {@code materialized}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * The specified {@link Reducer} is applied for each input record and computes a new aggregate using the current
-     * aggregate (first argument) and the record's value (second argument):
-     * <pre>{@code
-     * // At the example of a Reducer<Long>
-     * new Reducer<Long>() {
-     *   public Long apply(Long aggValue, Long currValue) {
-     *     return aggValue + currValue;
-     *   }
-     * }
-     * }</pre>
-     * <p>
-     * If there is no current aggregate the {@link Reducer} is not applied and the new aggregate will be the record's
-     * value as-is.
-     * Thus, {@code reduce(Reducer, Materialized)} can be used to compute aggregate functions like sum, min, or
-     * max.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
-     * <pre>{@code
-     * KafkaStreams streams = ... // compute sum
-     * String queryableStoreName = "storeName" // the queryableStoreName should be the name of the store as defined by the Materialized instance
-     * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
-     * String key = "some-key";
-     * Long sumForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     *
-     * @param reducer       a {@link Reducer} that computes a new aggregate result. Cannot be {@code null}.
-     * @param materialized  an instance of {@link Materialized} used to materialize a state store. Cannot be {@code null}.
-     * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
-     * latest (rolling) aggregate for each key
-     */
-    KTable<K, V> reduce(final Reducer<V> reducer,
-                        final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized);
-
-    /**
-     * Combine the number of records in this stream by the grouped key and the defined windows.
-     * Records with {@code null} key or value are ignored.
-     * Combining implies that the type of the aggregate result is the same as the type of the input value
-     * (c.f. {@link #aggregate(Initializer, Aggregator, Windows, Serde, String)}).
-     * The specified {@code windows} define either hopping time windows that can be overlapping or tumbling (c.f.
-     * {@link TimeWindows}) or they define landmark windows (c.f. {@link UnlimitedWindows}).
-     * The result is written into a local windowed {@link KeyValueStore} (which is basically an ever-updating
-     * materialized view) that can be queried using the provided {@code queryableStoreName}.
-     * Windows are retained until their retention time expires (c.f. {@link Windows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * The specified {@link Reducer} is applied for each input record and computes a new aggregate using the current
-     * aggregate (first argument) and the record's value (second argument):
-     * <pre>{@code
-     * // At the example of a Reducer<Long>
-     * new Reducer<Long>() {
-     *   public Long apply(Long aggValue, Long currValue) {
-     *     return aggValue + currValue;
-     *   }
-     * }
-     * }</pre>
-     * <p>
-     * If there is no current aggregate the {@link Reducer} is not applied and the new aggregate will be the record's
-     * value as-is.
-     * Thus, {@code reduce(Reducer, Windows, String)} can be used to compute aggregate functions like sum, min, or max.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local windowed {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // compute sum
-     * ReadOnlyWindowStore<String,Long> localWindowStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>windowStore());
-     * String key = "some-key";
-     * long fromTime = ...;
-     * long toTime = ...;
-     * WindowStoreIterator<Long> sumForKeyForWindows = localWindowStore.fetch(key, fromTime, toTime); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * Therefore, the store name must be a valid Kafka topic name and cannot contain characters other than ASCII
-     * alphanumerics, '.', '_' and '-'.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param reducer   a {@link Reducer} that computes a new aggregate result
-     * @param windows   the specification of the aggregation {@link Windows}
-     * @param queryableStoreName the name of the state store created from this operation; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} then this will be equivalent to {@link KGroupedStream#reduce(Reducer, Windows)}.
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys, and values that represent
-     * the latest (rolling) aggregate for each key within a window
-     * @deprecated use {@link #windowedBy(Windows) windowedBy(windows)} followed by
-     * {@link TimeWindowedKStream#reduce(Reducer, Materialized) reduce(reducer, Materialized.as(queryableStoreName))}
-     */
-    @Deprecated
-    <W extends Window> KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
-                                                     final Windows<W> windows,
-                                                     final String queryableStoreName);
-
-    /**
-     * Combine the number of records in this stream by the grouped key and the defined windows.
-     * Records with {@code null} key or value are ignored.
-     * Combining implies that the type of the aggregate result is the same as the type of the input value
-     * (c.f. {@link #aggregate(Initializer, Aggregator, Windows, Serde, String)}).
-     * The specified {@code windows} define either hopping time windows that can be overlapping or tumbling (c.f.
-     * {@link TimeWindows}) or they define landmark windows (c.f. {@link UnlimitedWindows}).
-     * The result is written into a local windowed {@link KeyValueStore} (which is basically an ever-updating
-     * materialized view) that can be queried using the provided {@code queryableStoreName}.
-     * Windows are retained until their retention time expires (c.f. {@link Windows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * The specified {@link Reducer} is applied for each input record and computes a new aggregate using the current
-     * aggregate and the record's value.
-     * If there is no current aggregate the {@link Reducer} is not applied and the new aggregate will be the record's
-     * value as-is.
-     * Thus, {@code reduce(Reducer, Windows, String)} can be used to compute aggregate functions like sum, min, or max.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size} and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
-     * and "-changelog" is a fixed suffix.
-     * Note that the internal store name may not be queryable through Interactive Queries.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param reducer   a {@link Reducer} that computes a new aggregate result
-     * @param windows   the specification of the aggregation {@link Windows}
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys, and values that represent
-     * the latest (rolling) aggregate for each key within a window
-     * @deprecated use {@link #windowedBy(Windows) windowedBy(windows)} followed by
-     * {@link TimeWindowedKStream#reduce(Reducer) reduce(reducer)}
-     */
-    @Deprecated
-    <W extends Window> KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
-                                                     final Windows<W> windows);
-
-    /**
-     * Combine the values of records in this stream by the grouped key and the defined windows.
-     * Records with {@code null} key or value are ignored.
-     * Combining implies that the type of the aggregate result is the same as the type of the input value
-     * (c.f. {@link #aggregate(Initializer, Aggregator, Windows, Serde, String)}).
-     * The specified {@code windows} define either hopping time windows that can be overlapping or tumbling (c.f.
-     * {@link TimeWindows}) or they define landmark windows (c.f. {@link UnlimitedWindows}).
-     * The result is written into a local windowed {@link KeyValueStore} (which is basically an ever-updating
-     * materialized view) provided by the given {@code storeSupplier}.
-     * Windows are retained until their retention time expires (c.f. {@link Windows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * The specified {@link Reducer} is applied for each input record and computes a new aggregate using the current
-     * aggregate (first argument) and the record's value (second argument):
-     * <pre>{@code
-     * // Example: a Reducer<Long> that sums two values
-     * new Reducer<Long>() {
-     *   public Long apply(Long aggValue, Long currValue) {
-     *     return aggValue + currValue;
-     *   }
-     * }
-     * }</pre>
-     * <p>
-     * If there is no current aggregate the {@link Reducer} is not applied and the new aggregate will be the record's
-     * value as-is.
-     * Thus, {@code reduce(Reducer, Windows, org.apache.kafka.streams.processor.StateStoreSupplier)} can be used to
-     * compute aggregate functions like sum, min, or max.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size} and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local windowed {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
-     * Use {@link org.apache.kafka.streams.processor.StateStoreSupplier#name()} to get the store name:
-     * <pre>{@code
-     * KafkaStreams streams = ... // compute sum
-     * String queryableStoreName = storeSupplier.name();
-     * ReadOnlyWindowStore<String,Long> localWindowStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>windowStore());
-     * String key = "some-key";
-     * long fromTime = ...;
-     * long toTime = ...;
-     * WindowStoreIterator<Long> sumForKeyForWindows = localWindowStore.fetch(key, fromTime, toTime); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     *
-     * @param reducer       a {@link Reducer} that computes a new aggregate result. Cannot be {@code null}.
-     * @param windows       the specification of the aggregation {@link Windows}
-     * @param storeSupplier user defined state store supplier. Cannot be {@code null}.
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys, and values that represent
-     * the latest (rolling) aggregate for each key within a window
-     * @deprecated use {@link #windowedBy(Windows) windowedBy(windows)} followed by
-     * {@link TimeWindowedKStream#reduce(Reducer, Materialized) reduce(reducer, Materialized.as(WindowBytesStoreSupplier))}
-     */
-    @Deprecated
-    <W extends Window> KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
-                                                     final Windows<W> windows,
-                                                     final org.apache.kafka.streams.processor.StateStoreSupplier<WindowStore> storeSupplier);
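
Where a StateStoreSupplier was passed before, the Materialized path can pin the store type and value serde explicitly; a sketch under the same assumptions as the previous snippet:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.state.WindowStore;

    KTable<Windowed<String>, Long> sums = grouped
        .windowedBy(TimeWindows.of(60_000L))
        .reduce((agg, value) -> agg + value,
                Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("sum-store")
                            .withValueSerde(Serdes.Long()));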
-
-    /**
-     * Combine values of this stream by the grouped key into {@link SessionWindows}.
-     * Records with {@code null} key or value are ignored.
-     * Combining implies that the type of the aggregate result is the same as the type of the input value
-     * (c.f. {@link #aggregate(Initializer, Aggregator, Merger, SessionWindows, Serde, String)}).
-     * The result is written into a local {@link SessionStore} (which is basically an ever-updating
-     * materialized view) that can be queried using the provided {@code queryableStoreName}.
-     * SessionWindows are retained until their retention time expires (c.f. {@link SessionWindows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * The specified {@link Reducer} is applied for each input record and computes a new aggregate using the current
-     * aggregate (first argument) and the record's value (second argument):
-     * <pre>{@code
-     * // Example: a Reducer<Long> that sums two values
-     * new Reducer<Long>() {
-     *   public Long apply(Long aggValue, Long currValue) {
-     *     return aggValue + currValue;
-     *   }
-     * }
-     * }</pre>
-     * <p>
-     * If there is no current aggregate the {@link Reducer} is not applied and the new aggregate will be the record's
-     * value as-is.
-     * Thus, {@code reduce(Reducer, SessionWindows, String)} can be used to compute aggregate functions like sum, min,
-     * or max.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size} and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link SessionStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
-     * <pre>{@code
-     * KafkaStreams streams = ... // compute sum
-     * ReadOnlySessionStore<String, Long> localSessionStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>sessionStore());
-     * String key = "some-key";
-     * KeyValueIterator<Windowed<String>, Long> sumForKeyForWindows = localSessionStore.fetch(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * Therefore, the store name must be a valid Kafka topic name and cannot contain characters other than ASCII
-     * alphanumerics, '.', '_' and '-'.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param reducer           a {@link Reducer} that computes a new aggregate result. Cannot be {@code null}.
-     * @param sessionWindows    the specification of the aggregation {@link SessionWindows}
-     * @param queryableStoreName     the name of the state store created from this operation; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} then this will be equivalent to {@link KGroupedStream#reduce(Reducer, SessionWindows)}.
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys, and values that represent
-     * the latest (rolling) aggregate for each key within a window
-     * @deprecated use {@link #windowedBy(SessionWindows) windowedBy(sessionWindows)} followed by
-     * {@link SessionWindowedKStream#reduce(Reducer, Materialized) reduce(reducer, Materialized.as(queryableStoreName))}
-     */
-    @Deprecated
-    KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
-                                  final SessionWindows sessionWindows,
-                                  final String queryableStoreName);
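
The session-windowed reduce overloads migrate the same way; a sketch (the inactivity gap and store name are illustrative):

    import org.apache.kafka.streams.kstream.SessionWindows;

    KTable<Windowed<String>, Long> sessionSums = grouped
        .windowedBy(SessionWindows.with(300_000L))      // 5-minute inactivity gap
        .reduce((agg, value) -> agg + value,
                Materialized.as("session-sum-store"));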
-
-    /**
-     * Combine values of this stream by the grouped key into {@link SessionWindows}.
-     * Records with {@code null} key or value are ignored.
-     * Combining implies that the type of the aggregate result is the same as the type of the input value
-     * (c.f. {@link #aggregate(Initializer, Aggregator, Merger, SessionWindows, Serde, String)}).
-     * The result is written into a local {@link SessionStore} (which is basically an ever-updating
-     * materialized view) with an internally generated store name.
-     * SessionWindows are retained until their retention time expires (c.f. {@link SessionWindows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * The specified {@link Reducer} is applied for each input record and computes a new aggregate using the current
-     * aggregate and the record's value.
-     * If there is no current aggregate the {@link Reducer} is not applied and the new aggregate will be the record's
-     * value as-is.
-     * Thus, {@code reduce(Reducer, SessionWindows)} can be used to compute aggregate functions like sum, min,
-     * or max.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size} and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     *
-     * @param reducer           a {@link Reducer} that computes a new aggregate result. Cannot be {@code null}.
-     * @param sessionWindows    the specification of the aggregation {@link SessionWindows}
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys, and values that represent
-     * the latest (rolling) aggregate for each key within a window
-     * @deprecated use {@link #windowedBy(SessionWindows) windowedBy(sessionWindows)} followed by
-     * {@link SessionWindowedKStream#reduce(Reducer) reduce(reducer)}
-     */
-    @Deprecated
-    KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
-                                  final SessionWindows sessionWindows);
-
-    /**
-     * Combine values of this stream by the grouped key into {@link SessionWindows}.
-     * Records with {@code null} key or value are ignored.
-     * Combining implies that the type of the aggregate result is the same as the type of the input value
-     * (c.f. {@link #aggregate(Initializer, Aggregator, Merger, SessionWindows, Serde, String)}).
-     * The result is written into a local {@link SessionStore} (which is basically an ever-updating materialized view)
-     * provided by the given {@code storeSupplier}.
-     * SessionWindows are retained until their retention time expires (c.f. {@link SessionWindows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * The specified {@link Reducer} is applied for each input record and computes a new aggregate using the current
-     * aggregate (first argument) and the record's value (second argument):
-     * <pre>{@code
-     * // Example: a Reducer<Long> that sums two values
-     * new Reducer<Long>() {
-     *   public Long apply(Long aggValue, Long currValue) {
-     *     return aggValue + currValue;
-     *   }
-     * }
-     * }</pre>
-     * <p>
-     * If there is no current aggregate the {@link Reducer} is not applied and the new aggregate will be the record's
-     * value as-is.
-     * Thus, {@code reduce(Reducer, SessionWindows, org.apache.kafka.streams.processor.StateStoreSupplier)} can be used
-     * to compute aggregate functions like sum, min, or max.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size} and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link SessionStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
-     * Use {@link org.apache.kafka.streams.processor.StateStoreSupplier#name()} to get the store name:
-     * <pre>{@code
-     * KafkaStreams streams = ... // compute sum
-     * String queryableStoreName = storeSupplier.name();
-     * ReadOnlySessionStore<String, Long> localSessionStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>sessionStore());
-     * String key = "some-key";
-     * KeyValueIterator<Windowed<String>, Long> sumForKeyForWindows = localSessionStore.fetch(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * Therefore, the store name must be a valid Kafka topic name and cannot contain characters other than ASCII
-     * alphanumerics, '.', '_' and '-'.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param reducer           a {@link Reducer} that computes a new aggregate result. Cannot be {@code null}.
-     * @param sessionWindows    the specification of the aggregation {@link SessionWindows}
-     * @param storeSupplier     user defined state store supplier. Cannot be {@code null}.
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys, and values that represent
-     * the latest (rolling) aggregate for each key within a window
-     * @deprecated use {@link #windowedBy(SessionWindows) windowedBy(sessionWindows)} followed by
-     * {@link SessionWindowedKStream#reduce(Reducer, Materialized) reduce(reducer, Materialized.as(SessionBytesStoreSupplier))}
-     */
-    @Deprecated
-    KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
-                                  final SessionWindows sessionWindows,
-                                  final org.apache.kafka.streams.processor.StateStoreSupplier<SessionStore> storeSupplier);
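
For the interactive-query side that the javadoc above sketches, a compilable version (assuming a running KafkaStreams instance `streams` and the store name from the previous snippet):

    import org.apache.kafka.streams.KafkaStreams;
    import org.apache.kafka.streams.KeyValue;
    import org.apache.kafka.streams.state.KeyValueIterator;
    import org.apache.kafka.streams.state.QueryableStoreTypes;
    import org.apache.kafka.streams.state.ReadOnlySessionStore;

    KafkaStreams streams = ...;  // already started
    ReadOnlySessionStore<String, Long> store =
        streams.store("session-sum-store", QueryableStoreTypes.<String, Long>sessionStore());
    try (KeyValueIterator<Windowed<String>, Long> it = store.fetch("some-key")) {  // key must be local
        while (it.hasNext()) {
            KeyValue<Windowed<String>, Long> entry = it.next();
            System.out.println(entry.key.window().start() + " -> " + entry.value);
        }
    }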
-
-
-    /**
-     * Aggregate the values of records in this stream by the grouped key.
-     * Records with {@code null} key or value are ignored.
-     * Aggregating is a generalization of {@link #reduce(Reducer, String) combining via reduce(...)} as it, for example,
-     * allows the result to have a different type than the input values.
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * that can be queried using the provided {@code queryableStoreName}.
+     * that can be queried using the store name provided with {@code materialized}.
      * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
      * <p>
      * The specified {@link Initializer} is applied once directly before the first input record is processed to
@@ -988,7 +272,7 @@ public interface KGroupedStream<K, V> {
      * aggregate (or for the very first record using the intermediate aggregation result provided via the
      * {@link Initializer}) and the record's value.
      * Thus, {@code aggregate(Initializer, Aggregator, Materialized)} can be used to compute aggregate functions like
-     * count (c.f. {@link #count(String)}).
+     * count (c.f. {@link #count()}).
      * <p>
      * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
      * the same key.
@@ -1001,86 +285,28 @@ public interface KGroupedStream<K, V> {
      * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
      * <pre>{@code
      * KafkaStreams streams = ... // some aggregation on value type double
+     * String queryableStoreName = "storeName"; // the store name, as defined by the Materialized instance
      * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
      * String key = "some-key";
      * Long aggForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
      * }</pre>
      * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
      * query the value of the key on a parallel running instance of your Kafka Streams application.
+     *
      * <p>
      * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * Therefore, the store name must be a valid Kafka topic name and cannot contain characters other than ASCII
+     * Therefore, the store name defined by the {@link Materialized} instance must be a valid Kafka topic name and cannot contain characters other than ASCII
      * alphanumerics, '.', '_' and '-'.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
+     * The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is
      * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provide {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is the
+     * provided store name defined in {@code Materialized}, and "-changelog" is a fixed suffix.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @param initializer   an {@link Initializer} that computes an initial intermediate aggregation result
      * @param aggregator    an {@link Aggregator} that computes a new aggregate result
-     * @param aggValueSerde aggregate value serdes for materializing the aggregated table,
-     *                      if not specified the default serdes defined in the configs will be used
-     * @param queryableStoreName the name of the state store created from this operation; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} then this will be equivalent to {@link KGroupedStream#aggregate(Initializer, Aggregator, Serde)}.
-     * @param <VR>          the value type of the resulting {@link KTable}
-     * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
-     * latest (rolling) aggregate for each key
-     * @deprecated use {@link #aggregate(Initializer, Aggregator, Materialized) aggregate(initializer, aggregator, Materialized.as(queryableStoreName).withValueSerde(aggValueSerde))}
-     */
-    @Deprecated
-    <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
-                                 final Aggregator<? super K, ? super V, VR> aggregator,
-                                 final Serde<VR> aggValueSerde,
-                                 final String queryableStoreName);
-
-    /**
-     * Aggregate the values of records in this stream by the grouped key.
-     * Records with {@code null} key or value are ignored.
-     * Aggregating is a generalization of {@link #reduce(Reducer) combining via reduce(...)} as it, for example,
-     * allows the result to have a different type than the input values.
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * that can be queried using the store name provided with {@code materialized}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * The specified {@link Initializer} is applied once directly before the first input record is processed to
-     * provide an initial intermediate aggregation result that is used to process the first record.
-     * The specified {@link Aggregator} is applied for each input record and computes a new aggregate using the current
-     * aggregate (or for the very first record using the intermediate aggregation result provided via the
-     * {@link Initializer}) and the record's value.
-     * Thus, {@code aggregate(Initializer, Aggregator, Materialized)} can be used to compute aggregate functions like
-     * count (c.f. {@link #count()}).
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size} and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // some aggregation on value type double
-     * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
-     * String key = "some-key";
-     * Long aggForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * Therefore, the store name must be a valid Kafka topic name and cannot contain characters other than ASCII
-     * alphanumerics, '.', '_' and '-'.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param initializer   an {@link Initializer} that computes an initial intermediate aggregation result
-     * @param aggregator    an {@link Aggregator} that computes a new aggregate result
-     * @param materialized  an instance of {@link Materialized} used to materialize a state store. Cannot be {@code null}.
+     * @param materialized  an instance of {@link Materialized} used to materialize a state store. Cannot be {@code null}.
      * @param <VR>          the value type of the resulting {@link KTable}
      * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
      * latest (rolling) aggregate for each key
@@ -1089,496 +315,6 @@ public interface KGroupedStream<K, V> {
                                  final Aggregator<? super K, ? super V, VR> aggregator,
                                  final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized);
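
The overload kept above is the target all the removed aggregate variants converge on; a self-contained sketch of it, including the Topology#describe() call the rewritten javadoc points to (topic and store names are illustrative):

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.Consumed;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.state.KeyValueStore;

    StreamsBuilder builder = new StreamsBuilder();
    KTable<String, Long> lengths = builder
        .stream("input-topic", Consumed.with(Serdes.String(), Serdes.String()))
        .groupByKey()
        .aggregate(
            () -> 0L,                                   // Initializer
            (key, value, agg) -> agg + value.length(),  // Aggregator
            Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("length-store")
                        .withValueSerde(Serdes.Long()));  // result type differs from input, so give a value serde

    System.out.println(builder.build().describe());     // shows generated store / changelog names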
 
-
-    /**
-     * Aggregate the values of records in this stream by the grouped key.
-     * Records with {@code null} key or value are ignored.
-     * Aggregating is a generalization of {@link #reduce(Reducer) combining via reduce(...)} as it, for example,
-     * allows the result to have a different type than the input values.
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * with an internally generated store name.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * The specified {@link Initializer} is applied once directly before the first input record is processed to
-     * provide an initial intermediate aggregation result that is used to process the first record.
-     * The specified {@link Aggregator} is applied for each input record and computes a new aggregate using the current
-     * aggregate (or for the very first record using the intermediate aggregation result provided via the
-     * {@link Initializer}) and the record's value.
-     * Thus, {@code aggregate(Initializer, Aggregator)} can be used to compute aggregate functions like
-     * count (c.f. {@link #count()}).
-     * <p>
-     * The default value serde from config will be used for serializing the result.
-     * If a different serde is required then you should use {@link #aggregate(Initializer, Aggregator, Materialized)}.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size} and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
-     * and "-changelog" is a fixed suffix.
-     * Note that the internal store name may not be queryable through Interactive Queries.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param initializer   an {@link Initializer} that computes an initial intermediate aggregation result
-     * @param aggregator    an {@link Aggregator} that computes a new aggregate result
-     * @param <VR>          the value type of the resulting {@link KTable}
-     * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
-     * latest (rolling) aggregate for each key
-     */
-    <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
-                                 final Aggregator<? super K, ? super V, VR> aggregator);
-
-    /**
-     * Aggregate the values of records in this stream by the grouped key.
-     * Records with {@code null} key or value are ignored.
-     * Aggregating is a generalization of {@link #reduce(Reducer) combining via reduce(...)} as it, for example,
-     * allows the result to have a different type than the input values.
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * with an internally generated store name.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * The specified {@link Initializer} is applied once directly before the first input record is processed to
-     * provide an initial intermediate aggregation result that is used to process the first record.
-     * The specified {@link Aggregator} is applied for each input record and computes a new aggregate using the current
-     * aggregate (or for the very first record using the intermediate aggregation result provided via the
-     * {@link Initializer}) and the record's value.
-     * Thus, {@code aggregate(Initializer, Aggregator, Serde)} can be used to compute aggregate functions like
-     * count (c.f. {@link #count()}).
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size} and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
-     * and "-changelog" is a fixed suffix.
-     * Note that the internal store name may not be queryable through Interactive Queries.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param initializer   an {@link Initializer} that computes an initial intermediate aggregation result
-     * @param aggregator    an {@link Aggregator} that computes a new aggregate result
-     * @param aggValueSerde aggregate value serdes for materializing the aggregated table,
-     *                      if not specified the default serdes defined in the configs will be used
-     * @param <VR>          the value type of the resulting {@link KTable}
-     * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
-     * latest (rolling) aggregate for each key
-     * @deprecated use {@link #aggregate(Initializer, Aggregator, Materialized) aggregate(initializer, aggregator, Materialized.with(null, aggValueSerde))}
-     */
-    @Deprecated
-    <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
-                                 final Aggregator<? super K, ? super V, VR> aggregator,
-                                 final Serde<VR> aggValueSerde);
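
Since the Serde-only overload is removed, the stated replacement keeps the configured default key serde and overrides only the value serde; a sketch under the same assumptions as the earlier snippets:

    // Old: grouped.aggregate(initializer, aggregator, Serdes.Long());
    KTable<String, Long> totals = grouped.aggregate(
        () -> 0L,
        (key, value, sum) -> sum + value,
        Materialized.with(null, Serdes.Long()));  // null key serde = use the configured default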
-
-    /**
-     * Aggregate the values of records in this stream by the grouped key.
-     * Records with {@code null} key or value are ignored.
-     * Aggregating is a generalization of {@link #reduce(Reducer, org.apache.kafka.streams.processor.StateStoreSupplier)
-     * combining via reduce(...)} as it, for example, allows the result to have a different type than the input values.
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * provided by the given {@code storeSupplier}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * The specified {@link Initializer} is applied once directly before the first input record is processed to
-     * provide an initial intermediate aggregation result that is used to process the first record.
-     * The specified {@link Aggregator} is applied for each input record and computes a new aggregate using the current
-     * aggregate (or for the very first record using the intermediate aggregation result provided via the
-     * {@link Initializer}) and the record's value.
-     * Thus, {@code aggregate(Initializer, Aggregator, org.apache.kafka.streams.processor.StateStoreSupplier)} can be
-     * used to compute aggregate functions like count (c.f. {@link #count()}).
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size} and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
-     * Use {@link org.apache.kafka.streams.processor.StateStoreSupplier#name()} to get the store name:
-     * <pre>{@code
-     * KafkaStreams streams = ... // some aggregation on value type double
-     * String queryableStoreName = storeSupplier.name();
-     * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
-     * String key = "some-key";
-     * Long aggForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     *
-     * @param initializer   an {@link Initializer} that computes an initial intermediate aggregation result
-     * @param aggregator    an {@link Aggregator} that computes a new aggregate result
-     * @param storeSupplier user defined state store supplier. Cannot be {@code null}.
-     * @param <VR>          the value type of the resulting {@link KTable}
-     * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
-     * latest (rolling) aggregate for each key
-     * @deprecated use {@link #aggregate(Initializer, Aggregator, Materialized) aggregate(initializer, aggregator, Materialized.as(KeyValueBytesStoreSupplier))}
-     */
-    @Deprecated
-    <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
-                                 final Aggregator<? super K, ? super V, VR> aggregator,
-                                 final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier);
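
For the supplier-based overload, the Stores factory yields the bytes-store suppliers that Materialized.as accepts; a sketch (the in-memory store and its name are illustrative):

    import org.apache.kafka.streams.state.Stores;

    KTable<String, Long> totals = grouped.aggregate(
        () -> 0L,
        (key, value, sum) -> sum + value,
        Materialized.<String, Long>as(Stores.inMemoryKeyValueStore("agg-store"))
                    .withValueSerde(Serdes.Long()));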
-
-    /**
-     * Aggregate the values of records in this stream by the grouped key and defined windows.
-     * Records with {@code null} key or value are ignored.
-     * Aggregating is a generalization of {@link #reduce(Reducer, Windows, String) combining via reduce(...)} as it,
-     * for example, allows the result to have a different type than the input values.
-     * The specified {@code windows} define either hopping time windows that can be overlapping or tumbling (c.f.
-     * {@link TimeWindows}) or they define landmark windows (c.f. {@link UnlimitedWindows}).
-     * The result is written into a local windowed {@link KeyValueStore} (which is basically an ever-updating
-     * materialized view) that can be queried using the provided {@code queryableStoreName}.
-     * Windows are retained until their retention time expires (c.f. {@link Windows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * The specified {@link Initializer} is applied once per window directly before the first input record is
-     * processed to provide an initial intermediate aggregation result that is used to process the first record.
-     * The specified {@link Aggregator} is applied for each input record and computes a new aggregate using the current
-     * aggregate (or for the very first record using the intermediate aggregation result provided via the
-     * {@link Initializer}) and the record's value.
-     * Thus, {@code aggregate(Initializer, Aggregator, Windows, Serde, String)} can be used to compute aggregate
-     * functions like count (c.f. {@link #count(Windows)}).
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size} and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local windowed {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // some windowed aggregation on value type double
-     * ReadOnlyWindowStore<String,Long> localWindowStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>windowStore());
-     * String key = "some-key";
-     * long fromTime = ...;
-     * long toTime = ...;
-     * WindowStoreIterator<Long> aggForKeyForWindows = localWindowStore.fetch(key, fromTime, toTime); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * Therefore, the store name must be a valid Kafka topic name and cannot contain characters other than ASCII
-     * alphanumerics, '.', '_' and '-'.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param initializer   an {@link Initializer} that computes an initial intermediate aggregation result
-     * @param aggregator    an {@link Aggregator} that computes a new aggregate result
-     * @param windows       the specification of the aggregation {@link Windows}
-     * @param aggValueSerde aggregate value serdes for materializing the aggregated table,
-     *                      if not specified the default serdes defined in the configs will be used
-     * @param <VR>          the value type of the resulting {@link KTable}
-     * @param queryableStoreName the name of the state store created from this operation; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} then this will be equivalent to {@link KGroupedStream#aggregate(Initializer, Aggregator, Windows, Serde)}.
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys, and values that represent
-     * the latest (rolling) aggregate for each key within a window
-     * @deprecated use {@link #windowedBy(Windows) windowedBy(windows)} followed by
-     * {@link TimeWindowedKStream#aggregate(Initializer, Aggregator, Materialized) aggregate(initializer, aggregator, Materialized.as(queryableStoreName).withValueSerde(aggValueSerde))}
-     */
-    @Deprecated
-    <W extends Window, VR> KTable<Windowed<K>, VR> aggregate(final Initializer<VR> initializer,
-                                                             final Aggregator<? super K, ? super V, VR> aggregator,
-                                                             final Windows<W> windows,
-                                                             final Serde<VR> aggValueSerde,
-                                                             final String queryableStoreName);
-
-    /**
-     * Aggregate the values of records in this stream by the grouped key and defined windows.
-     * Records with {@code null} key or value are ignored.
-     * Aggregating is a generalization of {@link #reduce(Reducer, Windows, String) combining via reduce(...)} as it,
-     * for example, allows the result to have a different type than the input values.
-     * The specified {@code windows} define either hopping time windows that can be overlapping or tumbling (c.f.
-     * {@link TimeWindows}) or they define landmark windows (c.f. {@link UnlimitedWindows}).
-     * The result is written into a local windowed {@link KeyValueStore} (which is basically an ever-updating
-     * materialized view) with an internally generated store name.
-     * Windows are retained until their retention time expires (c.f. {@link Windows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * The specified {@link Initializer} is applied once per window directly before the first input record is
-     * processed to provide an initial intermediate aggregation result that is used to process the first record.
-     * The specified {@link Aggregator} is applied for each input record and computes a new aggregate using the current
-     * aggregate (or for the very first record using the intermediate aggregation result provided via the
-     * {@link Initializer}) and the record's value.
-     * Thus, {@code aggregate(Initializer, Aggregator, Windows, Serde)} can be used to compute aggregate
-     * functions like count (c.f. {@link #count(Windows)}).
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size} and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
-     * and "-changelog" is a fixed suffix.
-     * Note that the internal store name may not be queryable through Interactive Queries.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param initializer   an {@link Initializer} that computes an initial intermediate aggregation result
-     * @param aggregator    an {@link Aggregator} that computes a new aggregate result
-     * @param windows       the specification of the aggregation {@link Windows}
-     * @param aggValueSerde aggregate value serdes for materializing the aggregated table,
-     *                      if not specified the default serdes defined in the configs will be used
-     * @param <VR>          the value type of the resulting {@link KTable}
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys, and values that represent
-     * the latest (rolling) aggregate for each key within a window
-     * @deprecated use {@link #windowedBy(Windows) windowedBy(windows)} followed by
-     * {@link TimeWindowedKStream#aggregate(Initializer, Aggregator, Materialized) aggregate(initializer, aggregator, Materialized.with(null, aggValueSerde))}
-     */
-    @Deprecated
-    <W extends Window, VR> KTable<Windowed<K>, VR> aggregate(final Initializer<VR> initializer,
-                                                             final Aggregator<? super K, ? super V, VR> aggregator,
-                                                             final Windows<W> windows,
-                                                             final Serde<VR> aggValueSerde);
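
The windowed variant follows the same replacement shape; a sketch:

    KTable<Windowed<String>, Long> counts = grouped
        .windowedBy(TimeWindows.of(60_000L))
        .aggregate(
            () -> 0L,
            (key, value, agg) -> agg + 1L,              // counting, so the input value type is irrelevant
            Materialized.with(null, Serdes.Long()));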
-
-    /**
-     * Aggregate the values of records in this stream by the grouped key and defined windows.
-     * Records with {@code null} key or value are ignored.
-     * Aggregating is a generalization of
-     * {@link #reduce(Reducer, Windows, org.apache.kafka.streams.processor.StateStoreSupplier) combining via reduce(...)}
-     * as it, for example, allows the result to have a different type than the input values.
-     * The specified {@code windows} define either hopping time windows that can be overlapping or tumbling (c.f.
-     * {@link TimeWindows}) or they define landmark windows (c.f. {@link UnlimitedWindows}).
-     * The result is written into a local windowed {@link KeyValueStore} (which is basically an ever-updating
-     * materialized view) provided by the given {@code storeSupplier}.
-     * Windows are retained until their retention time expires (c.f. {@link Windows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * The specified {@link Initializer} is applied once per window directly before the first input record is
-     * processed to provide an initial intermediate aggregation result that is used to process the first record.
-     * The specified {@link Aggregator} is applied for each input record and computes a new aggregate using the current
-     * aggregate (or for the very first record using the intermediate aggregation result provided via the
-     * {@link Initializer}) and the record's value.
-     * Thus, {@code aggregate(Initializer, Aggregator, Windows, org.apache.kafka.streams.processor.StateStoreSupplier)}
-     * can be used to compute aggregate functions like count (c.f. {@link #count(Windows)}).
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size} and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local windowed {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
-     * Use {@link org.apache.kafka.streams.processor.StateStoreSupplier#name()} to get the store name:
-     * <pre>{@code
-     * KafkaStreams streams = ... // some windowed aggregation on value type Long
-     * String queryableStoreName = storeSupplier.name();
-     * ReadOnlyWindowStore<String,Long> localWindowStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>windowStore());
-     * String key = "some-key";
-     * long fromTime = ...;
-     * long toTime = ...;
-     * WindowStoreIterator<Long> aggForKeyForWindows = localWindowStore.fetch(key, fromTime, toTime); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     *
-     * @param initializer   an {@link Initializer} that computes an initial intermediate aggregation result
-     * @param aggregator    an {@link Aggregator} that computes a new aggregate result
-     * @param windows       the specification of the aggregation {@link Windows}
-     * @param <VR>          the value type of the resulting {@link KTable}
-     * @param storeSupplier user defined state store supplier. Cannot be {@code null}.
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys, and values that represent
-     * the latest (rolling) aggregate for each key within a window
-     * @deprecated use {@link #windowedBy(Windows) windowedBy(windows)} followed by
-     * {@link TimeWindowedKStream#aggregate(Initializer, Aggregator, Materialized) aggregate(initializer, aggregator, Materialized.as(WindowBytesStoreSupplier))}
-     */
-    @Deprecated
-    <W extends Window, VR> KTable<Windowed<K>, VR> aggregate(final Initializer<VR> initializer,
-                                                             final Aggregator<? super K, ? super V, VR> aggregator,
-                                                             final Windows<W> windows,
-                                                             final org.apache.kafka.streams.processor.StateStoreSupplier<WindowStore> storeSupplier);
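
And the query side for a windowed store, as a compilable version of the fetch snippets in the javadoc above (store name and time range are illustrative):

    import org.apache.kafka.streams.state.ReadOnlyWindowStore;
    import org.apache.kafka.streams.state.WindowStoreIterator;

    ReadOnlyWindowStore<String, Long> windowStore =
        streams.store("sum-store", QueryableStoreTypes.<String, Long>windowStore());
    long toTime = System.currentTimeMillis();
    long fromTime = toTime - 3_600_000L;                // last hour
    try (WindowStoreIterator<Long> it = windowStore.fetch("some-key", fromTime, toTime)) {
        while (it.hasNext()) {
            KeyValue<Long, Long> entry = it.next();     // key is the window start timestamp
            System.out.println(entry.key + " -> " + entry.value);
        }
    }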
-
-    /**
-     * Aggregate the values of records in this stream by the grouped key and defined {@link SessionWindows}.
-     * Records with {@code null} key or value are ignored.
-     * Aggregating is a generalization of {@link #reduce(Reducer, SessionWindows, String) combining via
-     * reduce(...)} as it, for example, allows the result to have a different type than the input values.
-     * The result is written into a local {@link SessionStore} (which is basically an ever-updating
-     * materialized view) that can be queried using the provided {@code queryableStoreName}.
-     * SessionWindows are retained until their retention time expires (c.f. {@link SessionWindows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * The specified {@link Initializer} is applied once per session directly before the first input record is
-     * processed to provide an initial intermediate aggregation result that is used to process the first record.
-     * The specified {@link Aggregator} is applied for each input record and computes a new aggregate using the current
-     * aggregate (or for the very first record using the intermediate aggregation result provided via the
-     * {@link Initializer}) and the record's value.
-     * Thus, {@code aggregate(Initializer, Aggregator, Merger, SessionWindows, Serde, String)} can be used to compute
-     * aggregate functions like count (c.f. {@link #count(SessionWindows)}).
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size} and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link SessionStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
-     * <pre>{@code
-     * KafkaStreams streams = ... // some windowed aggregation on value type double
-     * ReadOnlySessionStore<String, Long> sessionStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>sessionStore());
-     * String key = "some-key";
-     * KeyValueIterator<Windowed<String>, Long> aggForKeyForSession = sessionStore.fetch(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     *
-     * @param initializer    the instance of {@link Initializer}
-     * @param aggregator     the instance of {@link Aggregator}
-     * @param sessionMerger  the instance of {@link Merger}
-     * @param sessionWindows the specification of the aggregation {@link SessionWindows}
-     * @param aggValueSerde aggregate value serdes for materializing the aggregated table,
-     *                      if not specified, the default serdes defined in the configs will be used
-     * @param <T>           the value type of the resulting {@link KTable}
-     * @param queryableStoreName the name of the state store created from this operation; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} then this will be equivalent to {@link KGroupedStream#aggregate(Initializer, Aggregator, Merger, SessionWindows, Serde)}.
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys, and values that represent
-     * the latest (rolling) aggregate for each key within a window
-     * @deprecated use {@link #windowedBy(SessionWindows) windowedBy(sessionWindows)} followed by
-     * {@link SessionWindowedKStream#aggregate(Initializer, Aggregator, Merger, Materialized) aggregate(initializer, aggregator, sessionMerger, Materialized.as(queryableStoreName).withValueSerde(aggValueSerde))}
-     */
-    @Deprecated
-    <T> KTable<Windowed<K>, T> aggregate(final Initializer<T> initializer,
-                                         final Aggregator<? super K, ? super V, T> aggregator,
-                                         final Merger<? super K, T> sessionMerger,
-                                         final SessionWindows sessionWindows,
-                                         final Serde<T> aggValueSerde,
-                                         final String queryableStoreName);
-
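The session-windowed replacement follows the same shape; the extra Merger joins two session aggregates when a late record bridges previously separate sessions. A hedged sketch with illustrative names (imports as in the sketch above, plus org.apache.kafka.streams.state.SessionStore):

    // assumes: KGroupedStream<String, Long> groupedStream = ...
    KTable<Windowed<String>, Long> sessionAgg = groupedStream
        .windowedBy(SessionWindows.with(5 * 60 * 1000L))    // 5-minute inactivity gap
        .aggregate(
            () -> 0L,                                       // Initializer, once per session
            (key, value, agg) -> agg + value,               // Aggregator, per input record
            (key, leftAgg, rightAgg) -> leftAgg + rightAgg, // Merger for merged sessions
            Materialized.<String, Long, SessionStore<Bytes, byte[]>>as("session-agg-store")
                .withValueSerde(Serdes.Long()));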
-    /**
-     * Aggregate the values of records in this stream by the grouped key and defined {@link SessionWindows}.
-     * Records with {@code null} key or value are ignored.
-     * Aggregating is a generalization of {@link #reduce(Reducer, SessionWindows, String) combining via
-     * reduce(...)} as it, for example, allows the result to have a different type than the input values.
-     * The result is written into a local {@link SessionStore} (which is basically an ever-updating
-     * materialized view) that can be queried using the provided {@code queryableStoreName}.
-     * SessionWindows are retained until their retention time expires (c.f. {@link SessionWindows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * The specified {@link Initializer} is applied once per session directly before the first input record is
-     * processed to provide an initial intermediate aggregation result that is used to process the first record.
-     * The specified {@link Aggregator} is applied for each input record and computes a new aggregate using the current
-     * aggregate (or for the very first record using the intermediate aggregation result provided via the
-     * {@link Initializer}) and the record's value.
-     * Thus, {@code aggregate(Initializer, Aggregator, Merger, SessionWindows, Serde, String)} can be used to compute
-     * aggregate functions like count (c.f. {@link #count(SessionWindows)}).
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * @param initializer    the instance of {@link Initializer}
-     * @param aggregator     the instance of {@link Aggregator}
-     * @param sessionMerger  the instance of {@link Merger}
-     * @param sessionWindows the specification of the aggregation {@link SessionWindows}
-     * @param aggValueSerde aggregate value serdes for materializing the aggregated table,
-     *                      if not specified, the default serdes defined in the configs will be used
-     * @param <T>           the value type of the resulting {@link KTable}
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys, and values that represent
-     * the latest (rolling) aggregate for each key within a window
-     * @deprecated use {@link #windowedBy(SessionWindows) windowedBy(sessionWindows)} followed by
-     * {@link SessionWindowedKStream#aggregate(Initializer, Aggregator, Merger, Materialized) aggregate(initializer, aggregator, sessionMerger, Materialized.with(null, aggValueSerde))}
-     */
-    @Deprecated
-    <T> KTable<Windowed<K>, T> aggregate(final Initializer<T> initializer,
-                                         final Aggregator<? super K, ? super V, T> aggregator,
-                                         final Merger<? super K, T> sessionMerger,
-                                         final SessionWindows sessionWindows,
-                                         final Serde<T> aggValueSerde);
-
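When no queryable store name is wanted, the Materialized.with(keySerde, valueSerde) form named in this deprecation note materializes with serdes only; passing null falls back to the configured defaults. A one-statement sketch, reusing the assumed initializer, aggregator, and sessionMerger from the sketch above:

    KTable<Windowed<String>, Long> t = groupedStream
        .windowedBy(SessionWindows.with(5 * 60 * 1000L))
        .aggregate(initializer, aggregator, sessionMerger,
            Materialized.with(null, Serdes.Long()));   // null key serde -> configured default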
-    /**
-     * Aggregate the values of records in this stream by the grouped key and defined {@link SessionWindows}.
-     * Records with {@code null} key or value are ignored.
-     * Aggregating is a generalization of {@link #reduce(Reducer, SessionWindows, String) combining via
-     * reduce(...)} as it, for example, allows the result to have a different type than the input values.
-     * The result is written into a local {@link SessionStore} (which is basically an ever-updating materialized view)
-     * provided by the given {@code storeSupplier}.
-     * SessionWindows are retained until their retention time expires (c.f. {@link SessionWindows#until(long)}).
-     * Furthermore, updates to the store are sent downstream into a windowed {@link KTable} changelog stream, where
-     * "windowed" implies that the {@link KTable} key is a combined key of the original record key and a window ID.
-     * <p>
-     * The specified {@link Initializer} is applied once per session directly before the first input record is
-     * processed to provide an initial intermediate aggregation result that is used to process the first record.
-     * The specified {@link Aggregator} is applied for each input record and computes a new aggregate using the current
-     * aggregate (or for the very first record using the intermediate aggregation result provided via the
-     * {@link Initializer}) and the record's value.
-     * Thus, {@code aggregate(Initializer, Aggregator, Merger, SessionWindows, Serde, org.apache.kafka.streams.processor.StateStoreSupplier)}
-     * can be used to compute aggregate functions like count (c.f. {@link #count(SessionWindows)}).
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same window and key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link SessionStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}.
-     * Use {@link org.apache.kafka.streams.processor.StateStoreSupplier#name()} to get the store name:
-     * <pre>{@code
-     * KafkaStreams streams = ... // some windowed aggregation on value type Long
-     * String queryableStoreName = storeSupplier.name();
-     * ReadOnlySessionStore<String, Long> sessionStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>sessionStore());
-     * String key = "some-key";
-     * KeyValueIterator<Windowed<String>, Long> aggForKeyForSession = sessionStore.fetch(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     *
-     * @param initializer    the instance of {@link Initializer}
-     * @param aggregator     the instance of {@link Aggregator}
-     * @param sessionMerger  the instance of {@link Merger}
-     * @param sessionWindows the specification of the aggregation {@link SessionWindows}
-     * @param aggValueSerde  aggregate value serdes for materializing the aggregated table,
-     *                       if not specified, the default serdes defined in the configs will be used
-     * @param storeSupplier  user-defined state store supplier. Cannot be {@code null}.
-     * @param <T>           the value type of the resulting {@link KTable}
-     * @return a windowed {@link KTable} that contains "update" records with unmodified keys, and values that represent
-     * the latest (rolling) aggregate for each key within a window
-     * @deprecated use {@link #windowedBy(SessionWindows) windowedBy(sessionWindows)} followed by
-     * {@link SessionWindowedKStream#aggregate(Initializer, Aggregator, Merger, Materialized) aggregate(initializer, aggregator, sessionMerger, Materialized.as(SessionBytesStoreSupplier).withValueSerde(aggValueSerde))}
-     */
-    @Deprecated
-    <T> KTable<Windowed<K>, T> aggregate(final Initializer<T> initializer,
-                                         final Aggregator<? super K, ? super V, T> aggregator,
-                                         final Merger<? super K, T> sessionMerger,
-                                         final SessionWindows sessionWindows,
-                                         final Serde<T> aggValueSerde,
-                                         final org.apache.kafka.streams.processor.StateStoreSupplier<SessionStore> storeSupplier);
-
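To keep a custom store when migrating off this StateStoreSupplier overload, a session store supplier can be handed to Materialized.as(...). A sketch assuming the 1.1-era Stores factory; the store name and retention values are illustrative:

    import org.apache.kafka.streams.state.SessionBytesStoreSupplier;
    import org.apache.kafka.streams.state.Stores;

    // Retention should cover SessionWindows#until, mirroring the old supplier's configuration.
    SessionBytesStoreSupplier supplier =
        Stores.persistentSessionStore("session-agg-store", 5 * 60 * 1000L);
    KTable<Windowed<String>, Long> t = groupedStream
        .windowedBy(SessionWindows.with(5 * 60 * 1000L))
        .aggregate(() -> 0L,
                   (k, v, agg) -> agg + v,
                   (k, agg1, agg2) -> agg1 + agg2,
                   Materialized.<String, Long>as(supplier)
                       .withValueSerde(Serdes.Long()));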
     /**
      * Create a new {@link TimeWindowedKStream} instance that can be used to perform windowed aggregations.
      * @param windows the specification of the aggregation {@link Windows}
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/KGroupedTable.java b/streams/src/main/java/org/apache/kafka/streams/kstream/KGroupedTable.java
index 1916f16..0e26336 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/KGroupedTable.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/KGroupedTable.java
@@ -17,10 +17,10 @@
 package org.apache.kafka.streams.kstream;
 
 import org.apache.kafka.common.annotation.InterfaceStability;
-import org.apache.kafka.common.serialization.Serde;
 import org.apache.kafka.common.utils.Bytes;
 import org.apache.kafka.streams.KafkaStreams;
 import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.Topology;
 import org.apache.kafka.streams.state.KeyValueStore;
 import org.apache.kafka.streams.state.QueryableStoreType;
 
@@ -66,59 +66,17 @@ public interface KGroupedTable<K, V> {
      * }</pre>
      * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
      * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * The store name must be a valid Kafka topic name and cannot contain characters other than ASCII alphanumerics,
-     * '.', '_' and '-'.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
      *
-     * @param queryableStoreName     the name of the underlying {@link KTable} state store; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} this is the equivalent of {@link KGroupedTable#count()}.
-     * @return a {@link KTable} that contains "update" records with unmodified keys and {@link Long} values that
-     * represent the latest (rolling) count (i.e., number of records) for each key
-     * @deprecated use {@link #count(Materialized) count(Materialized.as(queryableStoreName))}
-     */
-    @Deprecated
-    KTable<K, Long> count(final String queryableStoreName);
-
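On the "custom RPC mechanism" mentioned in the retained text above, a hedged sketch of the lookup side only: metadataForKey and allMetadata are the actual KafkaStreams APIs here, while streams, localStore, thisHost, and the forwarding step are application-specific assumptions.

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.KafkaStreams;
    import org.apache.kafka.streams.state.HostInfo;
    import org.apache.kafka.streams.state.StreamsMetadata;

    // Find which instance hosts the key, then read locally or forward.
    StreamsMetadata meta =
        streams.metadataForKey("word-counts", "some-word", Serdes.String().serializer());
    if (meta.hostInfo().equals(thisHost)) {
        Long count = localStore.get("some-word");    // local read via the store handle
    } else {
        // forward the request to meta.host() + ":" + meta.port() via your RPC layer
    }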
-    /**
-     * Count number of records of the original {@link KTable} that got {@link KTable#groupBy(KeyValueMapper) mapped} to
-     * the same key into a new instance of {@link KTable}.
-     * Records with {@code null} key are ignored.
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * that can be queried using the provided {@code queryableStoreName}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // counting words
-     * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
-     * String key = "some-word";
-     * Long countForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
      * <p>
      * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
+     * Therefore, the store name defined by the {@code Materialized} instance must be a valid Kafka topic name and cannot contain characters other than ASCII
+     * alphanumerics, '.', '_' and '-'.
+     * The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is
      * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * The store name must be a valid Kafka topic name and cannot contain characters other than ASCII alphanumerics,
-     * '.', '_' and '-'.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is the
+     * store name provided via {@code Materialized}, and "-changelog" is a fixed suffix.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @param materialized the instance of {@link Materialized} used to materialize the state store. Cannot be {@code null}
      * @return a {@link KTable} that contains "update" records with unmodified keys and {@link Long} values that
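A short usage sketch of the count(Materialized) overload documented above, plus the Topology#describe() call the new text points to; groupedTable, builder, and the store name are illustrative assumptions:

    KTable<String, Long> counts = groupedTable.count(
        Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("word-counts")
            .withValueSerde(Serdes.Long()));           // "word-counts" also names the changelog

    Topology topology = builder.build();               // builder is the StreamsBuilder in use
    System.out.println(topology.describe());           // lists processors, stores, internal topics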
@@ -147,128 +105,13 @@ public interface KGroupedTable<K, V> {
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
      * and "-changelog" is a fixed suffix.
     * Note that the internal store name may not be queryable through Interactive Queries.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
      *
-     * @return a {@link KTable} that contains "update" records with unmodified keys and {@link Long} values that
-     * represent the latest (rolling) count (i.e., number of records) for each key
-     */
-    KTable<K, Long> count();
-
-    /**
-     * Count number of records of the original {@link KTable} that got {@link KTable#groupBy(KeyValueMapper) mapped} to
-     * the same key into a new instance of {@link KTable}.
-     * Records with {@code null} key are ignored.
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * provided by the given {@code storeSupplier}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // counting words
-     * String queryableStoreName = storeSupplier.name();
-     * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
-     * String key = "some-word";
-     * Long countForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
-     * @param storeSupplier user-defined state store supplier. Cannot be {@code null}.
      * @return a {@link KTable} that contains "update" records with unmodified keys and {@link Long} values that
      * represent the latest (rolling) count (i.e., number of records) for each key
-     * @deprecated use {@link #count(Materialized) count(Materialized.as(KeyValueBytesStoreSupplier))}
      */
-    @Deprecated
-    KTable<K, Long> count(final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier);
-
-    /**
-     * Combine the value of records of the original {@link KTable} that got {@link KTable#groupBy(KeyValueMapper)
-     * mapped} to the same key into a new instance of {@link KTable}.
-     * Records with {@code null} key are ignored.
-     * Combining implies that the type of the aggregate result is the same as the type of the input value
-     * (c.f. {@link #aggregate(Initializer, Aggregator, Aggregator, Serde, String)}).
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * that can be queried using the provided {@code queryableStoreName}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * Each update to the original {@link KTable} results in a two-step update of the result {@link KTable}.
-     * The specified {@link Reducer adder} is applied for each update record and computes a new aggregate using the
-     * current aggregate (first argument) and the record's value (second argument) by adding the new record to the
-     * aggregate.
-     * The specified {@link Reducer subtractor} is applied for each "replaced" record of the original {@link KTable}
-     * and computes a new aggregate using the current aggregate (first argument) and the record's value (second
-     * argument) by "removing" the "replaced" record from the aggregate.
-     * If there is no current aggregate, the {@link Reducer} is not applied and the new aggregate will be the record's
-     * value as-is.
-     * Thus, {@code reduce(Reducer, Reducer, String)} can be used to compute aggregate functions like sum.
-     * For sum, the adder and subtractor would work as follows:
-     * <pre>{@code
-     * public class SumAdder implements Reducer<Integer> {
-     *   public Integer apply(Integer currentAgg, Integer newValue) {
-     *     return currentAgg + newValue;
-     *   }
-     * }
-     *
-     * public class SumSubtractor implements Reducer<Integer> {
-     *   public Integer apply(Integer currentAgg, Integer oldValue) {
-     *     return currentAgg - oldValue;
-     *   }
-     * }
-     * }</pre>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // counting words
-     * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
-     * String key = "some-word";
-     * Long countForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * The store name must be a valid Kafka topic name and cannot contain characters other than ASCII alphanumerics,
-     * '.', '_' and '-'.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param adder      a {@link Reducer} that adds a new value to the aggregate result
-     * @param subtractor a {@link Reducer} that removes an old value from the aggregate result
-     * @param queryableStoreName     the name of the underlying {@link KTable} state store; valid characters are ASCII alphanumerics,
-     * '.', '_' and '-'. If {@code null} this is the equivalent of {@link KGroupedTable#reduce(Reducer, Reducer)}.
-     * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
-     * latest (rolling) aggregate for each key
-     * @deprecated use {@link #reduce(Reducer, Reducer, Materialized) reduce(adder, subtractor, Materialized.as(queryableStoreName))}
-     */
-    @Deprecated
-    KTable<K, V> reduce(final Reducer<V> adder,
-                        final Reducer<V> subtractor,
-                        final String queryableStoreName);
+    KTable<K, Long> count();
 
     /**
      * Combine the value of records of the original {@link KTable} that got {@link KTable#groupBy(KeyValueMapper)
@@ -323,13 +166,14 @@ public interface KGroupedTable<K, V> {
      * query the value of the key on a parallel running instance of your Kafka Streams application.
      * <p>
      * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
+     * Therefore, the store name defined by the {@code Materialized} instance must be a valid Kafka topic name and cannot contain characters other than ASCII
+     * alphanumerics, '.', '_' and '-'.
+     * The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is
      * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * The store name must be a valid Kafka topic name and cannot contain characters other than ASCII alphanumerics,
-     * '.', '_' and '-'.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is the
+     * store name provided via {@code Materialized}, and "-changelog" is a fixed suffix.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @param adder         a {@link Reducer} that adds a new value to the aggregate result
     * @param subtractor    a {@link Reducer} that removes an old value from the aggregate result
@@ -386,7 +230,8 @@ public interface KGroupedTable<K, V> {
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
      * and "-changelog" is a fixed suffix.
     * Note that the internal store name may not be queryable through Interactive Queries.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @param adder      a {@link Reducer} that adds a new value to the aggregate result
     * @param subtractor a {@link Reducer} that removes an old value from the aggregate result
@@ -397,164 +242,6 @@ public interface KGroupedTable<K, V> {
                         final Reducer<V> subtractor);
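For the removed reduce overloads below, migration collapses to the Materialized variant retained above. A minimal sum-style sketch, assuming Integer values and an illustrative store name:

    // assumes: KGroupedTable<String, Integer> groupedTable = ...
    KTable<String, Integer> sum = groupedTable.reduce(
        (agg, newValue) -> agg + newValue,    // adder: update record folded in
        (agg, oldValue) -> agg - oldValue,    // subtractor: replaced record removed
        Materialized.as("sum-store"));        // replaces queryableStoreName / storeSupplier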
 
     /**
-     * Combine the value of records of the original {@link KTable} that got {@link KTable#groupBy(KeyValueMapper)
-     * mapped} to the same key into a new instance of {@link KTable}.
-     * Records with {@code null} key are ignored.
-     * Combining implies that the type of the aggregate result is the same as the type of the input value
-     * (c.f. {@link #aggregate(Initializer, Aggregator, Aggregator, Serde, String)}).
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * provided by the given {@code storeSupplier}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * Each update to the original {@link KTable} results in a two-step update of the result {@link KTable}.
-     * The specified {@link Reducer adder} is applied for each update record and computes a new aggregate using the
-     * current aggregate (first argument) and the record's value (second argument) by adding the new record to the
-     * aggregate.
-     * The specified {@link Reducer subtractor} is applied for each "replaced" record of the original {@link KTable}
-     * and computes a new aggregate using the current aggregate (first argument) and the record's value (second
-     * argument) by "removing" the "replaced" record from the aggregate.
-     * If there is no current aggregate, the {@link Reducer} is not applied and the new aggregate will be the record's
-     * value as-is.
-     * Thus, {@code reduce(Reducer, Reducer, String)} can be used to compute aggregate functions like sum.
-     * For sum, the adder and subtractor would work as follows:
-     * <pre>{@code
-     * public class SumAdder implements Reducer<Integer> {
-     *   public Integer apply(Integer currentAgg, Integer newValue) {
-     *     return currentAgg + newValue;
-     *   }
-     * }
-     *
-     * public class SumSubtractor implements Reducer<Integer> {
-     *   public Integer apply(Integer currentAgg, Integer oldValue) {
-     *     return currentAgg - oldValue;
-     *   }
-     * }
-     * }</pre>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // counting words
-     * String queryableStoreName = storeSupplier.name();
-     * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
-     * String key = "some-word";
-     * Long countForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param adder         a {@link Reducer} that adds a new value to the aggregate result
-     * @param subtractor    a {@link Reducer} that removes an old value from the aggregate result
-     * @param storeSupplier user-defined state store supplier. Cannot be {@code null}.
-     * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
-     * latest (rolling) aggregate for each key
-     * @deprecated use {@link #reduce(Reducer, Reducer, Materialized) reduce(adder, subtractor, Materialized.as(KeyValueBytesStoreSupplier))}
-     */
-    @Deprecated
-    KTable<K, V> reduce(final Reducer<V> adder,
-                        final Reducer<V> subtractor,
-                        final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier);
-
-    /**
-     * Aggregate the value of records of the original {@link KTable} that got {@link KTable#groupBy(KeyValueMapper)
-     * mapped} to the same key into a new instance of {@link KTable} using default serializers and deserializers.
-     * Records with {@code null} key are ignored.
-     * Aggregating is a generalization of {@link #reduce(Reducer, Reducer, String) combining via reduce(...)} as it,
-     * for example, allows the result to have a different type than the input values.
-     * If the result value type does not match the {@link StreamsConfig#DEFAULT_VALUE_SERDE_CLASS_CONFIG default value
-     * serde} you should use {@link KGroupedTable#aggregate(Initializer, Aggregator, Aggregator, Serde, String)
-     * aggregate(Initializer, Aggregator, Aggregator, Serde, String)}.
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * provided by the given {@code storeSupplier}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * The specified {@link Initializer} is applied once directly before the first input record is processed to
-     * provide an initial intermediate aggregation result that is used to process the first record.
-     * Each update to the original {@link KTable} results in a two-step update of the result {@link KTable}.
-     * The specified {@link Aggregator adder} is applied for each update record and computes a new aggregate using the
-     * current aggregate (or for the very first record using the intermediate aggregation result provided via the
-     * {@link Initializer}) and the record's value by adding the new record to the aggregate.
-     * The specified {@link Aggregator subtractor} is applied for each "replaced" record of the original {@link KTable}
-     * and computes a new aggregate using the current aggregate and the record's value by "removing" the "replaced"
-     * record from the aggregate.
-     * Thus, {@code aggregate(Initializer, Aggregator, Aggregator, String)} can be used to compute aggregate functions
-     * like sum.
-     * For sum, the initializer, adder, and subtractor would work as follows:
-     * <pre>{@code
-     * // in this example, LongSerde.class must be set as default value serde in StreamsConfig
-     * public class SumInitializer implements Initializer<Long> {
-     *   public Long apply() {
-     *     return 0L;
-     *   }
-     * }
-     *
-     * public class SumAdder implements Aggregator<String, Integer, Long> {
-     *   public Long apply(String key, Integer newValue, Long aggregate) {
-     *     return aggregate + newValue;
-     *   }
-     * }
-     *
-     * public class SumSubtractor implements Aggregator<String, Integer, Long> {
-     *   public Long apply(String key, Integer oldValue, Long aggregate) {
-     *     return aggregate - oldValue;
-     *   }
-     * }
-     * }</pre>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // counting words
-     * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
-     * String key = "some-word";
-     * Long countForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param initializer an {@link Initializer} that provides an initial aggregate result value
-     * @param adder       an {@link Aggregator} that adds a new record to the aggregate result
-     * @param subtractor  an {@link Aggregator} that removes an old record from the aggregate result
-     * @param queryableStoreName   the name of the underlying {@link KTable} state store.
-     *                             If {@code null} this is the equivalent of {@link KGroupedTable#aggregate(Initializer, Aggregator, Aggregator)}.
-     * @param <VR>        the value type of the aggregated {@link KTable}
-     * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
-     * latest (rolling) aggregate for each key
-     * @deprecated use {@link #aggregate(Initializer, Aggregator, Aggregator, Materialized) aggregate(initializer, adder, subtractor, Materialized.as(queryableStoreName))}
-     */
-    @Deprecated
-    <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
-                                 final Aggregator<? super K, ? super V, VR> adder,
-                                 final Aggregator<? super K, ? super V, VR> subtractor,
-                                 final String queryableStoreName);
-
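The aggregate counterpart of the reduce sketch earlier, where the result type may differ from the input type, so the value serde is supplied explicitly on the Materialized instance (names illustrative):

    // assumes: KGroupedTable<String, Integer> groupedTable = ...
    KTable<String, Long> agg = groupedTable.aggregate(
        () -> 0L,                                        // initializer
        (key, newValue, aggVal) -> aggVal + newValue,    // adder
        (key, oldValue, aggVal) -> aggVal - oldValue,    // subtractor
        Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("agg-store")
            .withValueSerde(Serdes.Long()));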
-    /**
      * Aggregate the value of records of the original {@link KTable} that got {@link KTable#groupBy(KeyValueMapper)
      * mapped} to the same key into a new instance of {@link KTable} using default serializers and deserializers.
      * Records with {@code null} key are ignored.
@@ -615,11 +302,14 @@ public interface KGroupedTable<K, V> {
      * query the value of the key on a parallel running instance of your Kafka Streams application.
      * <p>
      * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
+     * Therefore, the store name defined by the {@code Materialized} instance must be a valid Kafka topic name and cannot contain characters other than ASCII
+     * alphanumerics, '.', '_' and '-'.
+     * The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is
      * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provide {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is the
+     * store name provided via {@code Materialized}, and "-changelog" is a fixed suffix.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @param initializer   an {@link Initializer} that provides an initial aggregate result value
      * @param adder         an {@link Aggregator} that adds a new record to the aggregate result
@@ -640,7 +330,7 @@ public interface KGroupedTable<K, V> {
      * Records with {@code null} key are ignored.
      * Aggregating is a generalization of {@link #reduce(Reducer, Reducer) combining via reduce(...)} as it,
      * for example, allows the result to have a different type than the input values.
-     * If the result value type does not match the {@link StreamsConfig#VALUE_SERDE_CLASS_CONFIG default value
+     * If the result value type does not match the {@link StreamsConfig#DEFAULT_VALUE_SERDE_CLASS_CONFIG default value
      * serde} you should use {@link #aggregate(Initializer, Aggregator, Aggregator, Materialized)}.
      * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
      * provided by the given {@code storeSupplier}.
@@ -690,7 +380,8 @@ public interface KGroupedTable<K, V> {
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
      * and "-changelog" is a fixed suffix.
     * Note that the internal store name may not be queryable through Interactive Queries.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
     * @param initializer an {@link Initializer} that provides an initial aggregate result value
     * @param adder       an {@link Aggregator} that adds a new record to the aggregate result
@@ -703,247 +394,4 @@ public interface KGroupedTable<K, V> {
                                  final Aggregator<? super K, ? super V, VR> adder,
                                  final Aggregator<? super K, ? super V, VR> subtractor);
 
-
-    /**
-     * Aggregate the value of records of the original {@link KTable} that got {@link KTable#groupBy(KeyValueMapper)
-     * mapped} to the same key into a new instance of {@link KTable} using default serializers and deserializers.
-     * Records with {@code null} key are ignored.
-     * Aggregating is a generalization of {@link #reduce(Reducer, Reducer, String) combining via reduce(...)} as it,
-     * for example, allows the result to have a different type than the input values.
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * that can be queried using the provided {@code queryableStoreName}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * The specified {@link Initializer} is applied once directly before the first input record is processed to
-     * provide an initial intermediate aggregation result that is used to process the first record.
-     * Each update to the original {@link KTable} results in a two-step update of the result {@link KTable}.
-     * The specified {@link Aggregator adder} is applied for each update record and computes a new aggregate using the
-     * current aggregate (or for the very first record using the intermediate aggregation result provided via the
-     * {@link Initializer}) and the record's value by adding the new record to the aggregate.
-     * The specified {@link Aggregator subtractor} is applied for each "replaced" record of the original {@link KTable}
-     * and computes a new aggregate using the current aggregate and the record's value by "removing" the "replaced"
-     * record from the aggregate.
-     * Thus, {@code aggregate(Initializer, Aggregator, Aggregator, String)} can be used to compute aggregate functions
-     * like sum.
-     * For sum, the initializer, adder, and subtractor would work as follows:
-     * <pre>{@code
-     * public class SumInitializer implements Initializer<Long> {
-     *   public Long apply() {
-     *     return 0L;
-     *   }
-     * }
-     *
-     * public class SumAdder implements Aggregator<String, Integer, Long> {
-     *   public Long apply(String key, Integer newValue, Long aggregate) {
-     *     return aggregate + newValue;
-     *   }
-     * }
-     *
-     * public class SumSubtractor implements Aggregator<String, Integer, Long> {
-     *   public Long apply(String key, Integer oldValue, Long aggregate) {
-     *     return aggregate - oldValue;
-     *   }
-     * }
-     * }</pre>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // counting words
-     * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
-     * String key = "some-word";
-     * Long countForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * The store name must be a valid Kafka topic name and cannot contain characters other than ASCII alphanumerics,
-     * '.', '_' and '-'.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param initializer   an {@link Initializer} that provides an initial aggregate result value
-     * @param adder         an {@link Aggregator} that adds a new record to the aggregate result
-     * @param subtractor    an {@link Aggregator} that removes an old record from the aggregate result
-     * @param aggValueSerde aggregate value serdes for materializing the aggregated table,
-     *                      if not specified, the default serdes defined in the configs will be used
-     * @param queryableStoreName     the name of the underlying {@link KTable} state store; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} this is the equivalent of {@link KGroupedTable#aggregate(Initializer, Aggregator, Aggregator, Serde)}.
-     * @param <VR>          the value type of the aggregated {@link KTable}
-     * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
-     * latest (rolling) aggregate for each key
-     * @deprecated use {@link #aggregate(Initializer, Aggregator, Aggregator, Materialized) aggregate(initializer, adder, subtractor, Materialized.as(queryableStoreName).withValueSerde(aggValueSerde))}
-     */
-    @Deprecated
-    <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
-                                 final Aggregator<? super K, ? super V, VR> adder,
-                                 final Aggregator<? super K, ? super V, VR> subtractor,
-                                 final Serde<VR> aggValueSerde,
-                                 final String queryableStoreName);
-
-    /**
-     * Aggregate the value of records of the original {@link KTable} that got {@link KTable#groupBy(KeyValueMapper)
-     * mapped} to the same key into a new instance of {@link KTable} using default serializers and deserializers.
-     * Records with {@code null} key are ignored.
-     * Aggregating is a generalization of {@link #reduce(Reducer, Reducer, Materialized) combining via reduce(...)} as it,
-     * for example, allows the result to have a different type than the input values.
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * that can be queried using the provided {@code queryableStoreName}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * The specified {@link Initializer} is applied once directly before the first input record is processed to
-     * provide an initial intermediate aggregation result that is used to process the first record.
-     * Each update to the original {@link KTable} results in a two-step update of the result {@link KTable}.
-     * The specified {@link Aggregator adder} is applied for each update record and computes a new aggregate using the
-     * current aggregate (or for the very first record using the intermediate aggregation result provided via the
-     * {@link Initializer}) and the record's value by adding the new record to the aggregate.
-     * The specified {@link Aggregator subtractor} is applied for each "replaced" record of the original {@link KTable}
-     * and computes a new aggregate using the current aggregate and the record's value by "removing" the "replaced"
-     * record from the aggregate.
-     * Thus, {@code aggregate(Initializer, Aggregator, Aggregator, String)} can be used to compute aggregate functions
-     * like sum.
-     * For sum, the initializer, adder, and subtractor would work as follows:
-     * <pre>{@code
-     * public class SumInitializer implements Initializer<Long> {
-     *   public Long apply() {
-     *     return 0L;
-     *   }
-     * }
-     *
-     * public class SumAdder implements Aggregator<String, Integer, Long> {
-     *   public Long apply(String key, Integer newValue, Long aggregate) {
-     *     return aggregate + newValue;
-     *   }
-     * }
-     *
-     * public class SumSubtractor implements Aggregator<String, Integer, Long> {
-     *   public Long apply(String key, Integer oldValue, Long aggregate) {
-     *     return aggregate - oldValue;
-     *   }
-     * }
-     * }</pre>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${internalStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
-     * and "-changelog" is a fixed suffix.
-     * Note that the internal store name may not be queryable through Interactive Queries.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param initializer   an {@link Initializer} that provides an initial aggregate result value
-     * @param adder         an {@link Aggregator} that adds a new record to the aggregate result
-     * @param subtractor    an {@link Aggregator} that removes an old record from the aggregate result
-     * @param aggValueSerde aggregate value serdes for materializing the aggregated table,
-     *                      if not specified, the default serdes defined in the configs will be used
-     * @param <VR>          the value type of the aggregated {@link KTable}
-     * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
-     * latest (rolling) aggregate for each key
-     * @deprecated use {@link #aggregate(Initializer, Aggregator, Aggregator, Materialized) aggregate(initializer, adder, subtractor, Materialized.with(null, aggValueSerde))}
-     */
-    @Deprecated
-    <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
-                                 final Aggregator<? super K, ? super V, VR> adder,
-                                 final Aggregator<? super K, ? super V, VR> subtractor,
-                                 final Serde<VR> aggValueSerde);
-
-
-    /**
-     * Aggregate the value of records of the original {@link KTable} that got {@link KTable#groupBy(KeyValueMapper)
-     * mapped} to the same key into a new instance of {@link KTable} using default serializers and deserializers.
-     * Records with {@code null} key are ignored.
-     * Aggregating is a generalization of {@link #reduce(Reducer, Reducer, String) combining via reduce(...)} as it,
-     * for example, allows the result to have a different type than the input values.
-     * The result is written into a local {@link KeyValueStore} (which is basically an ever-updating materialized view)
-     * provided by the given {@code storeSupplier}.
-     * Furthermore, updates to the store are sent downstream into a {@link KTable} changelog stream.
-     * <p>
-     * The specified {@link Initializer} is applied once directly before the first input record is processed to
-     * provide an initial intermediate aggregation result that is used to process the first record.
-     * Each update to the original {@link KTable} results in a two-step update of the result {@link KTable}.
-     * The specified {@link Aggregator adder} is applied for each update record and computes a new aggregate using the
-     * current aggregate (or for the very first record using the intermediate aggregation result provided via the
-     * {@link Initializer}) and the record's value by adding the new record to the aggregate.
-     * The specified {@link Aggregator subtractor} is applied for each "replaced" record of the original {@link KTable}
-     * and computes a new aggregate using the current aggregate and the record's value by "removing" the "replaced"
-     * record from the aggregate.
-     * Thus, {@code aggregate(Initializer, Aggregator, Aggregator, String)} can be used to compute aggregate functions
-     * like sum.
-     * For sum, the initializer, adder, and subtractor would work as follows:
-     * <pre>{@code
-     * public class SumInitializer implements Initializer<Long> {
-     *   public Long apply() {
-     *     return 0L;
-     *   }
-     * }
-     *
-     * public class SumAdder implements Aggregator<String, Integer, Long> {
-     *   public Long apply(String key, Integer newValue, Long aggregate) {
-     *     return aggregate + newValue;
-     *   }
-     * }
-     *
-     * public class SumSubtractor implements Aggregator<String, Integer, Long> {
-     *   public Long apply(String key, Integer oldValue, Long aggregate) {
-     *     return aggregate - oldValue;
-     *   }
-     * }
-     * }</pre>
-     * Not all updates might get sent downstream, as an internal cache is used to deduplicate consecutive updates to
-     * the same key.
-     * The rate of propagated updates depends on your input data rate, the number of distinct keys, the number of
-     * parallel running Kafka Streams instances, and the {@link StreamsConfig configuration} parameters for
-     * {@link StreamsConfig#CACHE_MAX_BYTES_BUFFERING_CONFIG cache size}, and
-     * {@link StreamsConfig#COMMIT_INTERVAL_MS_CONFIG commit interval}.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // counting words
-     * String queryableStoreName = storeSupplier.name();
-     * ReadOnlyKeyValueStore<String,Long> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<String, Long>keyValueStore());
-     * String key = "some-word";
-     * Long countForWord = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provided {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param initializer   an {@link Initializer} that provides an initial aggregate result value
-     * @param adder         an {@link Aggregator} that adds a new record to the aggregate result
-     * @param subtractor    an {@link Aggregator} that removes an old record from the aggregate result
-     * @param storeSupplier user defined state store supplier. Cannot be {@code null}.
-     * @param <VR>          the value type of the aggregated {@link KTable}
-     * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
-     * latest (rolling) aggregate for each key
-     * @deprecated use {@link #aggregate(Initializer, Aggregator, Aggregator, Materialized) aggregate(initializer, adder, subtractor, Materialized.as(KeyValueBytesStoreSupplier))}
-     */
-    @Deprecated
-    <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
-                                 final Aggregator<? super K, ? super V, VR> adder,
-                                 final Aggregator<? super K, ? super V, VR> subtractor,
-                                 final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier);
-
 }
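
The deprecation notes above point to the Materialized-based overloads. A minimal sketch of the migration, assuming an illustrative KGroupedTable<String, Integer> and store name (neither is part of this commit):

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.kstream.KGroupedTable;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.state.KeyValueStore;

    KGroupedTable<String, Integer> grouped = ...; // from KTable#groupBy(...)

    // Before: grouped.aggregate(initializer, adder, subtractor, Serdes.Long());
    // After: the value serde (and optionally the store name) travels in Materialized.
    KTable<String, Long> sums = grouped.aggregate(
        () -> 0L,                                           // initializer
        (key, newValue, aggregate) -> aggregate + newValue, // adder
        (key, oldValue, aggregate) -> aggregate - oldValue, // subtractor
        Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("sum-store")
            .withValueSerde(Serdes.Long()));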
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/KStream.java b/streams/src/main/java/org/apache/kafka/streams/kstream/KStream.java
index 1436e25..09506ff 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/KStream.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/KStream.java
@@ -20,12 +20,10 @@ import org.apache.kafka.clients.producer.internals.DefaultPartitioner;
 import org.apache.kafka.common.annotation.InterfaceStability;
 import org.apache.kafka.common.serialization.Serde;
 import org.apache.kafka.streams.Consumed;
-import org.apache.kafka.streams.KafkaStreams;
 import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.StreamsBuilder;
 import org.apache.kafka.streams.StreamsConfig;
 import org.apache.kafka.streams.Topology;
-import org.apache.kafka.streams.kstream.internals.WindowedStreamPartitioner;
 import org.apache.kafka.streams.processor.Processor;
 import org.apache.kafka.streams.processor.ProcessorContext;
 import org.apache.kafka.streams.processor.ProcessorSupplier;
@@ -360,392 +358,12 @@ public interface KStream<K, V> {
     <VR> KStream<K, VR> flatMapValues(final ValueMapperWithKey<? super K, ? super V, ? extends Iterable<? extends VR>> mapper);
 
     /**
-     * Print the records of this stream to {@code System.out}.
-     * This function will use the generated name of the parent processor node to label the key/value pairs printed to
-     * the console.
-     * <p>
-     * The default serde will be used to deserialize the key or value in case the type is {@code byte[]} before calling
-     * {@code toString()} on the deserialized object.
-     * <p>
-     * Implementors will need to override {@code toString()} for keys and values that are not of type {@link String},
-     * {@link Integer} etc. to get meaningful information.
-     * @deprecated use {@code print(Printed)}
-     */
-    @Deprecated
-    void print();
-
-    /**
-     * Print the records of this stream to {@code System.out}.
-     * This function will use the given name to label the key/value pairs printed to the console.
-     * <p>
-     * The default serde will be used to deserialize the key or value in case the type is {@code byte[]} before calling
-     * {@code toString()} on the deserialized object.
-     * <p>
-     * Implementors will need to override {@code toString()} for keys and values that are not of type {@link String},
-     * {@link Integer} etc. to get meaningful information.
-     *
-     * @param label the name used to label the key/value pairs printed to the console
-     * @deprecated use {@link #print(Printed) print(Printed.toSysOut())}
-     */
-    @Deprecated
-    void print(final String label);
-
-    /**
-     * Print the records of this stream to {@code System.out}.
-     * This function will use the generated name of the parent processor node to label the key/value pairs printed to
-     * the console.
-     * <p>
-     * The provided serde will be used to deserialize the key or value in case the type is {@code byte[]} before calling
-     * {@code toString()} on the deserialized object.
-     * <p>
-     * Implementors will need to override {@code toString()} for keys and values that are not of type {@link String},
-     * {@link Integer} etc. to get meaningful information.
-     *
-     * @param keySerde key serde used to deserialize key if type is {@code byte[]}.
-     * @param valSerde value serde used to deserialize value if type is {@code byte[]}.
-     * @deprecated use {@link #print(Printed) print(Printed.toSysOut().withKeyValueMapper(...))}
-     */
-    @Deprecated
-    void print(final Serde<K> keySerde,
-               final Serde<V> valSerde);
-
-    /**
-     * Print the records of this stream to {@code System.out}.
-     * <p>
-     * The provided serde will be used to deserialize the key or value in case the type is {@code byte[]} before calling
-     * {@code toString()} on the deserialized object.
-     * <p>
-     * Implementors will need to override {@code toString()} for keys and values that are not of type {@link String},
-     * {@link Integer} etc. to get meaningful information.
-     *
-     * @param keySerde   key serde used to deserialize key if type is {@code byte[]}.
-     * @param valSerde   value serde used to deserialize value if type is {@code byte[]}.
-     * @param label the name used to label the key/value pairs printed to the console
-     * @deprecated use {@link #print(Printed) print(Printed.toSysOut().withLabel(label).withKeyValueMapper(...))}
-     */
-    @Deprecated
-    void print(final Serde<K> keySerde,
-               final Serde<V> valSerde,
-               final String label);
-
-    /**
-     * Print the customized output with {@code System.out}.
-     * <p>
-     * The default serde will be used to deserialize key or value if type is {@code byte[]}.
-     * The user provided {@link KeyValueMapper} which customizes output is used to print with {@code System.out}.
-     * <p>
-     * The example below shows the way to customize output data.
-     * <pre>{@code
-     * final KeyValueMapper<Integer, String, String> mapper = new KeyValueMapper<Integer, String, String>() {
-     *     public String apply(Integer key, String value) {
-     *         return String.format("(%d, %s)", key, value);
-     *     }
-     * };
-     * }</pre>
-     * <p>
-     * The KeyValueMapper's mapped value type must be {@code String}.
-     *
-     * @param mapper a {@link KeyValueMapper} that computes output type {@code String}.
-     * @deprecated use {@link #print(Printed) print(Printed.toSysOut().withKeyValueMapper(mapper))}
-     */
-    @Deprecated
-    void print(final KeyValueMapper<? super K, ? super V, String> mapper);
-
-    /**
-     * Print the customized output with {@code System.out}.
-     * <p>
-     * The default serde will be used to deserialize key or value if type is {@code byte[]}.
-     * The user provided {@link KeyValueMapper} which customizes output is used to print with {@code System.out}.
-     * <p>
-     * The example below shows the way to customize output data.
-     * <pre>{@code
-     * final KeyValueMapper<Integer, String, String> mapper = new KeyValueMapper<Integer, String, String>() {
-     *     public String apply(Integer key, String value) {
-     *         return String.format("(%d, %s)", key, value);
-     *     }
-     * };
-     * }</pre>
-     * <p>
-     * The KeyValueMapper's mapped value type must be {@code String}.
-     *
-     * @param mapper a {@link KeyValueMapper} that computes output type {@code String}.
-     * @param label the name used to label the key/value pairs printed to the console
-     * @deprecated use {@link #print(Printed) print(Printed.toSysOut().withLabel(label).withKeyValueMapper(mapper))}
-     */
-    @Deprecated
-    void print(final KeyValueMapper<? super K, ? super V, String> mapper, final String label);
-
-    /**
-     * Print the customized output with {@code System.out}.
-     * <p>
-     * The user provided {@link KeyValueMapper} which customizes output is used to print with {@code System.out}.
-     * The provided serde will be used to deserialize key or value if type is {@code byte[]}.
-     * <p>
-     * The example below shows the way to customize output data.
-     * <pre>{@code
-     * final KeyValueMapper<Integer, String, String> mapper = new KeyValueMapper<Integer, String, String>() {
-     *     public String apply(Integer key, String value) {
-     *         return String.format("(%d, %s)", key, value);
-     *     }
-     * };
-     * }</pre>
-     * <p>
-     * The provided KeyValueMapper's mapped value type must be {@code String}.
-     * <p>
-     * Implementors will need to override {@code toString()} for keys and values that are not of type {@link String},
-     * {@link Integer} etc. to get meaningful information.
-     *
-     * @param mapper a {@link KeyValueMapper} that computes output type {@code String}.
-     * @param keySerde a {@link Serde} used to deserialize key if type is {@code byte[]}.
-     * @param valSerde a {@link Serde} used to deserialize value if type is {@code byte[]}.
-     * @deprecated use {@link #print(Printed) print(Printed.toSysOut().withKeyValueMapper(mapper))}
-     */
-    @Deprecated
-    void print(final KeyValueMapper<? super K, ? super V, String> mapper, final Serde<K> keySerde, final Serde<V> valSerde);
-
-    /**
-     * Print the customized output with {@code System.out}.
-     * <p>
-     * The user provided {@link KeyValueMapper} which customizes output is used to print with {@code System.out}.
-     * The provided serde will be used to deserialize key or value if type is {@code byte[]}.
-     * <p>
-     * The example below shows the way to customize output data.
-     * <pre>{@code
-     * final KeyValueMapper<Integer, String, String> mapper = new KeyValueMapper<Integer, String, String>() {
-     *     public String apply(Integer key, String value) {
-     *         return String.format("(%d, %s)", key, value);
-     *     }
-     * };
-     * }</pre>
-     * <p>
-     * The provided KeyValueMapper's mapped value type must be {@code String}.
-     * <p>
-     * Implementors will need to override {@code toString()} for keys and values that are not of type {@link String},
-     * {@link Integer} etc. to get meaningful information.
-     *
-     * @param mapper a {@link KeyValueMapper} that computes output type {@code String}.
-     * @param keySerde a {@link Serde} used to deserialize key if type is {@code byte[]}.
-     * @param valSerde a {@link Serde} used to deserialize value if type is {@code byte[]}.
-     * @param label the name used to label the key/value pairs printed to the console
-     * @deprecated use {@link #print(Printed) print(Printed.toSysOut().withLabel(label).withKeyValueMapper(mapper))}
-     */
-    @Deprecated
-    void print(final KeyValueMapper<? super K, ? super V, String> mapper, final Serde<K> keySerde, final Serde<V> valSerde, final String label);
-
-    /**
      * Print the records of this KStream using the options provided by {@link Printed}.
      * @param printed options for printing
      */
     void print(final Printed<K, V> printed);
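
All of the removed print overloads collapse into this single method. A sketch of the equivalent Printed calls, assuming an illustrative KStream<String, Long> named stream (label and mapper are placeholders):

    // Before: stream.print();
    stream.print(Printed.toSysOut());

    // Before: stream.print("my-label");
    stream.print(Printed.<String, Long>toSysOut().withLabel("my-label"));

    // Before: stream.print(mapper);
    stream.print(Printed.<String, Long>toSysOut()
        .withKeyValueMapper((key, value) -> String.format("(%s, %d)", key, value)));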
     
     /**
-     * Merge this stream and the given stream into one larger stream.
-     * <p>
-     * There is no ordering guarantee between records from this {@code KStream} and records from
-     * the provided {@code KStream} in the merged stream.
-     * Relative order is preserved within each input stream though (i.e., records within one input
-     * stream are processed in order).
-     *
-     * @param stream a stream which is to be merged into this stream
-     * @return a merged stream containing all records from this and the provided {@code KStream}
-     */
-    KStream<K, V> merge(final KStream<K, V> stream);
-
-    /**
-     * Write the records of this stream to a file at the given path.
-     * This function will use the generated name of the parent processor node to label the key/value pairs printed to
-     * the file.
-     * <p>
-     * The default serde will be used to deserialize the key or value in case the type is {@code byte[]} before calling
-     * {@code toString()} on the deserialized object.
-     * <p>
-     * Implementors will need to override {@code toString()} for keys and values that are not of type {@link String},
-     * {@link Integer} etc. to get meaningful information.
-     *
-     * @param filePath name of the file to write to
-     * @deprecated use {@link #print(Printed) print(Printed.toFile(filePath))}
-     */
-    @Deprecated
-    void writeAsText(final String filePath);
-
-    /**
-     * Write the records of this stream to a file at the given path.
-     * This function will use the given name to label the key/value pairs printed to the file.
-     * <p>
-     * The default serde will be used to deserialize the key or value in case the type is {@code byte[]} before calling
-     * {@code toString()} on the deserialized object.
-     * <p>
-     * Implementors will need to override {@code toString()} for keys and values that are not of type {@link String},
-     * {@link Integer} etc. to get meaningful information.
-     *
-     * @param filePath   name of the file to write to
-     * @param label the name used to label the key/value pairs written to the file
-     * @deprecated use {@link #print(Printed) print(Printed.toFile(filePath).withLabel(label))}
-     */
-    @Deprecated
-    void writeAsText(final String filePath,
-                     final String label);
-
-    /**
-     * Write the records of this stream to a file at the given path.
-     * This function will use the generated name of the parent processor node to label the key/value pairs printed to
-     * the file.
-     * <p>
-     * The provided serde will be used to deserialize the key or value in case the type is {@code byte[]} before calling
-     * {@code toString()} on the deserialized object.
-     * <p>
-     * Implementors will need to override {@code toString()} for keys and values that are not of type {@link String},
-     * {@link Integer} etc. to get meaningful information.
-     *
-     * @param filePath name of the file to write to
-     * @param keySerde key serde used to deserialize key if type is {@code byte[]}.
-     * @param valSerde value serde used to deserialize value if type is {@code byte[]}.
-     * @deprecated use {@link #print(Printed) print(Printed.toFile(filePath).withKeyValueMapper(...))}
-     */
-    @Deprecated
-    void writeAsText(final String filePath,
-                     final Serde<K> keySerde,
-                     final Serde<V> valSerde);
-
-    /**
-     * Write the records of this stream to a file at the given path.
-     * This function will use the given name to label the key/value pairs printed to the file.
-     * <p>
-     * The provided serde will be used to deserialize the key or value in case the type is {@code byte[]}
-     * before calling {@code toString()} on the deserialized object.
-     * <p>
-     * Implementors will need to override {@code toString()} for keys and values that are not of type {@link String},
-     * {@link Integer} etc. to get meaningful information.
-     *
-     * @param filePath   name of the file to write to
-     * @param label the name used to label the key/value pairs written to the file
-     * @param keySerde   key serde used to deserialize key if type is {@code byte[]}.
-     * @param valSerde   value serde used to deserialize value if type is {@code byte[]}.
-     * @deprecated use {@link #print(Printed) print(Printed.toFile(filePath).withLabel(label).withKeyValueMapper(...))}
-     */
-    @Deprecated
-    void writeAsText(final String filePath,
-                     final String label,
-                     final Serde<K> keySerde,
-                     final Serde<V> valSerde);
-
-    /**
-     * Write the customized output to a given file path.
-     * <p>
-     * The user provided {@link KeyValueMapper} which customizes output is used to write to file.
-     * This function will use the default name of the stream to label records.
-     * <p>
-     * The default key and value serde will be used to deserialize {@code byte[]} records before calling {@code toString()}.
-     * <p>
-     * The example below shows the way to customize output data.
-     * <pre>{@code
-     * final KeyValueMapper<Integer, String, String> mapper = new KeyValueMapper<Integer, String, String>() {
-     *     public String apply(Integer key, String value) {
-     *         return String.format("(%d, %s)", key, value);
-     *     }
-     * };
-     * }</pre>
-     * <p>
-     * The KeyValueMapper's mapped value type must be {@code String}.
-     *
-     * @param filePath path of the file to write to.
-     * @param mapper a {@link KeyValueMapper} that computes output type {@code String}.
-     * @deprecated use {@link #print(Printed) print(Printed.toFile(filePath).withKeyValueMapper(mapper))}
-     */
-    @Deprecated
-    void writeAsText(final String filePath, final KeyValueMapper<? super K, ? super V, String> mapper);
-
-    /**
-     * Write the customized output to a given file path.
-     * <p>
-     * The user provided {@link KeyValueMapper} which customizes output is used to write to file.
-     * This function will use the given name of the stream to label records.
-     * <p>
-     * The default key and value serde will be used to deserialize {@code byte[]} records before calling {@code toString()}.
-     * <p>
-     * The example below shows the way to customize output data.
-     * <pre>{@code
-     * final KeyValueMapper<Integer, String, String> mapper = new KeyValueMapper<Integer, String, String>() {
-     *     public String apply(Integer key, String value) {
-     *         return String.format("(%d, %s)", key, value);
-     *     }
-     * };
-     * }</pre>
-     * <p>
-     * The KeyValueMapper's mapped value type must be {@code String}.
-     *
-     * @param filePath path of the file to write to.
-     * @param label the name used to label records written to file.
-     * @param mapper a {@link KeyValueMapper} that computes output type {@code String}.
-     * @deprecated use {@link #print(Printed) print(Printed.toFile(filePath).withLabel(label).withKeyValueMapper(mapper))}
-     */
-    @Deprecated
-    void writeAsText(final String filePath, final String label, final KeyValueMapper<? super K, ? super V, String> mapper);
-
-    /**
-     * Write the customized output to a given file path.
-     * <p>
-     * The user provided {@link KeyValueMapper} which customizes output is used to write to file.
-     * This function will use the default name of the stream to label records.
-     * <p>
-     * The given key and value serde will be used to deserialize {@code byte[]} records before calling {@code toString()}.
-     * <p>
-     * The example below shows the way to customize output data.
-     * <pre>{@code
-     * final KeyValueMapper<Integer, String, String> mapper = new KeyValueMapper<Integer, String, String>() {
-     *     public String apply(Integer key, String value) {
-     *         return String.format("(%d, %s)", key, value);
-     *     }
-     * };
-     * }</pre>
-     * <p>
-     * The KeyValueMapper's mapped value type must be {@code String}.
-     * <p>
-     * Implementors will need to override {@code toString()} for keys and values that are not of type {@link String},
-     * {@link Integer} etc. to get meaningful information.
-     *
-     * @param filePath path of the file to write to.
-     * @param keySerde key serde used to deserialize key if type is {@code byte[]}.
-     * @param valSerde value serde used to deserialize value if type is {@code byte[]}.
-     * @param mapper a {@link KeyValueMapper} that computes output type {@code String}.
-     * @deprecated use {@link #print(Printed) print(Printed.toFile(filePath).withKeyValueMapper(mapper))}
-     */
-    @Deprecated
-    void writeAsText(final String filePath, final Serde<K> keySerde, final Serde<V> valSerde, final KeyValueMapper<? super K, ? super V, String> mapper);
-
-    /**
-     * Write the customized output to a given file path.
-     * <p>
-     * The user provided {@link KeyValueMapper} which customizes output is used to write to file.
-     * This function will use the given name of the stream to label records.
-     * <p>
-     * The given key and value serde will be used to deserialize {@code byte[]} records before calling {@code toString()}.
-     * <p>
-     * The example below shows the way to customize output data.
-     * <pre>{@code
-     * final KeyValueMapper<Integer, String, String> mapper = new KeyValueMapper<Integer, String, String>() {
-     *     public String apply(Integer key, String value) {
-     *         return String.format("(%d, %s)", key, value);
-     *     }
-     * };
-     * }</pre>
-     * <p>
-     * The KeyValueMapper's mapped value type must be {@code String}.
-     * <p>
-     * Implementors will need to override {@code toString()} for keys and values that are not of type {@link String},
-     * {@link Integer} etc. to get meaningful information.
-     *
-     * @param filePath path of the file to write to.
-     * @param label the name used to label records written to file.
-     * @param keySerde key serde used to deserialize key if type is {@code byte[]}.
-     * @param valSerde value serde used to deserialize value if type is {@code byte[]}.
-     * @param mapper a {@link KeyValueMapper} that computes output type {@code String}.
-     * @deprecated use {@link #print(Printed) print(Printed.toFile(filePath).withLabel(label).withKeyValueMapper(mapper))}
-     */
-    @Deprecated
-    void writeAsText(final String filePath, final String label, final Serde<K> keySerde, final Serde<V> valSerde, final KeyValueMapper<? super K, ? super V, String> mapper);
-
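
The writeAsText overloads removed above migrate the same way via Printed.toFile; a sketch with an illustrative path and label:

    // Before: stream.writeAsText("/tmp/streams-out", "my-label", mapper);
    stream.print(Printed.<String, Long>toFile("/tmp/streams-out")
        .withLabel("my-label")
        .withKeyValueMapper((key, value) -> String.format("(%s, %d)", key, value)));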
-    /**
      * Perform an action on each record of {@code KStream}.
      * This is a stateless record-by-record operation (cf. {@link #process(ProcessorSupplier, String...)}).
      * Note that this is a terminal operation that returns void.
@@ -786,89 +404,31 @@ public interface KStream<K, V> {
     KStream<K, V>[] branch(final Predicate<? super K, ? super V>... predicates);
 
     /**
-     * Materialize this stream to a topic and create a new {@code KStream} from the topic using default serializers and
-     * deserializers and producer's {@link DefaultPartitioner}.
-     * The specified topic should be manually created before it is used (i.e., before the Kafka Streams application is
-     * started).
+     * Merge this stream and the given stream into one larger stream.
      * <p>
-     * This is equivalent to calling {@link #to(String) #to(someTopicName)} and
-     * {@link StreamsBuilder#stream(String) StreamsBuilder#stream(someTopicName)}.
+     * There is no ordering guarantee between records from this {@code KStream} and records from
+     * the provided {@code KStream} in the merged stream.
+     * Relative order is preserved within each input stream though (i.e., records within one input
+     * stream are processed in order).
      *
-     * @param topic the topic name
-     * @return a {@code KStream} that contains the exact same (and potentially repartitioned) records as this {@code KStream}
+     * @param stream a stream which is to be merged into this stream
+     * @return a merged stream containing all records from this and the provided {@code KStream}
      */
-    KStream<K, V> through(final String topic);
+    KStream<K, V> merge(final KStream<K, V> stream);
 
     /**
      * Materialize this stream to a topic and create a new {@code KStream} from the topic using default serializers and
-     * deserializers and a customizable {@link StreamPartitioner} to determine the distribution of records to partitions.
+     * deserializers and producer's {@link DefaultPartitioner}.
      * The specified topic should be manually created before it is used (i.e., before the Kafka Streams application is
      * started).
      * <p>
-     * This is equivalent to calling {@link #to(StreamPartitioner, String) #to(StreamPartitioner, someTopicName)} and
+     * This is equivalent to calling {@link #to(String) #to(someTopicName)} and
      * {@link StreamsBuilder#stream(String) StreamsBuilder#stream(someTopicName)}.
      *
-     * @param partitioner the function used to determine how records are distributed among partitions of the topic,
-     *                    if not specified producer's {@link DefaultPartitioner} will be used
-     * @param topic       the topic name
-     * @return a {@code KStream} that contains the exact same (and potentially repartitioned) records as this {@code KStream}
-     * @deprecated use {@link #through(String, Produced) through(topic, Produced.streamPartitioner(partitioner))}
-     */
-    @Deprecated
-    KStream<K, V> through(final StreamPartitioner<? super K, ? super V> partitioner,
-                          final String topic);
-
-    /**
-     * Materialize this stream to a topic, and create a new {@code KStream} from the topic.
-     * The specified topic should be manually created before it is used (i.e., before the Kafka Streams application is
-     * started).
-     * <p>
-     * If {@code keySerde} provides a {@link WindowedSerializer} for the key, {@link WindowedStreamPartitioner} is
-     * used&mdash;otherwise producer's {@link DefaultPartitioner} is used.
-     * <p>
-     * This is equivalent to calling {@link #to(Serde, Serde, String) #to(keySerde, valSerde, someTopicName)} and
-     * {@link KStreamBuilder#stream(Serde, Serde, String...) KStreamBuilder#stream(keySerde, valSerde, someTopicName)}.
-     *
-     * @param keySerde key serde used to send key-value pairs,
-     *                 if not specified the default key serde defined in the configuration will be used
-     * @param valSerde value serde used to send key-value pairs,
-     *                 if not specified the default value serde defined in the configuration will be used
-     * @param topic    the topic name
-     * @return a {@code KStream} that contains the exact same (and potentially repartitioned) records as this {@code KStream}
-     * @deprecated use {@link #through(String, Produced) through(topic, Produced.with(keySerde, valSerde))}
-     */
-    @Deprecated
-    KStream<K, V> through(final Serde<K> keySerde,
-                          final Serde<V> valSerde,
-                          final String topic);
-
-    /**
-     * Materialize this stream to a topic and create a new {@code KStream} from the topic using a customizable
-     * {@link StreamPartitioner} to determine the distribution of records to partitions.
-     * The specified topic should be manually created before it is used (i.e., before the Kafka Streams application is
-     * started).
-     * <p>
-     * This is equivalent to calling {@link #to(Serde, Serde, StreamPartitioner, String) #to(keySerde, valSerde,
-     * StreamPartitioner, someTopicName)} and {@link KStreamBuilder#stream(Serde, Serde, String...)
-     * KStreamBuilder#stream(keySerde, valSerde, someTopicName)}.
-     *
-     * @param keySerde    key serde used to send key-value pairs,
-     *                    if not specified the default key serde defined in the configuration will be used
-     * @param valSerde    value serde used to send key-value pairs,
-     *                    if not specified the default value serde defined in the configuration will be used
-     * @param partitioner the function used to determine how records are distributed among partitions of the topic,
-     *                    if not specified and {@code keySerde} provides a {@link WindowedSerializer} for the key,
-     *                    {@link WindowedStreamPartitioner} will be used&mdash;otherwise {@link DefaultPartitioner} will
-     *                    be used
-     * @param topic       the topic name
+     * @param topic the topic name
      * @return a {@code KStream} that contains the exact same (and potentially repartitioned) records as this {@code KStream}
-     * @deprecated use {@link #through(String, Produced) through(topic, Produced.with(keySerde, valSerde, partitioner))}
      */
-    @Deprecated
-    KStream<K, V> through(final Serde<K> keySerde,
-                          final Serde<V> valSerde,
-                          final StreamPartitioner<? super K, ? super V> partitioner,
-                          final String topic);
+    KStream<K, V> through(final String topic);
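
For the removed through overloads, the serdes and the partitioner now travel in a Produced instance; a sketch with an illustrative topic name and a hypothetical partitioner:

    StreamPartitioner<String, Long> partitioner = ...; // user-defined

    // Before: stream.through(keySerde, valSerde, partitioner, "my-topic");
    KStream<String, Long> reread = stream.through("my-topic",
        Produced.with(Serdes.String(), Serdes.Long(), partitioner));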
 
     /**
      * Materialize this stream to a topic and create a new {@code KStream} from the topic using the
@@ -898,63 +458,6 @@ public interface KStream<K, V> {
     void to(final String topic);
 
     /**
-     * Materialize this stream to a topic using default serializers specified in the config and a customizable
-     * {@link StreamPartitioner} to determine the distribution of records to partitions.
-     * The specified topic should be manually created before it is used (i.e., before the Kafka Streams application is
-     * started).
-     *
-     * @param partitioner the function used to determine how records are distributed among partitions of the topic,
-     *                    if not specified producer's {@link DefaultPartitioner} will be used
-     * @param topic       the topic name
-     * @deprecated use {@link #to(String, Produced) to(topic, Produced.streamPartitioner(partitioner))}
-     */
-    @Deprecated
-    void to(final StreamPartitioner<? super K, ? super V> partitioner,
-            final String topic);
-
-    /**
-     * Materialize this stream to a topic. If {@code keySerde} provides a {@link WindowedSerializer WindowedSerializer}
-     * for the key, {@link WindowedStreamPartitioner} is used&mdash;otherwise producer's {@link DefaultPartitioner} is
-     * used.
-     * The specified topic should be manually created before it is used (i.e., before the Kafka Streams application is
-     * started).
-     *
-     * @param keySerde key serde used to send key-value pairs,
-     *                 if not specified the default serde defined in the configs will be used
-     * @param valSerde value serde used to send key-value pairs,
-     *                 if not specified the default serde defined in the configs will be used
-     * @param topic    the topic name
-     * @deprecated use {@link #to(String, Produced) to(topic, Produced.with(keySerde, valSerde))}
-     */
-    @Deprecated
-    void to(final Serde<K> keySerde,
-            final Serde<V> valSerde,
-            final String topic);
-
-    /**
-     * Materialize this stream to a topic using a customizable {@link StreamPartitioner} to determine the distribution
-     * of records to partitions.
-     * The specified topic should be manually created before it is used (i.e., before the Kafka Streams application is
-     * started).
-     *
-     * @param keySerde    key serde used to send key-value pairs,
-     *                    if not specified the default serde defined in the configs will be used
-     * @param valSerde    value serde used to send key-value pairs,
-     *                    if not specified the default serde defined in the configs will be used
-     * @param partitioner the function used to determine how records are distributed among partitions of the topic,
-     *                    if not specified and {@code keySerde} provides a {@link WindowedSerializer} for the key,
-     *                    {@link WindowedStreamPartitioner} will be used&mdash;otherwise {@link DefaultPartitioner} will
-     *                    be used
-     * @param topic       the topic name
-     * @deprecated use {@link #to(String, Produced) to(topic, Produced.with(keySerde, valSerde, partitioner))}
-     */
-    @Deprecated
-    void to(final Serde<K> keySerde,
-            final Serde<V> valSerde,
-            final StreamPartitioner<? super K, ? super V> partitioner,
-            final String topic);
-
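
Likewise for the removed to overloads; a sketch with an illustrative topic name:

    // Before: stream.to(Serdes.String(), Serdes.Long(), "output-topic");
    stream.to("output-topic", Produced.with(Serdes.String(), Serdes.Long()));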
-    /**
      * Materialize this stream to a topic using the provided {@link Produced} instance.
      * The specified topic should be manually created before it is used (i.e., before the Kafka Streams application is
      * started).
@@ -1249,7 +752,9 @@ public interface KStream<K, V> {
      * This topic will be named "${applicationId}-XXX-repartition", where "applicationId" is user-specified in
      * {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is
      * an internally generated name, and "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * <p>
      * For this case, all data of this stream will be redistributed through the repartitioning topic by writing all
      * records to it, and rereading all records from it, such that the resulting {@link KGroupedStream} is partitioned
@@ -1276,7 +781,9 @@ public interface KStream<K, V> {
      * This topic will be named "${applicationId}-XXX-repartition", where "applicationId" is user-specified in
      * {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is
      * an internally generated name, and "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * <p>
      * For this case, all data of this stream will be redistributed through the repartitioning topic by writing all
      * records to it, and rereading all records from it, such that the resulting {@link KGroupedStream} is partitioned
@@ -1288,36 +795,6 @@ public interface KStream<K, V> {
     KGroupedStream<K, V> groupByKey(final Serialized<K, V> serialized);
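
The serde-based groupByKey overload removed below maps onto Serialized.with; a sketch:

    // Before: stream.groupByKey(Serdes.String(), Serdes.Long());
    KGroupedStream<String, Long> grouped =
        stream.groupByKey(Serialized.with(Serdes.String(), Serdes.Long()));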
 
     /**
-     * Group the records by their current key into a {@link KGroupedStream} while preserving the original values.
-     * Grouping a stream on the record key is required before an aggregation operator can be applied to the data
-     * (cf. {@link KGroupedStream}).
-     * If a record key is {@code null} the record will not be included in the resulting {@link KGroupedStream}.
-     * <p>
-     * If a key changing operator was used before this operation (e.g., {@link #selectKey(KeyValueMapper)},
-     * {@link #map(KeyValueMapper)}, {@link #flatMap(KeyValueMapper)}, or
-     * {@link #transform(TransformerSupplier, String...)}), and no data redistribution happened afterwards (e.g., via
-     * {@link #through(String)}), an internal repartitioning topic will be created in Kafka.
-     * This topic will be named "${applicationId}-XXX-repartition", where "applicationId" is user-specified in
-     * {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is
-     * an internally generated name, and "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     * <p>
-     * For this case, all data of this stream will be redistributed through the repartitioning topic by writing all
-     * records to it, and rereading all records from it, such that the resulting {@link KGroupedStream} is partitioned
-     * correctly on its key.
-     *
-     * @param keySerde key serdes for materializing this stream,
-     *                 if not specified the default serdes defined in the configs will be used
-     * @param valSerde value serdes for materializing this stream,
-     *                 if not specified the default serdes defined in the configs will be used
-     * @return a {@link KGroupedStream} that contains the grouped records of the original {@code KStream}
-     * @deprecated use {@link #groupByKey(Serialized) groupByKey(Serialized.with(keySerde, valSerde))}
-     */
-    @Deprecated
-    KGroupedStream<K, V> groupByKey(final Serde<K> keySerde,
-                                    final Serde<V> valSerde);
-
-    /**
      * Group the records of this {@code KStream} on a new key that is selected using the provided {@link KeyValueMapper}
      * and default serializers and deserializers.
      * Grouping a stream on the record key is required before an aggregation operator can be applied to the data
@@ -1329,7 +806,9 @@ public interface KStream<K, V> {
      * This topic will be named "${applicationId}-XXX-repartition", where "applicationId" is user-specified in
      * {@link  StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is
      * an internally generated name, and "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * <p>
      * All data of this stream will be redistributed through the repartitioning topic by writing all records to it,
      * and rereading all records from it, such that the resulting {@link KGroupedStream} is partitioned on the new key.
@@ -1355,7 +834,9 @@ public interface KStream<K, V> {
      * This topic will be named "${applicationId}-XXX-repartition", where "applicationId" is user-specified in
      * {@link  StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is
      * an internally generated name, and "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * <p>
      * All data of this stream will be redistributed through the repartitioning topic by writing all records to it,
      * and rereading all records from it, such that the resulting {@link KGroupedStream} is partitioned on the new key.
@@ -1370,40 +851,6 @@ public interface KStream<K, V> {
                                        final Serialized<KR, V> serialized);
 
     /**
-     * Group the records of this {@code KStream} on a new key that is selected using the provided {@link KeyValueMapper}.
-     * Grouping a stream on the record key is required before an aggregation operator can be applied to the data
-     * (cf. {@link KGroupedStream}).
-     * The {@link KeyValueMapper} selects a new key (with potentially different type) while preserving the original values.
-     * If the new record key is {@code null} the record will not be included in the resulting {@link KGroupedStream}.
-     * <p>
-     * Because a new key is selected, an internal repartitioning topic will be created in Kafka.
-     * This topic will be named "${applicationId}-XXX-repartition", where "applicationId" is user-specified in
-     * {@link StreamsConfig StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
-     * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     * <p>
-     * All data of this stream will be redistributed through the repartitioning topic by writing all records to it,
-     * and rereading all records from it, such that the resulting {@link KGroupedStream} is partitioned on the new key.
-     * <p>
-     * This is equivalent to calling {@link #selectKey(KeyValueMapper)} followed by {@link #groupByKey(Serde, Serde)}.
-     *
-     * @param selector a {@link KeyValueMapper} that computes a new key for grouping
-     * @param keySerde key serdes for materializing this stream,
-     *                 if not specified the default serdes defined in the configs will be used
-     * @param valSerde value serdes for materializing this stream,
-     *                 if not specified the default serdes defined in the configs will be used
-     * @param <KR>     the key type of the result {@link KGroupedStream}
-     * @return a {@link KGroupedStream} that contains the grouped records of the original {@code KStream}
-     * @see #groupByKey()
-     * @deprecated use {@link #groupBy(KeyValueMapper, Serialized) groupBy(selector, Serialized.with(keySerde, valSerde))}
-     */
-    @Deprecated
-    <KR> KGroupedStream<KR, V> groupBy(final KeyValueMapper<? super K, ? super V, KR> selector,
-                                       final Serde<KR> keySerde,
-                                       final Serde<V> valSerde);
-
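
The removed serde-based groupBy maps onto Serialized the same way; a sketch with an illustrative key selector:

    // Before: stream.groupBy(selector, Serdes.Integer(), Serdes.Long());
    KGroupedStream<Integer, Long> byKeyLength =
        stream.groupBy((key, value) -> key.length(),
                       Serialized.with(Serdes.Integer(), Serdes.Long()));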
-    /**
      * Join records of this stream with another {@code KStream}'s records using windowed inner equi join with default
      * serializers and deserializers.
      * The join is computed on the records' key with join attribute {@code thisKStream.key == otherKStream.key}.
@@ -1450,83 +897,7 @@ public interface KStream<K, V> {
      * user-specified in {@link  StreamsConfig} via parameter
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
      * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     * <p>
-     * Repartitioning can happen for one or both of the joining {@code KStream}s.
-     * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
-     * records to it, and rereading all records from it, such that the join input {@code KStream} is partitioned
-     * correctly on its key.
-     * <p>
-     * Both of the joining {@code KStream}s will be materialized in local state stores with auto-generated store names.
-     * For failure and recovery each store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-storeName-changelog", where "applicationId" is user-specified
-     * in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is an
-     * internally generated name, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
      *
-     * @param otherStream the {@code KStream} to be joined with this stream
-     * @param joiner      a {@link ValueJoiner} that computes the join result for a pair of matching records
-     * @param windows     the specification of the {@link JoinWindows}
-     * @param <VO>        the value type of the other stream
-     * @param <VR>        the value type of the result stream
-     * @return a {@code KStream} that contains join-records for each key and values computed by the given
-     * {@link ValueJoiner}, one for each matched record-pair with the same key and within the joining window intervals
-     * @see #leftJoin(KStream, ValueJoiner, JoinWindows)
-     * @see #outerJoin(KStream, ValueJoiner, JoinWindows)
-     */
-    <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream,
-                                 final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
-                                 final JoinWindows windows);
-
-    /**
-     * Join records of this stream with another {@code KStream}'s records using windowed inner equi join with default
-     * serializers and deserializers.
-     * The join is computed on the records' key with join attribute {@code thisKStream.key == otherKStream.key}.
-     * Furthermore, two records are only joined if their timestamps are close to each other as defined by the given
-     * {@link JoinWindows}, i.e., the window defines an additional join predicate on the record timestamps.
-     * <p>
-     * For each pair of records meeting both join predicates the provided {@link ValueJoiner} will be called to compute
-     * a value (with arbitrary type) for the result record.
-     * The key of the result record is the same as for both joining input records.
-     * If an input record key or value is {@code null} the record will not be included in the join operation and thus no
-     * output record will be added to the resulting {@code KStream}.
-     * <p>
-     * Example (assuming all input records belong to the correct windows):
-     * <table border='1'>
-     * <tr>
-     * <th>this</th>
-     * <th>other</th>
-     * <th>result</th>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:A&gt;</td>
-     * <td></td>
-     * <td></td>
-     * </tr>
-     * <tr>
-     * <td>&lt;K2:B&gt;</td>
-     * <td>&lt;K2:b&gt;</td>
-     * <td>&lt;K2:ValueJoiner(B,b)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td>&lt;K3:c&gt;</td>
-     * <td></td>
-     * </tr>
-     * </table>
-     * Both input streams (or to be more precise, their underlying source topics) need to have the same number of
-     * partitions.
-     * If this is not the case, you would need to call {@link #through(String)} (for one input stream) before doing the
-     * join, using a pre-created topic with the "correct" number of partitions.
-     * Furthermore, both input streams need to be co-partitioned on the join key (i.e., use the same partitioner).
-     * If this requirement is not met, Kafka Streams will automatically repartition the data, i.e., it will create an
-     * internal repartitioning topic in Kafka and write and re-read the data via this topic before the actual join.
-     * The repartitioning topic will be named "${applicationId}-XXX-repartition", where "applicationId" is
-     * user-specified in {@link  StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
-     * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
      * <p>
      * Repartitioning can happen for one or both of the joining {@code KStream}s.
      * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
@@ -1539,173 +910,8 @@ public interface KStream<K, V> {
      * in {@link StreamsConfig} via parameter
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is an
      * internally generated name, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
      *
-     * @param otherStream the {@code KStream} to be joined with this stream
-     * @param joiner      a {@link ValueJoiner} that computes the join result for a pair of matching records
-     * @param windows     the specification of the {@link JoinWindows}
-     * @param joined      a {@link Joined} instance that defines the serdes to
-     *                    be used to serialize/deserialize inputs and outputs of the joined streams
-     * @param <VO>        the value type of the other stream
-     * @param <VR>        the value type of the result stream
-     * @return a {@code KStream} that contains join-records for each key and values computed by the given
-     * {@link ValueJoiner}, one for each matched record-pair with the same key and within the joining window intervals
-     * @see #leftJoin(KStream, ValueJoiner, JoinWindows, Joined)
-     * @see #outerJoin(KStream, ValueJoiner, JoinWindows, Joined)
-     */
-    <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream,
-                                 final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
-                                 final JoinWindows windows,
-                                 final Joined<K, V, VO> joined);
-    /**
-     * Join records of this stream with another {@code KStream}'s records using windowed inner equi join.
-     * The join is computed on the records' key with join attribute {@code thisKStream.key == otherKStream.key}.
-     * Furthermore, two records are only joined if their timestamps are close to each other as defined by the given
-     * {@link JoinWindows}, i.e., the window defines an additional join predicate on the record timestamps.
-     * <p>
-     * For each pair of records meeting both join predicates the provided {@link ValueJoiner} will be called to compute
-     * a value (with arbitrary type) for the result record.
-     * The key of the result record is the same as for both joining input records.
-     * If an input record key or value is {@code null} the record will not be included in the join operation and thus no
-     * output record will be added to the resulting {@code KStream}.
-     * <p>
-     * Example (assuming all input records belong to the correct windows):
-     * <table border='1'>
-     * <tr>
-     * <th>this</th>
-     * <th>other</th>
-     * <th>result</th>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:A&gt;</td>
-     * <td></td>
-     * <td></td>
-     * </tr>
-     * <tr>
-     * <td>&lt;K2:B&gt;</td>
-     * <td>&lt;K2:b&gt;</td>
-     * <td>&lt;K2:ValueJoiner(B,b)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td>&lt;K3:c&gt;</td>
-     * <td></td>
-     * </tr>
-     * </table>
-     * Both input streams (or to be more precise, their underlying source topics) need to have the same number of
-     * partitions.
-     * If this is not the case, you would need to call {@link #through(String)} (for one input stream) before doing the
-     * join, using a pre-created topic with the "correct" number of partitions.
-     * Furthermore, both input streams need to be co-partitioned on the join key (i.e., use the same partitioner).
-     * If this requirement is not met, Kafka Streams will automatically repartition the data, i.e., it will create an
-     * internal repartitioning topic in Kafka and write and re-read the data via this topic before the actual join.
-     * The repartitioning topic will be named "${applicationId}-XXX-repartition", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
-     * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     * <p>
-     * Repartitioning can happen for one or both of the joining {@code KStream}s.
-     * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
-     * records to it, and rereading all records from it, such that the join input {@code KStream} is partitioned
-     * correctly on its key.
-     * <p>
-     * Both of the joining {@code KStream}s will be materialized in local state stores with auto-generated store names.
-     * For failure and recovery each store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-storeName-changelog", where "applicationId" is user-specified
-     * in {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG},
-     * "storeName" is an internally generated name, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param otherStream     the {@code KStream} to be joined with this stream
-     * @param joiner          a {@link ValueJoiner} that computes the join result for a pair of matching records
-     * @param windows         the specification of the {@link JoinWindows}
-     * @param keySerde        key serdes for materializing both streams,
-     *                        if not specified the default serdes defined in the configs will be used
-     * @param thisValueSerde  value serdes for materializing this stream,
-     *                        if not specified the default serdes defined in the configs will be used
-     * @param otherValueSerde value serdes for materializing the other stream,
-     *                        if not specified the default serdes defined in the configs will be used
-     * @param <VO>            the value type of the other stream
-     * @param <VR>            the value type of the result stream
-     * @return a {@code KStream} that contains join-records for each key and values computed by the given
-     * {@link ValueJoiner}, one for each matched record-pair with the same key and within the joining window intervals
-     * @see #leftJoin(KStream, ValueJoiner, JoinWindows, Joined)
-     * @see #outerJoin(KStream, ValueJoiner, JoinWindows, Joined)
-     * @deprecated use {@link #join(KStream, ValueJoiner, JoinWindows, Joined) join(otherStream, joiner, windows, Joined.with(keySerde, thisValueSerde, otherValueSerde))}
-     */
-    @Deprecated
-    <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream,
-                                 final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
-                                 final JoinWindows windows,
-                                 final Serde<K> keySerde,
-                                 final Serde<V> thisValueSerde,
-                                 final Serde<VO> otherValueSerde);
-
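
Finally, the serde-based windowed join removed above becomes a Joined parameter; a sketch with an illustrative other stream, a 5-minute window, and placeholder serdes:

    KStream<String, Long> otherStream = ...;

    // Before: stream.join(otherStream, joiner, windows, keySerde, thisValueSerde, otherValueSerde);
    KStream<String, String> joined = stream.join(
        otherStream,
        (thisValue, otherValue) -> thisValue + "/" + otherValue, // ValueJoiner
        JoinWindows.of(5 * 60 * 1000L), // 5 minutes, in milliseconds
        Joined.with(Serdes.String(), Serdes.Long(), Serdes.Long()));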
-    /**
-     * Join records of this stream with another {@code KStream}'s records using windowed left equi join with default
-     * serializers and deserializers.
-     * In contrast to {@link #join(KStream, ValueJoiner, JoinWindows) inner-join}, all records from this stream will
-     * produce at least one output record (cf. below).
-     * The join is computed on the records' key with join attribute {@code thisKStream.key == otherKStream.key}.
-     * Furthermore, two records are only joined if their timestamps are close to each other as defined by the given
-     * {@link JoinWindows}, i.e., the window defines an additional join predicate on the record timestamps.
-     * <p>
-     * For each pair of records meeting both join predicates the provided {@link ValueJoiner} will be called to compute
-     * a value (with arbitrary type) for the result record.
-     * The key of the result record is the same as for both joining input records.
-     * Furthermore, for each input record of this {@code KStream} that does not satisfy the join predicate the provided
-     * {@link ValueJoiner} will be called with a {@code null} value for the other stream.
-     * If an input record key or value is {@code null} the record will not be included in the join operation and thus no
-     * output record will be added to the resulting {@code KStream}.
-     * <p>
-     * Example (assuming all input records belong to the correct windows):
-     * <table border='1'>
-     * <tr>
-     * <th>this</th>
-     * <th>other</th>
-     * <th>result</th>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:A&gt;</td>
-     * <td></td>
-     * <td>&lt;K1:ValueJoiner(A,null)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td>&lt;K2:B&gt;</td>
-     * <td>&lt;K2:b&gt;</td>
-     * <td>&lt;K2:ValueJoiner(B,b)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td>&lt;K3:c&gt;</td>
-     * <td></td>
-     * </tr>
-     * </table>
-     * Both input streams (or to be more precise, their underlying source topics) need to have the same number of
-     * partitions.
-     * If this is not the case, you would need to call {@link #through(String)} (for one input stream) before doing the
-     * join, using a pre-created topic with the "correct" number of partitions.
-     * Furthermore, both input streams need to be co-partitioned on the join key (i.e., use the same partitioner).
-     * If this requirement is not met, Kafka Streams will automatically repartition the data, i.e., it will create an
-     * internal repartitioning topic in Kafka and write and re-read the data via this topic before the actual join.
-     * The repartitioning topic will be named "${applicationId}-XXX-repartition", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
-     * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     * <p>
-     * Repartitioning can happen for one or both of the joining {@code KStream}s.
-     * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
-     * records to it, and rereading all records from it, such that the join input {@code KStream} is partitioned
-     * correctly on its key.
-     * <p>
-     * Both of the joining {@code KStream}s will be materialized in local state stores with auto-generated store names.
-     * For failure and recovery each store will be backed by an internal changelog topic that will be created in Kafka.
-     * The changelog topic will be named "${applicationId}-storeName-changelog", where "applicationId" is user-specified
-     * in {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG},
-     * "storeName" is an internally generated name, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @param otherStream the {@code KStream} to be joined with this stream
      * @param joiner      a {@link ValueJoiner} that computes the join result for a pair of matching records
@@ -1713,20 +919,17 @@ public interface KStream<K, V> {
      * @param <VO>        the value type of the other stream
      * @param <VR>        the value type of the result stream
      * @return a {@code KStream} that contains join-records for each key and values computed by the given
-     * {@link ValueJoiner}, one for each matched record-pair with the same key plus one for each non-matching record of
-     * this {@code KStream} and within the joining window intervals
-     * @see #join(KStream, ValueJoiner, JoinWindows)
+     * {@link ValueJoiner}, one for each matched record-pair with the same key and within the joining window intervals
+     * @see #leftJoin(KStream, ValueJoiner, JoinWindows)
      * @see #outerJoin(KStream, ValueJoiner, JoinWindows)
      */
-    <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
-                                     final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
-                                     final JoinWindows windows);
+    <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream,
+                                 final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
+                                 final JoinWindows windows);
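
For context, a minimal usage sketch of the retained three-argument join, together
with the Topology#describe() call that the updated javadoc now points readers to
(the builder, topic names, and the five-minute window are illustrative, not part
of this commit):

    import java.util.concurrent.TimeUnit;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.Topology;
    import org.apache.kafka.streams.kstream.JoinWindows;
    import org.apache.kafka.streams.kstream.KStream;

    StreamsBuilder builder = new StreamsBuilder();
    KStream<String, String> clicks = builder.stream("clicks");  // hypothetical topic
    KStream<String, String> views = builder.stream("views");    // hypothetical topic

    // Windowed inner join: one output record per matched pair whose
    // timestamps fall within five minutes of each other.
    KStream<String, String> joined = clicks.join(
        views,
        (click, view) -> click + "/" + view,
        JoinWindows.of(TimeUnit.MINUTES.toMillis(5)));

    // Internal repartition/changelog topic names are visible here,
    // which is what the javadoc now references instead of KafkaStreams#toString():
    Topology topology = builder.build();
    System.out.println(topology.describe());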
 
     /**
-     * Join records of this stream with another {@code KStream}'s records using windowed left equi join with default
+     * Join records of this stream with another {@code KStream}'s records using windowed inner equi join with default
      * serializers and deserializers.
-     * In contrast to {@link #join(KStream, ValueJoiner, JoinWindows) inner-join}, all records from this stream will
-     * produce at least one output record (cf. below).
      * The join is computed on the records' key with join attribute {@code thisKStream.key == otherKStream.key}.
      * Furthermore, two records are only joined if their timestamps are close to each other as defined by the given
      * {@link JoinWindows}, i.e., the window defines an additional join predicate on the record timestamps.
@@ -1734,8 +937,6 @@ public interface KStream<K, V> {
      * For each pair of records meeting both join predicates the provided {@link ValueJoiner} will be called to compute
      * a value (with arbitrary type) for the result record.
      * The key of the result record is the same as for both joining input records.
-     * Furthermore, for each input record of this {@code KStream} that does not satisfy the join predicate the provided
-     * {@link ValueJoiner} will be called with a {@code null} value for the other stream.
      * If an input record key or value is {@code null} the record will not be included in the join operation and thus no
      * output record will be added to the resulting {@code KStream}.
      * <p>
@@ -1749,7 +950,7 @@ public interface KStream<K, V> {
      * <tr>
      * <td>&lt;K1:A&gt;</td>
      * <td></td>
-     * <td>&lt;K1:ValueJoiner(A,null)&gt;</td>
+     * <td></td>
      * </tr>
      * <tr>
      * <td>&lt;K2:B&gt;</td>
@@ -1770,10 +971,9 @@ public interface KStream<K, V> {
      * If this requirement is not met, Kafka Streams will automatically repartition the data, i.e., it will create an
      * internal repartitioning topic in Kafka and write and re-read the data via this topic before the actual join.
      * The repartitioning topic will be named "${applicationId}-XXX-repartition", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
+     * user-specified in {@link StreamsConfig} via parameter
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
      * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
      * <p>
      * Repartitioning can happen for one or both of the joining {@code KStream}s.
      * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
@@ -1783,9 +983,11 @@ public interface KStream<K, V> {
      * Both of the joining {@code KStream}s will be materialized in local state stores with auto-generated store names.
      * For failure and recovery each store will be backed by an internal changelog topic that will be created in Kafka.
      * The changelog topic will be named "${applicationId}-storeName-changelog", where "applicationId" is user-specified
-     * in {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG},
-     * "storeName" is an internally generated name, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     * in {@link StreamsConfig} via parameter
+     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is an
+     * internally generated name, and "-changelog" is a fixed suffix.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @param otherStream the {@code KStream} to be joined with this stream
      * @param joiner      a {@link ValueJoiner} that computes the join result for a pair of matching records
@@ -1795,20 +997,20 @@ public interface KStream<K, V> {
      * @param <VO>        the value type of the other stream
      * @param <VR>        the value type of the result stream
      * @return a {@code KStream} that contains join-records for each key and values computed by the given
-     * {@link ValueJoiner}, one for each matched record-pair with the same key plus one for each non-matching record of
-     * this {@code KStream} and within the joining window intervals
-     * @see #join(KStream, ValueJoiner, JoinWindows, Joined)
+     * {@link ValueJoiner}, one for each matched record-pair with the same key and within the joining window intervals
+     * @see #leftJoin(KStream, ValueJoiner, JoinWindows, Joined)
      * @see #outerJoin(KStream, ValueJoiner, JoinWindows, Joined)
      */
-    <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
-                                     final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
-                                     final JoinWindows windows,
-                                     final Joined<K, V, VO> joined);
+    <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream,
+                                 final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
+                                 final JoinWindows windows,
+                                 final Joined<K, V, VO> joined);
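
When the default serdes in the config do not match the join input types, the
Joined overload above carries them explicitly. A sketch, assuming two
KStream<String, Long> inputs named left and right:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.kstream.Joined;

    KStream<String, Long> sums = left.join(
        right,
        (leftValue, rightValue) -> leftValue + rightValue,  // sum matched values
        JoinWindows.of(TimeUnit.MINUTES.toMillis(5)),
        // key serde, this-stream value serde, other-stream value serde
        Joined.with(Serdes.String(), Serdes.Long(), Serdes.Long()));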
 
     /**
-     * Join records of this stream with another {@code KStream}'s records using windowed left equi join.
-     * In contrast to {@link #join(KStream, ValueJoiner, JoinWindows, Joined) inner-join}, all records from
-     * this stream will produce at least one output record (cf. below).
+     * Join records of this stream with another {@code KStream}'s records using windowed left equi join with default
+     * serializers and deserializers.
+     * In contrast to {@link #join(KStream, ValueJoiner, JoinWindows) inner-join}, all records from this stream will
+     * produce at least one output record (cf. below).
      * The join is computed on the records' key with join attribute {@code thisKStream.key == otherKStream.key}.
      * Furthermore, two records are only joined if their timestamps are close to each other as defined by the given
      * {@link JoinWindows}, i.e., the window defines an additional join predicate on the record timestamps.
@@ -1855,7 +1057,6 @@ public interface KStream<K, V> {
      * user-specified in {@link StreamsConfig} via parameter
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
      * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
      * <p>
      * Repartitioning can happen for one or both of the joining {@code KStream}s.
      * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
@@ -1865,43 +1066,31 @@ public interface KStream<K, V> {
      * Both of the joining {@code KStream}s will be materialized in local state stores with auto-generated store names.
      * For failure and recovery each store will be backed by an internal changelog topic that will be created in Kafka.
      * The changelog topic will be named "${applicationId}-storeName-changelog", where "applicationId" is user-specified
-     * in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is an
-     * internally generated name, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param otherStream     the {@code KStream} to be joined with this stream
-     * @param joiner          a {@link ValueJoiner} that computes the join result for a pair of matching records
-     * @param windows         the specification of the {@link JoinWindows}
-     * @param keySerde        key serdes for materializing the other stream,
-     *                        if not specified the default serdes defined in the configs will be used
-     * @param thisValSerde    value serdes for materializing this stream,
-     *                        if not specified the default serdes defined in the configs will be used
-     * @param otherValueSerde value serdes for materializing the other stream,
-     *                        if not specified the default serdes defined in the configs will be used
-     * @param <VO>            the value type of the other stream
-     * @param <VR>            the value type of the result stream
+     * in {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG},
+     * "storeName" is an internally generated name, and "-changelog" is a fixed suffix.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
+     * @param otherStream the {@code KStream} to be joined with this stream
+     * @param joiner      a {@link ValueJoiner} that computes the join result for a pair of matching records
+     * @param windows     the specification of the {@link JoinWindows}
+     * @param <VO>        the value type of the other stream
+     * @param <VR>        the value type of the result stream
      * @return a {@code KStream} that contains join-records for each key and values computed by the given
      * {@link ValueJoiner}, one for each matched record-pair with the same key plus one for each non-matching record of
      * this {@code KStream} and within the joining window intervals
-     * @see #join(KStream, ValueJoiner, JoinWindows, Joined)
-     * @see #outerJoin(KStream, ValueJoiner, JoinWindows, Joined)
-     * @deprecated use {@link #leftJoin(KStream, ValueJoiner, JoinWindows, Joined) leftJoin(otherStream, joiner, windows, Joined.with(keySerde, thisValSerde, otherValueSerde))}
+     * @see #join(KStream, ValueJoiner, JoinWindows)
+     * @see #outerJoin(KStream, ValueJoiner, JoinWindows)
      */
-    @Deprecated
     <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
                                      final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
-                                     final JoinWindows windows,
-                                     final Serde<K> keySerde,
-                                     final Serde<V> thisValSerde,
-                                     final Serde<VO> otherValueSerde);
+                                     final JoinWindows windows);
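
A sketch of the left join, showing the null that the ValueJoiner must tolerate
for records of this stream that find no match within the window (stream names
assumed):

    // left, right: KStream<String, String> (assumed)
    KStream<String, String> enriched = left.leftJoin(
        right,
        (leftValue, rightValue) ->
            rightValue == null ? leftValue : leftValue + "," + rightValue,
        JoinWindows.of(TimeUnit.MINUTES.toMillis(5)));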
 
     /**
-     * Join records of this stream with another {@code KStream}'s records using windowed outer equi join with default
+     * Join records of this stream with another {@code KStream}'s records using windowed left equi join with default
      * serializers and deserializers.
-     * In contrast to {@link #join(KStream, ValueJoiner, JoinWindows) inner-join} or
-     * {@link #leftJoin(KStream, ValueJoiner, JoinWindows) left-join}, all records from both streams will produce at
-     * least one output record (cf. below).
+     * In contrast to {@link #join(KStream, ValueJoiner, JoinWindows) inner-join}, all records from this stream will
+     * produce at least one output record (cf. below).
      * The join is computed on the records' key with join attribute {@code thisKStream.key == otherKStream.key}.
      * Furthermore, two records are only joined if their timestamps are close to each other as defined by the given
      * {@link JoinWindows}, i.e., the window defines an additional join predicate on the record timestamps.
@@ -1909,8 +1098,8 @@ public interface KStream<K, V> {
      * For each pair of records meeting both join predicates the provided {@link ValueJoiner} will be called to compute
      * a value (with arbitrary type) for the result record.
      * The key of the result record is the same as for both joining input records.
-     * Furthermore, for each input record of both {@code KStream}s that does not satisfy the join predicate the provided
-     * {@link ValueJoiner} will be called with a {@code null} value for the this/other stream, respectively.
+     * Furthermore, for each input record of this {@code KStream} that does not satisfy the join predicate the provided
+     * {@link ValueJoiner} will be called with a {@code null} value for the other stream.
      * If an input record key or value is {@code null} the record will not be included in the join operation and thus no
      * output record will be added to the resulting {@code KStream}.
      * <p>
@@ -1929,12 +1118,12 @@ public interface KStream<K, V> {
      * <tr>
      * <td>&lt;K2:B&gt;</td>
      * <td>&lt;K2:b&gt;</td>
-     * <td>&lt;K2:ValueJoiner(null,b)&gt;<br />&lt;K2:ValueJoiner(B,b)&gt;</td>
+     * <td>&lt;K2:ValueJoiner(B,b)&gt;</td>
      * </tr>
      * <tr>
      * <td></td>
      * <td>&lt;K3:c&gt;</td>
-     * <td>&lt;K3:ValueJoiner(null,c)&gt;</td>
+     * <td></td>
      * </tr>
      * </table>
      * Both input streams (or to be more precise, their underlying source topics) need to have the same number of
@@ -1948,7 +1137,6 @@ public interface KStream<K, V> {
      * user-specified in {@link StreamsConfig} via parameter
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
      * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
      * <p>
      * Repartitioning can happen for one or both of the joining {@code KStream}s.
      * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
@@ -1960,22 +1148,26 @@ public interface KStream<K, V> {
      * The changelog topic will be named "${applicationId}-storeName-changelog", where "applicationId" is user-specified
      * in {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG},
      * "storeName" is an internally generated name, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @param otherStream the {@code KStream} to be joined with this stream
      * @param joiner      a {@link ValueJoiner} that computes the join result for a pair of matching records
      * @param windows     the specification of the {@link JoinWindows}
+     * @param joined      a {@link Joined} instance that defines the serdes to
+     *                    be used to serialize/deserialize inputs and outputs of the joined streams
      * @param <VO>        the value type of the other stream
      * @param <VR>        the value type of the result stream
      * @return a {@code KStream} that contains join-records for each key and values computed by the given
      * {@link ValueJoiner}, one for each matched record-pair with the same key plus one for each non-matching record of
-     * both {@code KStream} and within the joining window intervals
-     * @see #join(KStream, ValueJoiner, JoinWindows)
-     * @see #leftJoin(KStream, ValueJoiner, JoinWindows)
+     * this {@code KStream} and within the joining window intervals
+     * @see #join(KStream, ValueJoiner, JoinWindows, Joined)
+     * @see #outerJoin(KStream, ValueJoiner, JoinWindows, Joined)
      */
-    <VO, VR> KStream<K, VR> outerJoin(final KStream<K, VO> otherStream,
-                                      final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
-                                      final JoinWindows windows);
+    <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
+                                     final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
+                                     final JoinWindows windows,
+                                     final Joined<K, V, VO> joined);
 
     /**
      * Join records of this stream with another {@code KStream}'s records using windowed outer equi join with default
@@ -2029,7 +1221,6 @@ public interface KStream<K, V> {
      * user-specified in {@link StreamsConfig} via parameter
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
      * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
      * <p>
      * Repartitioning can happen for one or both of the joining {@code KStream}s.
      * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
@@ -2041,7 +1232,8 @@ public interface KStream<K, V> {
      * The changelog topic will be named "${applicationId}-storeName-changelog", where "applicationId" is user-specified
      * in {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG},
      * "storeName" is an internally generated name, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @param otherStream the {@code KStream} to be joined with this stream
      * @param joiner      a {@link ValueJoiner} that computes the join result for a pair of matching records
@@ -2051,19 +1243,19 @@ public interface KStream<K, V> {
      * @return a {@code KStream} that contains join-records for each key and values computed by the given
      * {@link ValueJoiner}, one for each matched record-pair with the same key plus one for each non-matching record of
      * both {@code KStream} and within the joining window intervals
-     * @see #join(KStream, ValueJoiner, JoinWindows, Joined)
-     * @see #leftJoin(KStream, ValueJoiner, JoinWindows, Joined)
+     * @see #join(KStream, ValueJoiner, JoinWindows)
+     * @see #leftJoin(KStream, ValueJoiner, JoinWindows)
      */
     <VO, VR> KStream<K, VR> outerJoin(final KStream<K, VO> otherStream,
                                       final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
-                                      final JoinWindows windows,
-                                      final Joined<K, V, VO> joined);
+                                      final JoinWindows windows);
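
In the outer join either argument of the ValueJoiner may be null (never both at
once), so a defensive joiner handles both sides; a sketch with assumed stream
names:

    KStream<String, String> merged = left.outerJoin(
        right,
        (leftValue, rightValue) ->
            (leftValue == null ? "" : leftValue)
                + "|"
                + (rightValue == null ? "" : rightValue),
        JoinWindows.of(TimeUnit.MINUTES.toMillis(5)));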
 
     /**
-     * Join records of this stream with another {@code KStream}'s records using windowed outer equi join.
-     * In contrast to {@link #join(KStream, ValueJoiner, JoinWindows, Joined) inner-join} or
-     * {@link #leftJoin(KStream, ValueJoiner, JoinWindows, Joined) left-join}, all records from both
-     * streams will produce at least one output record (cf. below).
+     * Join records of this stream with another {@code KStream}'s records using windowed outer equi join with default
+     * serializers and deserializers.
+     * In contrast to {@link #join(KStream, ValueJoiner, JoinWindows) inner-join} or
+     * {@link #leftJoin(KStream, ValueJoiner, JoinWindows) left-join}, all records from both streams will produce at
+     * least one output record (cf. below).
      * The join is computed on the records' key with join attribute {@code thisKStream.key == otherKStream.key}.
      * Furthermore, two records are only joined if their timestamps are close to each other as defined by the given
      * {@link JoinWindows}, i.e., the window defines an additional join predicate on the record timestamps.
@@ -2110,7 +1302,6 @@ public interface KStream<K, V> {
      * user-specified in {@link StreamsConfig} via parameter
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
      * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
      * <p>
      * Repartitioning can happen for one or both of the joining {@code KStream}s.
      * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
@@ -2122,33 +1313,24 @@ public interface KStream<K, V> {
      * The changelog topic will be named "${applicationId}-storeName-changelog", where "applicationId" is user-specified
      * in {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG},
      * "storeName" is an internally generated name, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     *
-     * @param otherStream     the {@code KStream} to be joined with this stream
-     * @param joiner          a {@link ValueJoiner} that computes the join result for a pair of matching records
-     * @param windows         the specification of the {@link JoinWindows}
-     * @param keySerde        key serdes for materializing both streams,
-     *                        if not specified the default serdes defined in the configs will be used
-     * @param thisValueSerde  value serdes for materializing this stream,
-     *                        if not specified the default serdes defined in the configs will be used
-     * @param otherValueSerde value serdes for materializing the other stream,
-     *                        if not specified the default serdes defined in the configs will be used
-     * @param <VO>            the value type of the other stream
-     * @param <VR>            the value type of the result stream
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
+     * @param otherStream the {@code KStream} to be joined with this stream
+     * @param joiner      a {@link ValueJoiner} that computes the join result for a pair of matching records
+     * @param windows     the specification of the {@link JoinWindows}
+     * @param <VO>        the value type of the other stream
+     * @param <VR>        the value type of the result stream
      * @return a {@code KStream} that contains join-records for each key and values computed by the given
      * {@link ValueJoiner}, one for each matched record-pair with the same key plus one for each non-matching record of
-     * both {@code KStream}s and within the joining window intervals
+     * both {@code KStream}s and within the joining window intervals
      * @see #join(KStream, ValueJoiner, JoinWindows, Joined)
      * @see #leftJoin(KStream, ValueJoiner, JoinWindows, Joined)
-     * @deprecated use {@link #outerJoin(KStream, ValueJoiner, JoinWindows, Joined) outerJoin(otherStream, joiner, windows, Joined.with(keySerde, thisValueSerde, otherValueSerde))}
      */
-    @Deprecated
     <VO, VR> KStream<K, VR> outerJoin(final KStream<K, VO> otherStream,
                                       final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                       final JoinWindows windows,
-                                      final Serde<K> keySerde,
-                                      final Serde<V> thisValueSerde,
-                                      final Serde<VO> otherValueSerde);
+                                      final Joined<K, V, VO> joined);
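
Callers of the removed Serde-based overloads migrate mechanically, along the
lines the deprecation notes prescribed; e.g. for outerJoin (joiner, windows,
and serdes are all illustrative):

    // Before (removed in this commit):
    //   left.outerJoin(right, joiner, windows, keySerde, thisValueSerde, otherValueSerde);
    // After:
    KStream<String, String> merged = left.outerJoin(
        right,
        joiner,
        windows,
        Joined.with(Serdes.String(), Serdes.String(), Serdes.String()));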
 
     /**
      * Join records of this stream with {@link KTable}'s records using non-windowed inner equi join with default
@@ -2205,7 +1387,9 @@ public interface KStream<K, V> {
      * user-specified in {@link StreamsConfig} via parameter
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
      * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * <p>
      * Repartitioning can happen only for this {@code KStream} but not for the provided {@link KTable}.
      * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
@@ -2279,7 +1463,9 @@ public interface KStream<K, V> {
      * user-specified in {@link StreamsConfig} via parameter
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
      * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * <p>
      * Repartitioning can happen only for this {@code KStream} but not for the provided {@link KTable}.
      * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
@@ -2302,87 +1488,6 @@ public interface KStream<K, V> {
                                  final Joined<K, V, VT> joined);
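
The stream-table join migrates the same way; per the deprecation note on the
overload removed below, the third serde slot of Joined is null because no
"other value" serde is needed for the table side (stream/table names assumed):

    // Before (removed): stream.join(table, joiner, keySerde, valSerde);
    KStream<String, String> output = stream.join(
        table,
        (streamValue, tableValue) -> streamValue + ":" + tableValue,
        Joined.with(Serdes.String(), Serdes.String(), null));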
 
     /**
-     * Join records of this stream with {@link KTable}'s records using non-windowed inner equi join.
-     * The join is a primary key table lookup join with join attribute {@code stream.key == table.key}.
-     * "Table lookup join" means, that results are only computed if {@code KStream} records are processed.
-     * This is done by performing a lookup for matching records in the <em>current</em> (i.e., processing time) internal
-     * {@link KTable} state.
-     * In contrast, processing {@link KTable} input records will only update the internal {@link KTable} state and
-     * will not produce any result records.
-     * <p>
-     * For each {@code KStream} record that finds a corresponding record in {@link KTable} the provided
-     * {@link ValueJoiner} will be called to compute a value (with arbitrary type) for the result record.
-     * The key of the result record is the same as for both joining input records.
-     * If an {@code KStream} input record key or value is {@code null} the record will not be included in the join
-     * operation and thus no output record will be added to the resulting {@code KStream}.
-     * <p>
-     * Example:
-     * <table border='1'>
-     * <tr>
-     * <th>KStream</th>
-     * <th>KTable</th>
-     * <th>state</th>
-     * <th>result</th>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:A&gt;</td>
-     * <td></td>
-     * <td></td>
-     * <td></td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td></td>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:C&gt;</td>
-     * <td></td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:ValueJoiner(C,b)&gt;</td>
-     * </tr>
-     * </table>
-     * Both input streams (or to be more precise, their underlying source topics) need to have the same number of
-     * partitions.
-     * If this is not the case, you would need to call {@link #through(String)} for this {@code KStream} before doing
-     * the join, using a pre-created topic with the same number of partitions as the given {@link KTable}.
-     * Furthermore, both input streams need to be co-partitioned on the join key (i.e., use the same partitioner);
-     * cf. {@link #join(GlobalKTable, KeyValueMapper, ValueJoiner)}.
-     * If this requirement is not met, Kafka Streams will automatically repartition the data, i.e., it will create an
-     * internal repartitioning topic in Kafka and write and re-read the data via this topic before the actual join.
-     * The repartitioning topic will be named "${applicationId}-XXX-repartition", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
-     * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     * <p>
-     * Repartitioning can happen only for this {@code KStream} but not for the provided {@link KTable}.
-     * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
-     * records to it, and rereading all records from it, such that the join input {@code KStream} is partitioned
-     * correctly on its key.
-     *
-     * @param table    the {@link KTable} to be joined with this stream
-     * @param joiner   a {@link ValueJoiner} that computes the join result for a pair of matching records
-     * @param keySerde key serdes for materializing this ({@link KStream} input) stream
-     *                 If not specified the default serdes defined in the configs will be used
-     * @param valSerde value serdes for materializing this ({@link KStream} input) stream,
-     *                 if not specified the default serdes defined in the configs will be used
-     * @param <VT>     the value type of the table
-     * @param <VR>     the value type of the result stream
-     * @return a {@code KStream} that contains join-records for each key and values computed by the given
-     * {@link ValueJoiner}, one for each matched record-pair with the same key
-     * @see #leftJoin(KTable, ValueJoiner, Joined)
-     * @see #join(GlobalKTable, KeyValueMapper, ValueJoiner)
-     * @deprecated use {@link #join(KTable, ValueJoiner, Joined) join(table, joiner, Joined.with(keySerde, valSerde, null))}
-     */
-    @Deprecated
-    <VT, VR> KStream<K, VR> join(final KTable<K, VT> table,
-                                 final ValueJoiner<? super V, ? super VT, ? extends VR> joiner,
-                                 final Serde<K> keySerde,
-                                 final Serde<V> valSerde);
-
-    /**
      * Join records of this stream with {@link KTable}'s records using non-windowed left equi join with default
      * serializers and deserializers.
      * In contrast to {@link #join(KTable, ValueJoiner) inner-join}, all records from this stream will produce an
@@ -2440,7 +1545,9 @@ public interface KStream<K, V> {
      * user-specified in {@link StreamsConfig} via parameter
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
      * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * <p>
      * Repartitioning can happen only for this {@code KStream} but not for the provided {@link KTable}.
      * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
@@ -2517,7 +1624,9 @@ public interface KStream<K, V> {
      * user-specified in {@link StreamsConfig} via parameter
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
      * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * <p>
      * Repartitioning can happen only for this {@code KStream} but not for the provided {@link KTable}.
      * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
@@ -2538,90 +1647,6 @@ public interface KStream<K, V> {
                                      final Joined<K, V, VT> joined);
 
     /**
-     * Join records of this stream with {@link KTable}'s records using non-windowed left equi join.
-     * In contrast to {@link #join(KTable, ValueJoiner) inner-join}, all records from this stream will produce an
-     * output record (cf. below).
-     * The join is a primary key table lookup join with join attribute {@code stream.key == table.key}.
-     * "Table lookup join" means, that results are only computed if {@code KStream} records are processed.
-     * This is done by performing a lookup for matching records in the <em>current</em> (i.e., processing time) internal
-     * {@link KTable} state.
-     * In contrast, processing {@link KTable} input records will only update the internal {@link KTable} state and
-     * will not produce any result records.
-     * <p>
-     * For each {@code KStream} record whether or not it finds a corresponding record in {@link KTable} the provided
-     * {@link ValueJoiner} will be called to compute a value (with arbitrary type) for the result record.
-     * If no {@link KTable} record was found during lookup, a {@code null} value will be provided to {@link ValueJoiner}.
-     * The key of the result record is the same as for both joining input records.
-     * If an {@code KStream} input record key or value is {@code null} the record will not be included in the join
-     * operation and thus no output record will be added to the resulting {@code KStream}.
-     * <p>
-     * Example:
-     * <table border='1'>
-     * <tr>
-     * <th>KStream</th>
-     * <th>KTable</th>
-     * <th>state</th>
-     * <th>result</th>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:A&gt;</td>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:ValueJoiner(A,null)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td></td>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:C&gt;</td>
-     * <td></td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:ValueJoiner(C,b)&gt;</td>
-     * </tr>
-     * </table>
-     * Both input streams (or to be more precise, their underlying source topics) need to have the same number of
-     * partitions.
-     * If this is not the case, you would need to call {@link #through(String)} for this {@code KStream} before doing
-     * the join, using a pre-created topic with the same number of partitions as the given {@link KTable}.
-     * Furthermore, both input streams need to be co-partitioned on the join key (i.e., use the same partitioner);
-     * cf. {@link #join(GlobalKTable, KeyValueMapper, ValueJoiner)}.
-     * If this requirement is not met, Kafka Streams will automatically repartition the data, i.e., it will create an
-     * internal repartitioning topic in Kafka and write and re-read the data via this topic before the actual join.
-     * The repartitioning topic will be named "${applicationId}-XXX-repartition", where "applicationId" is
-     * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
-     * "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     * <p>
-     * Repartitioning can happen only for this {@code KStream} but not for the provided {@link KTable}.
-     * For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
-     * records to it, and rereading all records from it, such that the join input {@code KStream} is partitioned
-     * correctly on its key.
-     *
-     * @param table    the {@link KTable} to be joined with this stream
-     * @param joiner   a {@link ValueJoiner} that computes the join result for a pair of matching records
-     * @param keySerde key serdes for materializing this ({@link KStream} input) stream
-     *                 If not specified the default serdes defined in the configs will be used
-     * @param valSerde value serdes for materializing this ({@link KStream} input) stream,
-     *                 if not specified the default serdes defined in the configs will be used
-     * @param <VT>     the value type of the table
-     * @param <VR>     the value type of the result stream
-     * @return a {@code KStream} that contains join-records for each key and values computed by the given
-     * {@link ValueJoiner}, one output for each input {@code KStream} record
-     * @see #join(KTable, ValueJoiner, Serde, Serde)
-     * @see #leftJoin(GlobalKTable, KeyValueMapper, ValueJoiner)
-     * @deprecated use {@link #leftJoin(KTable, ValueJoiner, Joined) leftJoin(table, joiner, Joined.with(keySerde, valSerde, null))}
-     */
-    @Deprecated
-    <VT, VR> KStream<K, VR> leftJoin(final KTable<K, VT> table,
-                                     final ValueJoiner<? super V, ? super VT, ? extends VR> joiner,
-                                     final Serde<K> keySerde,
-                                     final Serde<V> valSerde);
-
-    /**
      * Join records of this stream with {@link GlobalKTable}'s records using non-windowed inner equi join.
      * The join is a primary key table lookup join with join attribute
      * {@code keyValueMapper.map(stream.keyValue) == table.key}.
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/KTable.java b/streams/src/main/java/org/apache/kafka/streams/kstream/KTable.java
index 1aaad1e..dbead79 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/KTable.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/KTable.java
@@ -23,6 +23,7 @@ import org.apache.kafka.streams.KafkaStreams;
 import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.StreamsBuilder;
 import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.Topology;
 import org.apache.kafka.streams.processor.StateStore;
 import org.apache.kafka.streams.state.KeyValueBytesStoreSupplier;
 import org.apache.kafka.streams.state.KeyValueStore;
@@ -127,84 +128,6 @@ public interface KTable<K, V> {
                         final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized);
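
The queryableStoreName- and StateStoreSupplier-based filter overloads removed
below both collapse into the single Materialized parameter; a migration sketch
(predicate and store name illustrative):

    import org.apache.kafka.streams.kstream.Materialized;

    // Before (removed): table.filter(predicate, "filtered-store");
    KTable<String, String> filtered = table.filter(
        (key, value) -> value != null && !value.isEmpty(),
        Materialized.as("filtered-store"));
    // filterNot migrates identically:
    //   table.filterNot(predicate, Materialized.as("filtered-store"));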
 
     /**
-     * Create a new {@code KTable} that consists of all records of this {@code KTable} which satisfy the given
-     * predicate.
-     * All records that do not satisfy the predicate are dropped.
-     * For each {@code KTable} update the filter is evaluated on the updated record to produce an updated record for the
-     * result {@code KTable}.
-     * This is a stateless record-by-record operation.
-     * <p>
-     * Note that {@code filter} for a <i>changelog stream</i> works differently than {@link KStream#filter(Predicate)
-     * record stream filters}, because {@link KeyValue records} with {@code null} values (so-called tombstone records)
-     * have delete semantics.
-     * Thus, for tombstones the provided filter predicate is not evaluated but the tombstone record is forwarded
-     * directly if required (i.e., if there is anything to be deleted).
-     * Furthermore, for each record that gets dropped (i.e., does not satisfy the given predicate) a tombstone record
-     * is forwarded.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // filtering words
-     * ReadOnlyKeyValueStore<K,V> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<K, V>keyValueStore());
-     * K key = "some-word";
-     * V valueForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     *
-     * @param predicate a filter {@link Predicate} that is applied to each record
-     * @param queryableStoreName a user-provided name of the underlying {@link KTable} that can be
-     *                          used to subsequently query the operation results; valid characters are ASCII
-     *                          alphanumerics, '.', '_' and '-'. If {@code null} then the results cannot be queried
-     *                          (i.e., that would be equivalent to calling {@link KTable#filter(Predicate)}.
-     * @return a {@code KTable} that contains only those records that satisfy the given predicate
-     * @see #filterNot(Predicate, Materialized)
-     * @deprecated use {@link #filter(Predicate, Materialized) filter(predicate, Materialized.as(queryableStoreName))}
-     */
-    @Deprecated
-    KTable<K, V> filter(final Predicate<? super K, ? super V> predicate, final String queryableStoreName);
-
-    /**
-     * Create a new {@code KTable} that consists of all records of this {@code KTable} which satisfy the given
-     * predicate.
-     * All records that do not satisfy the predicate are dropped.
-     * For each {@code KTable} update the filter is evaluated on the updated record to produce an updated record for the
-     * result {@code KTable}.
-     * This is a stateless record-by-record operation.
-     * <p>
-     * Note that {@code filter} for a <i>changelog stream</i> works differently than {@link KStream#filter(Predicate)
-     * record stream filters}, because {@link KeyValue records} with {@code null} values (so-called tombstone records)
-     * have delete semantics.
-     * Thus, for tombstones the provided filter predicate is not evaluated but the tombstone record is forwarded
-     * directly if required (i.e., if there is anything to be deleted).
-     * Furthermore, for each record that gets dropped (i.e., does not satisfy the given predicate) a tombstone record
-     * is forwarded.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // filtering words
-     * ReadOnlyKeyValueStore<K,V> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<K, V>keyValueStore());
-     * K key = "some-word";
-     * V valueForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     *
-     * @param predicate a filter {@link Predicate} that is applied to each record
-     * @param storeSupplier user defined state store supplier. Cannot be {@code null}.
-     * @return a {@code KTable} that contains only those records that satisfy the given predicate
-     * @see #filterNot(Predicate, Materialized)
-     * @deprecated use {@link #filter(Predicate, Materialized) filter(predicate, Materialized.as(KeyValueByteStoreSupplier))}
-     */
-    @Deprecated
-    KTable<K, V> filter(final Predicate<? super K, ? super V> predicate,
-                        final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier);
-
-    /**
      * Create a new {@code KTable} that consists all records of this {@code KTable} which do <em>not</em> satisfy the
      * given predicate.
      * All records that <em>do</em> satisfy the predicate are dropped.
@@ -262,81 +185,6 @@ public interface KTable<K, V> {
      */
     KTable<K, V> filterNot(final Predicate<? super K, ? super V> predicate,
                            final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized);
-    /**
-     * Create a new {@code KTable} that consists all records of this {@code KTable} which do <em>not</em> satisfy the
-     * given predicate.
-     * All records that <em>do</em> satisfy the predicate are dropped.
-     * For each {@code KTable} update the filter is evaluated on the updated record to produce an updated record for the
-     * result {@code KTable}.
-     * This is a stateless record-by-record operation.
-     * <p>
-     * Note that {@code filterNot} for a <i>changelog stream</i> works differently than {@link KStream#filterNot(Predicate)
-     * record stream filters}, because {@link KeyValue records} with {@code null} values (so-called tombstone records)
-     * have delete semantics.
-     * Thus, for tombstones the provided filter predicate is not evaluated but the tombstone record is forwarded
-     * directly if required (i.e., if there is anything to be deleted).
-     * Furthermore, for each record that gets dropped (i.e., does satisfy the given predicate) a tombstone record is
-     * forwarded.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // filtering words
-     * ReadOnlyKeyValueStore<K,V> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<K, V>keyValueStore());
-     * K key = "some-word";
-     * V valueForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * @param predicate a filter {@link Predicate} that is applied to each record
-     * @param storeSupplier user defined state store supplier. Cannot be {@code null}.
-     * @return a {@code KTable} that contains only those records that do <em>not</em> satisfy the given predicate
-     * @see #filter(Predicate, Materialized)
-     * @deprecated use {@link #filterNot(Predicate, Materialized) filterNot(predicate, Materialized.as(KeyValueByteStoreSupplier))}
-     */
-    @Deprecated
-    KTable<K, V> filterNot(final Predicate<? super K, ? super V> predicate,
-                           final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier);
-
-    /**
-     * Create a new {@code KTable} that consists all records of this {@code KTable} which do <em>not</em> satisfy the
-     * given predicate.
-     * All records that <em>do</em> satisfy the predicate are dropped.
-     * For each {@code KTable} update the filter is evaluated on the updated record to produce an updated record for the
-     * result {@code KTable}.
-     * This is a stateless record-by-record operation.
-     * <p>
-     * Note that {@code filterNot} for a <i>changelog stream</i> works differently than {@link KStream#filterNot(Predicate)
-     * record stream filters}, because {@link KeyValue records} with {@code null} values (so-called tombstone records)
-     * have delete semantics.
-     * Thus, for tombstones the provided filter predicate is not evaluated but the tombstone record is forwarded
-     * directly if required (i.e., if there is anything to be deleted).
-     * Furthermore, for each record that gets dropped (i.e., does satisfy the given predicate) a tombstone record is
-     * forwarded.
-     * <p>
-     * To query the local {@link KeyValueStore} it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * <pre>{@code
-     * KafkaStreams streams = ... // filtering words
-     * ReadOnlyKeyValueStore<K,V> localStore = streams.store(queryableStoreName, QueryableStoreTypes.<K, V>keyValueStore());
-     * K key = "some-word";
-     * V valueForKey = localStore.get(key); // key must be local (application state is shared over all running Kafka Streams instances)
-     * }</pre>
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * @param predicate a filter {@link Predicate} that is applied to each record
-     * @param queryableStoreName a user-provided name of the underlying {@link KTable} that can be
-     * used to subsequently query the operation results; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} then the results cannot be queried
-     * (i.e., that would be equivalent to calling {@link KTable#filterNot(Predicate)}.
-     * @return a {@code KTable} that contains only those records that do <em>not</em> satisfy the given predicate
-     * @see #filter(Predicate, Materialized)
-     * @deprecated use {@link #filter(Predicate, Materialized) filterNot(predicate, Materialized.as(queryableStoreName))}
-     */
-    @Deprecated
-    KTable<K, V> filterNot(final Predicate<? super K, ? super V> predicate, final String queryableStoreName);
 
 
     /**
@@ -498,100 +346,6 @@ public interface KTable<K, V> {
                                  final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized);
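
The mapValues overloads removed below follow suit: the value serde moves onto
Materialized. A sketch reusing the token-count example from the deleted javadoc
(types and store name illustrative):

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.state.KeyValueStore;

    // Before (removed): inputTable.mapValues(mapper, Serdes.Integer(), "token-counts");
    KTable<String, Integer> tokenCounts = inputTable.mapValues(
        value -> value.split(" ").length,  // tokens per value string
        Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("token-counts")
            .withValueSerde(Serdes.Integer()));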
 
     /**
-     * Create a new {@code KTable} by transforming the value of each record in this {@code KTable} into a new value
-     * (with possible new type) in the new {@code KTable}.
-     * For each {@code KTable} update the provided {@link ValueMapper} is applied to the value of the updated record and
-     * computes a new value for it, resulting in an updated record for the result {@code KTable}.
-     * Thus, an input record {@code <K,V>} can be transformed into an output record {@code <K:V'>}.
-     * This is a stateless record-by-record operation.
-     * <p>
-     * The example below counts the number of token of the value string.
-     * <pre>{@code
-     * KTable<String, String> inputTable = builder.table("topic");
-     * KTable<String, Integer> outputTable = inputTable.mapValue(new ValueMapper<String, Integer> {
-     *     Integer apply(String value) {
-     *         return value.split(" ").length;
-     *     }
-     * });
-     * }</pre>
-     * <p>
-     * To query the local {@link KeyValueStore} representing outputTable above it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * <p>
-     * This operation preserves data co-location with respect to the key.
-     * Thus, <em>no</em> internal data redistribution is required if a key based operator (like a join) is applied to
-     * the result {@code KTable}.
-     * <p>
-     * Note that {@code mapValues} for a <i>changelog stream</i> works differently than {@link KStream#mapValues(ValueMapper)
-     * record stream filters}, because {@link KeyValue records} with {@code null} values (so-called tombstone records)
-     * have delete semantics.
-     * Thus, for tombstones the provided value-mapper is not evaluated but the tombstone record is forwarded directly to
-     * delete the corresponding record in the result {@code KTable}.
-     *
-     * @param mapper a {@link ValueMapper} that computes a new output value
-     * @param queryableStoreName a user-provided name of the underlying {@link KTable} that can be
-     * used to subsequently query the operation results; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} then the results cannot be queried
-     * (i.e., that would be equivalent to calling {@link KTable#mapValues(ValueMapper)}.
-     * @param valueSerde serializer for new value type
-     * @param <VR>   the value type of the result {@code KTable}
-     *
-     * @return a {@code KTable} that contains records with unmodified keys and new values (possibly of different type)
-     * @deprecated use {@link #mapValues(ValueMapper, Materialized) mapValues(mapper, Materialized.as(queryableStoreName).withValueSerde(valueSerde))}
-     */
-    @Deprecated
-    <VR> KTable<K, VR> mapValues(final ValueMapper<? super V, ? extends VR> mapper, final Serde<VR> valueSerde, final String queryableStoreName);
-
-    /**
-     * Create a new {@code KTable} by transforming the value of each record in this {@code KTable} into a new value
-     * (with possible new type) in the new {@code KTable}.
-     * For each {@code KTable} update the provided {@link ValueMapper} is applied to the value of the updated record and
-     * computes a new value for it, resulting in an updated record for the result {@code KTable}.
-     * Thus, an input record {@code <K,V>} can be transformed into an output record {@code <K:V'>}.
-     * This is a stateless record-by-record operation.
-     * <p>
-     * The example below counts the number of token of the value string.
-     * <pre>{@code
-     * KTable<String, String> inputTable = builder.table("topic");
-     * KTable<String, Integer> outputTable = inputTable.mapValue(new ValueMapper<String, Integer> {
-     *     Integer apply(String value) {
-     *         return value.split(" ").length;
-     *     }
-     * });
-     * }</pre>
-     * <p>
-     * To query the local {@link KeyValueStore} representing outputTable above it must be obtained via
-     * {@link KafkaStreams#store(String, QueryableStoreType) KafkaStreams#store(...)}:
-     * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
-     * query the value of the key on a parallel running instance of your Kafka Streams application.
-     * <p>
-     * <p>
-     * This operation preserves data co-location with respect to the key.
-     * Thus, <em>no</em> internal data redistribution is required if a key based operator (like a join) is applied to
-     * the result {@code KTable}.
-     * <p>
-     * Note that {@code mapValues} for a <i>changelog stream</i> works differently than {@link KStream#mapValues(ValueMapper)
-     * record stream filters}, because {@link KeyValue records} with {@code null} values (so-called tombstone records)
-     * have delete semantics.
-     * Thus, for tombstones the provided value-mapper is not evaluated but the tombstone record is forwarded directly to
-     * delete the corresponding record in the result {@code KTable}.
-     *
-     * @param mapper a {@link ValueMapper} that computes a new output value
-     * @param valueSerde serializer for new value type
-     * @param storeSupplier user defined state store supplier. Cannot be {@code null}.
-     * @param <VR>   the value type of the result {@code KTable}
-     * @return a {@code KTable} that contains records with unmodified keys and new values (possibly of different type)
-     * @deprecated use {@link #mapValues(ValueMapper, Materialized) mapValues(mapper, Materialized.as(KeyValueByteStoreSupplier).withValueSerde(valueSerde))}
-     */
-    @Deprecated
-    <VR> KTable<K, VR> mapValues(final ValueMapper<? super V, ? extends VR> mapper,
-                                 final Serde<VR> valueSerde,
-                                 final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier);
-
-    /**
      * Convert this changelog stream to a {@link KStream}.
      * <p>
      * Note that this is a logical operation and only changes the "interpretation" of the stream, i.e., each record of
@@ -642,7 +396,9 @@ public interface KTable<K, V> {
      * This topic will be named "${applicationId}-XXX-repartition", where "applicationId" is user-specified in
      * {@link  StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is
      * an internally generated name, and "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * <p>
      * All data of this {@code KTable} will be redistributed through the repartitioning topic by writing all update
      * records to and rereading all updated records from it, such that the resulting {@link KGroupedTable} is partitioned
@@ -672,7 +428,9 @@ public interface KTable<K, V> {
      * This topic will be named "${applicationId}-XXX-repartition", where "applicationId" is user-specified in
      * {@link  StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is
      * an internally generated name, and "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * <p>
      * All data of this {@code KTable} will be redistributed through the repartitioning topic by writing all update
      * records to and rereading all updated records from it, such that the resulting {@link KGroupedTable} is partitioned
@@ -688,40 +446,6 @@ public interface KTable<K, V> {
                                            final Serialized<KR, VR> serialized);
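
As a usage sketch of the surviving groupBy(selector, Serialized) overload (the topic name and key/value types below are illustrative, not part of this patch):

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.KeyValue;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KGroupedTable;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Serialized;

    public class GroupByExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            // hypothetical topic: userId -> region
            final KTable<String, String> userRegions = builder.table("user-regions");
            // re-keying by region forces an internal "-repartition" topic
            final KGroupedTable<String, String> byRegion = userRegions.groupBy(
                    (userId, region) -> KeyValue.pair(region, userId),
                    Serialized.with(Serdes.String(), Serdes.String()));
            // lists the generated internal topic names, as noted above
            System.out.println(builder.build().describe());
        }
    }
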
 
     /**
-     * Re-groups the records of this {@code KTable} using the provided {@link KeyValueMapper}.
-     * Each {@link KeyValue} pair of this {@code KTable} is mapped to a new {@link KeyValue} pair by applying the
-     * provided {@link KeyValueMapper}.
-     * Re-grouping a {@code KTable} is required before an aggregation operator can be applied to the data
-     * (cf. {@link KGroupedTable}).
-     * The {@link KeyValueMapper} selects a new key and value (both with potentially different type).
-     * If the new record key is {@code null} the record will not be included in the resulting {@link KGroupedTable}
-     * <p>
-     * Because a new key is selected, an internal repartitioning topic will be created in Kafka.
-     * This topic will be named "${applicationId}-XXX-repartition", where "applicationId" is user-specified in
-     * {@link  StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is
-     * an internally generated name, and "-repartition" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
-     * <p>
-     * All data of this {@code KTable} will be redistributed through the repartitioning topic by writing all update
-     * records to and rereading all updated records from it, such that the resulting {@link KGroupedTable} is partitioned
-     * on the new key.
-     *
-     * @param selector   a {@link KeyValueMapper} that computes a new grouping key and value to be aggregated
-     * @param keySerde   key serdes for materializing this stream,
-     *                   if not specified the default serdes defined in the configs will be used
-     * @param valueSerde value serdes for materializing this stream,
-     *                   if not specified the default serdes defined in the configs will be used
-     * @param <KR>       the key type of the result {@link KGroupedTable}
-     * @param <VR>       the value type of the result {@link KGroupedTable}
-     * @return a {@link KGroupedTable} that contains the re-grouped records of the original {@code KTable}
-     * @deprecated use {@link #groupBy(KeyValueMapper, Serialized) groupBy(selector, Serialized.with(keySerde, valueSerde)}
-     */
-    @Deprecated
-    <KR, VR> KGroupedTable<KR, VR> groupBy(final KeyValueMapper<? super K, ? super V, KeyValue<KR, VR>> selector,
-                                           final Serde<KR> keySerde,
-                                           final Serde<VR> valueSerde);
-
-    /**
      * Join records of this {@code KTable} with another {@code KTable}'s records using non-windowed inner equi join.
      * The join is a primary key join with join attribute {@code thisKTable.key == otherKTable.key}.
      * The result is an ever updating {@code KTable} that represents the <em>current</em> (i.e., processing time) result
@@ -871,9 +595,14 @@ public interface KTable<K, V> {
     <VO, VR> KTable<K, VR> join(final KTable<K, VO> other,
                                 final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                 final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized);
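
For orientation, a minimal sketch of the Materialized-based inner join that supersedes the removed overloads below; topic and store names are invented for illustration:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.state.KeyValueStore;

    public class KTableJoinExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            // hypothetical topics, both keyed by customer ID
            final KTable<String, String> names = builder.table("customer-names");
            final KTable<String, String> emails = builder.table("customer-emails");
            // inner join; the result is queryable under the store name "customer-contacts"
            final KTable<String, String> contacts = names.join(
                    emails,
                    (name, email) -> name + " <" + email + ">",
                    Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("customer-contacts")
                            .withValueSerde(Serdes.String()));
        }
    }
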
+
     /**
-     * Join records of this {@code KTable} with another {@code KTable}'s records using non-windowed inner equi join.
+     * Join records of this {@code KTable} (left input) with another {@code KTable}'s (right input) records using
+     * non-windowed left equi join.
      * The join is a primary key join with join attribute {@code thisKTable.key == otherKTable.key}.
+     * In contrast to {@link #join(KTable, ValueJoiner) inner-join}, all records from the left {@code KTable} will produce
+     * an output record (cf. below).
      * The result is an ever updating {@code KTable} that represents the <em>current</em> (i.e., processing time) result
      * of the join.
      * <p>
@@ -882,13 +611,17 @@ public interface KTable<K, V> {
      * This happens in a symmetric way, i.e., for each update of either {@code this} or the {@code other} input
      * {@code KTable} the result gets updated.
      * <p>
-     * For each {@code KTable} record that finds a corresponding record in the other {@code KTable} the provided
-     * {@link ValueJoiner} will be called to compute a value (with arbitrary type) for the result record.
+     * For each {@code KTable} record that finds a corresponding record in the other {@code KTable}'s state the
+     * provided {@link ValueJoiner} will be called to compute a value (with arbitrary type) for the result record.
+     * Additionally, for each record of the left {@code KTable} that does not find a corresponding record in the
+     * right {@code KTable}'s state the provided {@link ValueJoiner} will be called with {@code rightValue =
+     * null} to compute a value (with arbitrary type) for the result record.
      * The key of the result record is the same as for both joining input records.
      * <p>
      * Note that {@link KeyValue records} with {@code null} values (so-called tombstone records) have delete semantics.
-     * Thus, for input tombstones the provided value-joiner is not called but a tombstone record is forwarded
-     * directly to delete a record in the result {@code KTable} if required (i.e., if there is anything to be deleted).
+     * For example, for left input tombstones the provided value-joiner is not called but a tombstone record is
+     * forwarded directly to delete a record in the result {@code KTable} if required (i.e., if there is anything to be
+     * deleted).
      * <p>
      * Input records with {@code null} key will be dropped and no join computation is performed.
      * <p>
@@ -906,7 +639,7 @@ public interface KTable<K, V> {
      * <td>&lt;K1:A&gt;</td>
      * <td></td>
      * <td></td>
-     * <td></td>
+     * <td>&lt;K1:ValueJoiner(A,null)&gt;</td>
      * </tr>
      * <tr>
      * <td></td>
@@ -916,180 +649,11 @@ public interface KTable<K, V> {
      * <td>&lt;K1:ValueJoiner(A,b)&gt;</td>
      * </tr>
      * <tr>
-     * <td>&lt;K1:C&gt;</td>
-     * <td>&lt;K1:C&gt;</td>
+     * <td>&lt;K1:null&gt;</td>
+     * <td></td>
      * <td></td>
      * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:ValueJoiner(C,b)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td>&lt;K1:C&gt;</td>
-     * <td>&lt;K1:null&gt;</td>
-     * <td></td>
-     * <td>&lt;K1:null&gt;</td>
-     * </tr>
-     * </table>
-     * Both input streams (or to be more precise, their underlying source topics) need to have the same number of
-     * partitions.
-     *
-     * @param other  the other {@code KTable} to be joined with this {@code KTable}
-     * @param joiner a {@link ValueJoiner} that computes the join result for a pair of matching records
-     * @param <VO>   the value type of the other {@code KTable}
-     * @param <VR>   the value type of the result {@code KTable}
-     * @param joinSerde serializer for join result value type
-     * @param queryableStoreName a user-provided name of the underlying {@link KTable} that can be
-     * used to subsequently query the operation results; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} then the results cannot be queried
-     * (i.e., that would be equivalent to calling {@link KTable#join(KTable, ValueJoiner)}.
-     * @return a {@code KTable} that contains join-records for each key and values computed by the given
-     * {@link ValueJoiner}, one for each matched record-pair with the same key
-     * @see #leftJoin(KTable, ValueJoiner, Materialized)
-     * @see #outerJoin(KTable, ValueJoiner, Materialized)
-     * @deprecated use {@link #join(KTable, ValueJoiner, Materialized) join(other, joiner, Materialized.as(queryableStoreName).withValueSerde(joinSerde)}
-     */
-    @Deprecated
-    <VO, VR> KTable<K, VR> join(final KTable<K, VO> other,
-                                final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
-                                final Serde<VR> joinSerde,
-                                final String queryableStoreName);
-
-    /**
-     * Join records of this {@code KTable} with another {@code KTable}'s records using non-windowed inner equi join.
-     * The join is a primary key join with join attribute {@code thisKTable.key == otherKTable.key}.
-     * The result is an ever updating {@code KTable} that represents the <em>current</em> (i.e., processing time) result
-     * of the join.
-     * <p>
-     * The join is computed by (1) updating the internal state of one {@code KTable} and (2) performing a lookup for a
-     * matching record in the <em>current</em> (i.e., processing time) internal state of the other {@code KTable}.
-     * This happens in a symmetric way, i.e., for each update of either {@code this} or the {@code other} input
-     * {@code KTable} the result gets updated.
-     * <p>
-     * For each {@code KTable} record that finds a corresponding record in the other {@code KTable} the provided
-     * {@link ValueJoiner} will be called to compute a value (with arbitrary type) for the result record.
-     * The key of the result record is the same as for both joining input records.
-     * <p>
-     * Note that {@link KeyValue records} with {@code null} values (so-called tombstone records) have delete semantics.
-     * Thus, for input tombstones the provided value-joiner is not called but a tombstone record is forwarded
-     * directly to delete a record in the result {@code KTable} if required (i.e., if there is anything to be deleted).
-     * <p>
-     * Input records with {@code null} key will be dropped and no join computation is performed.
-     * <p>
-     * Example:
-     * <table border='1'>
-     * <tr>
-     * <th>thisKTable</th>
-     * <th>thisState</th>
-     * <th>otherKTable</th>
-     * <th>otherState</th>
-     * <th>result updated record</th>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:A&gt;</td>
-     * <td>&lt;K1:A&gt;</td>
-     * <td></td>
-     * <td></td>
-     * <td></td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td>&lt;K1:A&gt;</td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:ValueJoiner(A,b)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:C&gt;</td>
-     * <td>&lt;K1:C&gt;</td>
-     * <td></td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:ValueJoiner(C,b)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td>&lt;K1:C&gt;</td>
-     * <td>&lt;K1:null&gt;</td>
-     * <td></td>
-     * <td>&lt;K1:null&gt;</td>
-     * </tr>
-     * </table>
-     * Both input streams (or to be more precise, their underlying source topics) need to have the same number of
-     * partitions.
-     *
-     * @param other  the other {@code KTable} to be joined with this {@code KTable}
-     * @param joiner a {@link ValueJoiner} that computes the join result for a pair of matching records
-     * @param <VO>   the value type of the other {@code KTable}
-     * @param <VR>   the value type of the result {@code KTable}
-     * @param storeSupplier user defined state store supplier. Cannot be {@code null}.
-     * @return a {@code KTable} that contains join-records for each key and values computed by the given
-     * {@link ValueJoiner}, one for each matched record-pair with the same key
-     * @see #leftJoin(KTable, ValueJoiner, Materialized)
-     * @see #outerJoin(KTable, ValueJoiner, Materialized)
-     * @deprecated use {@link #join(KTable, ValueJoiner, Materialized) join(other, joiner, Materialized.as(KeyValueByteStoreSupplier)}
-     */
-    @Deprecated
-    <VO, VR> KTable<K, VR> join(final KTable<K, VO> other,
-                                final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
-                                final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier);
-
-
-    /**
-     * Join records of this {@code KTable} (left input) with another {@code KTable}'s (right input) records using
-     * non-windowed left equi join.
-     * The join is a primary key join with join attribute {@code thisKTable.key == otherKTable.key}.
-     * In contrast to {@link #join(KTable, ValueJoiner) inner-join}, all records from left {@code KTable} will produce
-     * an output record (cf. below).
-     * The result is an ever updating {@code KTable} that represents the <em>current</em> (i.e., processing time) result
-     * of the join.
-     * <p>
-     * The join is computed by (1) updating the internal state of one {@code KTable} and (2) performing a lookup for a
-     * matching record in the <em>current</em> (i.e., processing time) internal state of the other {@code KTable}.
-     * This happens in a symmetric way, i.e., for each update of either {@code this} or the {@code other} input
-     * {@code KTable} the result gets updated.
-     * <p>
-     * For each {@code KTable} record that finds a corresponding record in the other {@code KTable}'s state the
-     * provided {@link ValueJoiner} will be called to compute a value (with arbitrary type) for the result record.
-     * Additionally, for each record of left {@code KTable} that does not find a corresponding record in the
-     * right {@code KTable}'s state the provided {@link ValueJoiner} will be called with {@code rightValue =
-     * null} to compute a value (with arbitrary type) for the result record.
-     * The key of the result record is the same as for both joining input records.
-     * <p>
-     * Note that {@link KeyValue records} with {@code null} values (so-called tombstone records) have delete semantics.
-     * For example, for left input tombstones the provided value-joiner is not called but a tombstone record is
-     * forwarded directly to delete a record in the result {@code KTable} if required (i.e., if there is anything to be
-     * deleted).
-     * <p>
-     * Input records with {@code null} key will be dropped and no join computation is performed.
-     * <p>
-     * Example:
-     * <table border='1'>
-     * <tr>
-     * <th>thisKTable</th>
-     * <th>thisState</th>
-     * <th>otherKTable</th>
-     * <th>otherState</th>
-     * <th>result updated record</th>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:A&gt;</td>
-     * <td>&lt;K1:A&gt;</td>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:ValueJoiner(A,null)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td>&lt;K1:A&gt;</td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:ValueJoiner(A,b)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:null&gt;</td>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:null&gt;</td>
+     * <td>&lt;K1:null&gt;</td>
      * </tr>
      * <tr>
      * <td></td>
@@ -1199,182 +763,6 @@ public interface KTable<K, V> {
     <VO, VR> KTable<K, VR> leftJoin(final KTable<K, VO> other,
                                     final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                     final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized);
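
A sketch of this left join, under assumed "orders" and "discounts" topics; note the joiner must handle a null right-hand value for left records without a match:

    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.state.KeyValueStore;

    public class KTableLeftJoinExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            final KTable<String, String> orders = builder.table("orders");
            final KTable<String, String> discounts = builder.table("discounts");
            // left join: discount is null when no matching right record exists
            final KTable<String, String> priced = orders.leftJoin(
                    discounts,
                    (order, discount) -> discount == null ? order : order + "/" + discount,
                    Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("priced-orders"));
        }
    }
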
-    /**
-     * Join records of this {@code KTable} (left input) with another {@code KTable}'s (right input) records using
-     * non-windowed left equi join.
-     * The join is a primary key join with join attribute {@code thisKTable.key == otherKTable.key}.
-     * In contrast to {@link #join(KTable, ValueJoiner) inner-join}, all records from left {@code KTable} will produce
-     * an output record (cf. below).
-     * The result is an ever updating {@code KTable} that represents the <em>current</em> (i.e., processing time) result
-     * of the join.
-     * <p>
-     * The join is computed by (1) updating the internal state of one {@code KTable} and (2) performing a lookup for a
-     * matching record in the <em>current</em> (i.e., processing time) internal state of the other {@code KTable}.
-     * This happens in a symmetric way, i.e., for each update of either {@code this} or the {@code other} input
-     * {@code KTable} the result gets updated.
-     * <p>
-     * For each {@code KTable} record that finds a corresponding record in the other {@code KTable}'s state the
-     * provided {@link ValueJoiner} will be called to compute a value (with arbitrary type) for the result record.
-     * Additionally, for each record of left {@code KTable} that does not find a corresponding record in the
-     * right {@code KTable}'s state the provided {@link ValueJoiner} will be called with {@code rightValue =
-     * null} to compute a value (with arbitrary type) for the result record.
-     * The key of the result record is the same as for both joining input records.
-     * <p>
-     * Note that {@link KeyValue records} with {@code null} values (so-called tombstone records) have delete semantics.
-     * For example, for left input tombstones the provided value-joiner is not called but a tombstone record is
-     * forwarded directly to delete a record in the result {@code KTable} if required (i.e., if there is anything to be
-     * deleted).
-     * <p>
-     * Input records with {@code null} key will be dropped and no join computation is performed.
-     * <p>
-     * Example:
-     * <table border='1'>
-     * <tr>
-     * <th>thisKTable</th>
-     * <th>thisState</th>
-     * <th>otherKTable</th>
-     * <th>otherState</th>
-     * <th>result updated record</th>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:A&gt;</td>
-     * <td>&lt;K1:A&gt;</td>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:ValueJoiner(A,null)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td>&lt;K1:A&gt;</td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:ValueJoiner(A,b)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:null&gt;</td>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:null&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:null&gt;</td>
-     * <td></td>
-     * <td></td>
-     * </tr>
-     * </table>
-     * Both input streams (or to be more precise, their underlying source topics) need to have the same number of
-     * partitions.
-     *
-     * @param other  the other {@code KTable} to be joined with this {@code KTable}
-     * @param joiner a {@link ValueJoiner} that computes the join result for a pair of matching records
-     * @param <VO>   the value type of the other {@code KTable}
-     * @param <VR>   the value type of the result {@code KTable}
-     * @param joinSerde serializer for join result value type
-     * @param queryableStoreName a user-provided name of the underlying {@link KTable} that can be
-     * used to subsequently query the operation results; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} then the results cannot be queried
-     * (i.e., that would be equivalent to calling {@link KTable#leftJoin(KTable, ValueJoiner)}.
-     * @return a {@code KTable} that contains join-records for each key and values computed by the given
-     * {@link ValueJoiner}, one for each matched record-pair with the same key plus one for each non-matching record of
-     * left {@code KTable}
-     * @see #join(KTable, ValueJoiner, Materialized)
-     * @see #outerJoin(KTable, ValueJoiner, Materialized)
-     * @deprecated use {@link #leftJoin(KTable, ValueJoiner, Materialized) leftJoin(other, joiner, Materialized.as(queryableStoreName).withValueSerde(joinSerde)}
-     */
-    @Deprecated
-    <VO, VR> KTable<K, VR> leftJoin(final KTable<K, VO> other,
-                                    final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
-                                    final Serde<VR> joinSerde,
-                                    final String queryableStoreName);
-
-    /**
-     * Join records of this {@code KTable} (left input) with another {@code KTable}'s (right input) records using
-     * non-windowed left equi join.
-     * The join is a primary key join with join attribute {@code thisKTable.key == otherKTable.key}.
-     * In contrast to {@link #join(KTable, ValueJoiner) inner-join}, all records from left {@code KTable} will produce
-     * an output record (cf. below).
-     * The result is an ever updating {@code KTable} that represents the <em>current</em> (i.e., processing time) result
-     * of the join.
-     * <p>
-     * The join is computed by (1) updating the internal state of one {@code KTable} and (2) performing a lookup for a
-     * matching record in the <em>current</em> (i.e., processing time) internal state of the other {@code KTable}.
-     * This happens in a symmetric way, i.e., for each update of either {@code this} or the {@code other} input
-     * {@code KTable} the result gets updated.
-     * <p>
-     * For each {@code KTable} record that finds a corresponding record in the other {@code KTable}'s state the
-     * provided {@link ValueJoiner} will be called to compute a value (with arbitrary type) for the result record.
-     * Additionally, for each record of left {@code KTable} that does not find a corresponding record in the
-     * right {@code KTable}'s state the provided {@link ValueJoiner} will be called with {@code rightValue =
-     * null} to compute a value (with arbitrary type) for the result record.
-     * The key of the result record is the same as for both joining input records.
-     * <p>
-     * Note that {@link KeyValue records} with {@code null} values (so-called tombstone records) have delete semantics.
-     * For example, for left input tombstones the provided value-joiner is not called but a tombstone record is
-     * forwarded directly to delete a record in the result {@code KTable} if required (i.e., if there is anything to be
-     * deleted).
-     * <p>
-     * Input records with {@code null} key will be dropped and no join computation is performed.
-     * <p>
-     * Example:
-     * <table border='1'>
-     * <tr>
-     * <th>thisKTable</th>
-     * <th>thisState</th>
-     * <th>otherKTable</th>
-     * <th>otherState</th>
-     * <th>result updated record</th>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:A&gt;</td>
-     * <td>&lt;K1:A&gt;</td>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:ValueJoiner(A,null)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td>&lt;K1:A&gt;</td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:ValueJoiner(A,b)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:null&gt;</td>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:null&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:null&gt;</td>
-     * <td></td>
-     * <td></td>
-     * </tr>
-     * </table>
-     * Both input streams (or to be more precise, their underlying source topics) need to have the same number of
-     * partitions.
-     *
-     * @param other  the other {@code KTable} to be joined with this {@code KTable}
-     * @param joiner a {@link ValueJoiner} that computes the join result for a pair of matching records
-     * @param <VO>   the value type of the other {@code KTable}
-     * @param <VR>   the value type of the result {@code KTable}
-     * @param storeSupplier user defined state store supplier. Cannot be {@code null}.
-     * @return a {@code KTable} that contains join-records for each key and values computed by the given
-     * {@link ValueJoiner}, one for each matched record-pair with the same key plus one for each non-matching record of
-     * left {@code KTable}
-     * @see #join(KTable, ValueJoiner, Materialized)
-     * @see #outerJoin(KTable, ValueJoiner, Materialized)
-     * @deprecated use {@link #leftJoin(KTable, ValueJoiner, Materialized) leftJoin(other, joiner, Materialized.as(KeyValueByteStoreSupplier)}
-     */
-    @Deprecated
-    <VO, VR> KTable<K, VR> leftJoin(final KTable<K, VO> other,
-                                    final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
-                                    final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier);
 
 
     /**
@@ -1543,181 +931,6 @@ public interface KTable<K, V> {
                                      final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized);
 
     /**
-     * Join records of this {@code KTable} (left input) with another {@code KTable}'s (right input) records using
-     * non-windowed outer equi join.
-     * The join is a primary key join with join attribute {@code thisKTable.key == otherKTable.key}.
-     * In contrast to {@link #join(KTable, ValueJoiner) inner-join} or {@link #leftJoin(KTable, ValueJoiner) left-join},
-     * all records from both input {@code KTable}s will produce an output record (cf. below).
-     * The result is an ever updating {@code KTable} that represents the <em>current</em> (i.e., processing time) result
-     * of the join.
-     * <p>
-     * The join is computed by (1) updating the internal state of one {@code KTable} and (2) performing a lookup for a
-     * matching record in the <em>current</em> (i.e., processing time) internal state of the other {@code KTable}.
-     * This happens in a symmetric way, i.e., for each update of either {@code this} or the {@code other} input
-     * {@code KTable} the result gets updated.
-     * <p>
-     * For each {@code KTable} record that finds a corresponding record in the other {@code KTable}'s state the
-     * provided {@link ValueJoiner} will be called to compute a value (with arbitrary type) for the result record.
-     * Additionally, for each record that does not find a corresponding record in the corresponding other
-     * {@code KTable}'s state the provided {@link ValueJoiner} will be called with {@code null} value for the
-     * corresponding other value to compute a value (with arbitrary type) for the result record.
-     * The key of the result record is the same as for both joining input records.
-     * <p>
-     * Note that {@link KeyValue records} with {@code null} values (so-called tombstone records) have delete semantics.
-     * Thus, for input tombstones the provided value-joiner is not called but a tombstone record is forwarded directly
-     * to delete a record in the result {@code KTable} if required (i.e., if there is anything to be deleted).
-     * <p>
-     * Input records with {@code null} key will be dropped and no join computation is performed.
-     * <p>
-     * Example:
-     * <table border='1'>
-     * <tr>
-     * <th>thisKTable</th>
-     * <th>thisState</th>
-     * <th>otherKTable</th>
-     * <th>otherState</th>
-     * <th>result updated record</th>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:A&gt;</td>
-     * <td>&lt;K1:A&gt;</td>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:ValueJoiner(A,null)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td>&lt;K1:A&gt;</td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:ValueJoiner(A,b)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:null&gt;</td>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:ValueJoiner(null,b)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:null&gt;</td>
-     * <td></td>
-     * <td>&lt;K1:null&gt;</td>
-     * </tr>
-     * </table>
-     * Both input streams (or to be more precise, their underlying source topics) need to have the same number of
-     * partitions.
-     *
-     * @param other  the other {@code KTable} to be joined with this {@code KTable}
-     * @param joiner a {@link ValueJoiner} that computes the join result for a pair of matching records
-     * @param <VO>   the value type of the other {@code KTable}
-     * @param <VR>   the value type of the result {@code KTable}
-     * @param joinSerde serializer for join result value type
-     * @param queryableStoreName a user-provided name of the underlying {@link KTable} that can be
-     * used to subsequently query the operation results; valid characters are ASCII
-     * alphanumerics, '.', '_' and '-'. If {@code null} then the results cannot be queried
-     * (i.e., that would be equivalent to calling {@link KTable#outerJoin(KTable, ValueJoiner)}.
-     * @return a {@code KTable} that contains join-records for each key and values computed by the given
-     * {@link ValueJoiner}, one for each matched record-pair with the same key plus one for each non-matching record of
-     * both {@code KTable}s
-     * @see #join(KTable, ValueJoiner, Materialized)
-     * @see #leftJoin(KTable, ValueJoiner, Materialized)
-     * @deprecated use {@link #outerJoin(KTable, ValueJoiner, Materialized) outerJoin(other, joiner, Materialized.as(queryableStoreName).withValueSerde(joinSerde)}
-     */
-    @Deprecated
-    <VO, VR> KTable<K, VR> outerJoin(final KTable<K, VO> other,
-                                     final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
-                                     final Serde<VR> joinSerde,
-                                     final String queryableStoreName);
-
-    /**
-     * Join records of this {@code KTable} (left input) with another {@code KTable}'s (right input) records using
-     * non-windowed outer equi join.
-     * The join is a primary key join with join attribute {@code thisKTable.key == otherKTable.key}.
-     * In contrast to {@link #join(KTable, ValueJoiner) inner-join} or {@link #leftJoin(KTable, ValueJoiner) left-join},
-     * all records from both input {@code KTable}s will produce an output record (cf. below).
-     * The result is an ever updating {@code KTable} that represents the <em>current</em> (i.e., processing time) result
-     * of the join.
-     * <p>
-     * The join is computed by (1) updating the internal state of one {@code KTable} and (2) performing a lookup for a
-     * matching record in the <em>current</em> (i.e., processing time) internal state of the other {@code KTable}.
-     * This happens in a symmetric way, i.e., for each update of either {@code this} or the {@code other} input
-     * {@code KTable} the result gets updated.
-     * <p>
-     * For each {@code KTable} record that finds a corresponding record in the other {@code KTable}'s state the
-     * provided {@link ValueJoiner} will be called to compute a value (with arbitrary type) for the result record.
-     * Additionally, for each record that does not find a corresponding record in the corresponding other
-     * {@code KTable}'s state the provided {@link ValueJoiner} will be called with {@code null} value for the
-     * corresponding other value to compute a value (with arbitrary type) for the result record.
-     * The key of the result record is the same as for both joining input records.
-     * <p>
-     * Note that {@link KeyValue records} with {@code null} values (so-called tombstone records) have delete semantics.
-     * Thus, for input tombstones the provided value-joiner is not called but a tombstone record is forwarded directly
-     * to delete a record in the result {@code KTable} if required (i.e., if there is anything to be deleted).
-     * <p>
-     * Input records with {@code null} key will be dropped and no join computation is performed.
-     * <p>
-     * Example:
-     * <table border='1'>
-     * <tr>
-     * <th>thisKTable</th>
-     * <th>thisState</th>
-     * <th>otherKTable</th>
-     * <th>otherState</th>
-     * <th>result updated record</th>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:A&gt;</td>
-     * <td>&lt;K1:A&gt;</td>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:ValueJoiner(A,null)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td>&lt;K1:A&gt;</td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:ValueJoiner(A,b)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td>&lt;K1:null&gt;</td>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:b&gt;</td>
-     * <td>&lt;K1:ValueJoiner(null,b)&gt;</td>
-     * </tr>
-     * <tr>
-     * <td></td>
-     * <td></td>
-     * <td>&lt;K1:null&gt;</td>
-     * <td></td>
-     * <td>&lt;K1:null&gt;</td>
-     * </tr>
-     * </table>
-     * Both input streams (or to be more precise, their underlying source topics) need to have the same number of
-     * partitions.
-     *
-     * @param other  the other {@code KTable} to be joined with this {@code KTable}
-     * @param joiner a {@link ValueJoiner} that computes the join result for a pair of matching records
-     * @param <VO>   the value type of the other {@code KTable}
-     * @param <VR>   the value type of the result {@code KTable}
-     * @param storeSupplier user defined state store supplier. Cannot be {@code null}.
-     * @return a {@code KTable} that contains join-records for each key and values computed by the given
-     * {@link ValueJoiner}, one for each matched record-pair with the same key plus one for each non-matching record of
-     * both {@code KTable}s
-     * @see #join(KTable, ValueJoiner)
-     * @see #leftJoin(KTable, ValueJoiner)
-     * @deprecated use {@link #outerJoin(KTable, ValueJoiner, Materialized) outerJoin(other, joiner, Materialized.as(KeyValueByteStoreSupplier)}
-     */
-    @Deprecated
-    <VO, VR> KTable<K, VR> outerJoin(final KTable<K, VO> other,
-                                     final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
-                                     final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier);
-
-    /**
      * Get the name of the local state store that can be used to query this {@code KTable}.
      *
      * @return the underlying state store name, or {@code null} if this {@code KTable} cannot be queried.
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/SessionWindowedKStream.java b/streams/src/main/java/org/apache/kafka/streams/kstream/SessionWindowedKStream.java
index efc60ea..36e7823 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/SessionWindowedKStream.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/SessionWindowedKStream.java
@@ -21,6 +21,7 @@ import org.apache.kafka.common.utils.Bytes;
 import org.apache.kafka.streams.KafkaStreams;
 import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.Topology;
 import org.apache.kafka.streams.state.KeyValueStore;
 import org.apache.kafka.streams.state.QueryableStoreType;
 import org.apache.kafka.streams.state.SessionStore;
@@ -91,6 +92,17 @@ public interface SessionWindowedKStream<K, V> {
      * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
      * query the value of the key on a parallel running instance of your Kafka Streams application.
      *
+     * <p>
+     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
+     * Therefore, the store name defined by the {@link Materialized} instance must be a valid Kafka topic name and cannot contain characters other than ASCII
+     * alphanumerics, '.', '_' and '-'.
+     * The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is
+     * user-specified in {@link StreamsConfig} via parameter
+     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is the
+     * provided store name defined in {@code Materialized}, and "-changelog" is a fixed suffix.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * @param materialized  an instance of {@link Materialized} used to materialize a state store. Cannot be {@code null}.
      *                      Note: the valueSerde will be automatically set to {@link Serdes#Long()} if there is no valueSerde provided
      * @return a windowed {@link KTable} that contains "update" records with unmodified keys and {@link Long} values
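
A minimal sketch of a session-windowed count materialized under an assumed store name, which produces the changelog topic naming described above:

    import java.util.concurrent.TimeUnit;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KStream;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.SessionWindows;
    import org.apache.kafka.streams.kstream.Windowed;
    import org.apache.kafka.streams.state.SessionStore;

    public class SessionCountExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            final KStream<String, String> clicks = builder.stream("clicks");
            // count clicks per 5-minute inactivity-gap session; the changelog topic
            // will be named "${applicationId}-session-click-counts-changelog"
            final KTable<Windowed<String>, Long> counts = clicks
                    .groupByKey()
                    .windowedBy(SessionWindows.with(TimeUnit.MINUTES.toMillis(5)))
                    .count(Materialized.<String, Long, SessionStore<Bytes, byte[]>>as("session-click-counts"));
        }
    }
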
@@ -168,6 +180,18 @@ public interface SessionWindowedKStream<K, V> {
      * String key = "some-key";
      * KeyValueIterator<Windowed<String>, Long> aggForKeyForSession = localWindowStore.fetch(key); // key must be local (application state is shared over all running Kafka Streams instances)
      * }</pre>
+     *
+     * <p>
+     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
+     * Therefore, the store name defined by the {@link Materialized} instance must be a valid Kafka topic name and cannot contain characters other than ASCII
+     * alphanumerics, '.', '_' and '-'.
+     * The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is
+     * user-specified in {@link StreamsConfig} via parameter
+     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is the
+     * provided store name defined in {@code Materialized}, and "-changelog" is a fixed suffix.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * @param initializer    the instance of {@link Initializer}
      * @param aggregator     the instance of {@link Aggregator}
      * @param sessionMerger  the instance of {@link Merger}
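
A sketch of this aggregate with invented names; because the aggregate type differs from the input value type, the value serde is supplied explicitly (cf. point 2.c in the commit message):

    import java.util.concurrent.TimeUnit;
    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.SessionWindows;
    import org.apache.kafka.streams.state.SessionStore;

    public class SessionAggregateExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            builder.<String, String>stream("page-views")
                    .groupByKey()
                    .windowedBy(SessionWindows.with(TimeUnit.MINUTES.toMillis(5)))
                    .aggregate(
                            () -> 0L,                                        // initializer
                            (key, value, agg) -> agg + 1L,                   // aggregator
                            (key, leftAgg, rightAgg) -> leftAgg + rightAgg,  // session merger
                            Materialized.<String, Long, SessionStore<Bytes, byte[]>>as("view-sessions")
                                    .withValueSerde(Serdes.Long()));         // result type is Long, so set its serde
        }
    }
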
@@ -251,15 +275,18 @@ public interface SessionWindowedKStream<K, V> {
      * }</pre>
      * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
      * query the value of the key on a parallel running instance of your Kafka Streams application.
+     *
      * <p>
      * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
-     * Therefore, the store name must be a valid Kafka topic name and cannot contain characters other than ASCII
+     * Therefore, the store name defined by the {@link Materialized} instance must be a valid Kafka topic name and cannot contain characters other than ASCII
      * alphanumerics, '.', '_' and '-'.
-     * The changelog topic will be named "${applicationId}-${queryableStoreName}-changelog", where "applicationId" is
+     * The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is
      * user-specified in {@link StreamsConfig} via parameter
-     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "queryableStoreName" is the
-     * provide {@code queryableStoreName}, and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is the
+     * provided store name defined in {@code Materialized}, and "-changelog" is a fixed suffix.
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * @param reducer a {@link Reducer} that computes a new aggregate result. Cannot be {@code null}.
      * @param materializedAs an instance of {@link Materialized} used to materialize a state store. Cannot be {@code null}
      * @return a windowed {@link KTable} that contains "update" records with unmodified keys, and values that represent
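
A sketch of this reduce with invented names; since the input and result types are identical, the serdes supplied at grouping time can be reused (cf. point 2.b in the commit message):

    import java.util.concurrent.TimeUnit;
    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.Serialized;
    import org.apache.kafka.streams.kstream.SessionWindows;
    import org.apache.kafka.streams.state.SessionStore;

    public class SessionReduceExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            builder.<String, Long>stream("amounts")
                    .groupByKey(Serialized.with(Serdes.String(), Serdes.Long()))
                    .windowedBy(SessionWindows.with(TimeUnit.MINUTES.toMillis(30)))
                    // no serdes on Materialized: the String/Long serdes from grouping are reused
                    .reduce((v1, v2) -> v1 + v2,
                            Materialized.<String, Long, SessionStore<Bytes, byte[]>>as("session-totals"));
        }
    }
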
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/TimeWindowedKStream.java b/streams/src/main/java/org/apache/kafka/streams/kstream/TimeWindowedKStream.java
index 7f9752b..03bc986 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/TimeWindowedKStream.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/TimeWindowedKStream.java
@@ -20,6 +20,7 @@ import org.apache.kafka.common.utils.Bytes;
 import org.apache.kafka.streams.KafkaStreams;
 import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.Topology;
 import org.apache.kafka.streams.state.KeyValueStore;
 import org.apache.kafka.streams.state.QueryableStoreType;
 import org.apache.kafka.streams.state.WindowStore;
@@ -67,7 +68,9 @@ public interface TimeWindowedKStream<K, V> {
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
      * and "-changelog" is a fixed suffix.
      * Note that the internal store name may not be queryable through Interactive Queries.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * @return a {@link KTable} that contains "update" records with unmodified keys and {@link Long} values that
      * represent the latest (rolling) count (i.e., number of records) for each key
      */
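
A minimal sketch of the un-materialized windowed count under an assumed "events" topic; Topology#describe() then reports the generated internal names:

    import java.util.concurrent.TimeUnit;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.TimeWindows;
    import org.apache.kafka.streams.kstream.Windowed;

    public class WindowedCountExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            final KTable<Windowed<String>, Long> counts = builder.<String, String>stream("events")
                    .groupByKey()
                    .windowedBy(TimeWindows.of(TimeUnit.MINUTES.toMillis(1)))
                    .count();  // backed by an internal store that may not be queryable
            // print the topology, including generated internal topic names
            System.out.println(builder.build().describe());
        }
    }
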
@@ -99,7 +102,19 @@ public interface TimeWindowedKStream<K, V> {
      * }</pre>
      * For non-local keys, a custom RPC mechanism must be implemented using {@link KafkaStreams#allMetadata()} to
      * query the value of the key on a parallel running instance of your Kafka Streams application.
-     ** @param materialized  an instance of {@link Materialized} used to materialize a state store. Cannot be {@code null}.
+     *
+     * <p>
+     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
+     * Therefore, the store name defined by the {@link Materialized} instance must be a valid Kafka topic name and cannot contain characters other than ASCII
+     * alphanumerics, '.', '_' and '-'.
+     * The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is
+     * user-specified in {@link StreamsConfig} via parameter
+     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is the
+     * provided store name defined in {@code Materialized}, and "-changelog" is a fixed suffix.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
+     * @param materialized  an instance of {@link Materialized} used to materialize a state store. Cannot be {@code null}.
      *                       Note: the valueSerde will be automatically set to {@link org.apache.kafka.common.serialization.Serdes#Long() Serdes#Long()}
      *                       if there is no valueSerde provided
      * @return a {@link KTable} that contains "update" records with unmodified keys and {@link Long} values that
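
A sketch of the materialized variant under an assumed store name, yielding the "${applicationId}-event-counts-changelog" topic naming described above:

    import java.util.concurrent.TimeUnit;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.TimeWindows;
    import org.apache.kafka.streams.state.WindowStore;

    public class QueryableWindowedCountExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            builder.<String, String>stream("events")
                    .groupByKey()
                    .windowedBy(TimeWindows.of(TimeUnit.MINUTES.toMillis(1)))
                    // the store name must be a valid Kafka topic name
                    .count(Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("event-counts"));
        }
    }
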
@@ -140,7 +155,9 @@ public interface TimeWindowedKStream<K, V> {
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
      * and "-changelog" is a fixed suffix.
      * Note that the internal store name may not be queryable through Interactive Queries.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @param <VR>          the value type of the resulting {@link KTable}
      * @param initializer   an {@link Initializer} that computes an initial intermediate aggregation result
@@ -189,6 +206,17 @@ public interface TimeWindowedKStream<K, V> {
      * WindowStoreIterator<Long> aggregateStore = localWindowStore.fetch(key, timeFrom, timeTo); // key must be local (application state is shared over all running Kafka Streams instances)
      * }</pre>
      *
+     * <p>
+     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
+     * Therefore, the store name defined by the {@link Materialized} instance must be a valid Kafka topic name and cannot contain characters other than ASCII
+     * alphanumerics, '.', '_' and '-'.
+     * The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is
+     * user-specified in {@link StreamsConfig} via parameter
+     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is the
+     * provided store name defined in {@code Materialized}, and "-changelog" is a fixed suffix.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
+     *
      * @param initializer   an {@link Initializer} that computes an initial intermediate aggregation result
      * @param aggregator    an {@link Aggregator} that computes a new aggregate result
      * @param materialized  an instance of {@link Materialized} used to materialize a state store. Cannot be {@code null}.
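
A sketch of this aggregate with invented names; as with the session case, the result type is not known to the library, so its serde must be given explicitly (cf. point 2.c):

    import java.util.concurrent.TimeUnit;
    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.TimeWindows;
    import org.apache.kafka.streams.state.WindowStore;

    public class WindowedAggregateExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            builder.<String, String>stream("events")
                    .groupByKey()
                    .windowedBy(TimeWindows.of(TimeUnit.MINUTES.toMillis(1)))
                    .aggregate(
                            () -> 0L,                                   // initial aggregate
                            (key, value, agg) -> agg + value.length(),  // accumulate value lengths
                            // Long differs from the String input type, so set its serde
                            Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("length-sums")
                                    .withValueSerde(Serdes.Long()));
        }
    }
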
@@ -226,7 +254,8 @@ public interface TimeWindowedKStream<K, V> {
      * user-specified in {@link StreamsConfig} via parameter
      * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "internalStoreName" is an internal name
      * and "-changelog" is a fixed suffix.
-     * You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @param reducer   a {@link Reducer} that computes a new aggregate result
      * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
@@ -268,6 +297,16 @@ public interface TimeWindowedKStream<K, V> {
      * WindowStoreIterator<Long> reduceStore = localWindowStore.fetch(key, timeFrom, timeTo); // key must be local (application state is shared over all running Kafka Streams instances)
      * }</pre>
      *
+     * <p>
+     * For failure and recovery the store will be backed by an internal changelog topic that will be created in Kafka.
+     * Therefore, the store name defined by the {@link Materialized} instance must be a valid Kafka topic name and cannot contain characters other than ASCII
+     * alphanumerics, '.', '_' and '-'.
+     * The changelog topic will be named "${applicationId}-${storeName}-changelog", where "applicationId" is
+     * user-specified in {@link StreamsConfig} via parameter
+     * {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "storeName" is the
+     * provided store name defined in {@code Materialized}, and "-changelog" is a fixed suffix.
+     *
+     * You can retrieve all generated internal topic names via {@link Topology#describe()}.
      *
      * @param reducer   a {@link Reducer} that computes a new aggregate result
      * @return a {@link KTable} that contains "update" records with unmodified keys, and values that represent the
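
A sketch of the materialized windowed reduce with invented names:

    import java.util.concurrent.TimeUnit;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.TimeWindows;
    import org.apache.kafka.streams.state.WindowStore;

    public class WindowedReduceExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            builder.<String, Long>stream("amounts")
                    .groupByKey()
                    .windowedBy(TimeWindows.of(TimeUnit.MINUTES.toMillis(1)))
                    // keep the per-window maximum; input and result types match
                    .reduce((v1, v2) -> Math.max(v1, v2),
                            Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("window-max"));
        }
    }
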
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractStream.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractStream.java
index 7410a0a..3c65399 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractStream.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractStream.java
@@ -72,12 +72,6 @@ public abstract class AbstractStream<K> {
         return allSourceNodes;
     }
 
-    String getOrCreateName(final String queryableStoreName, final String prefix) {
-        final String returnName = queryableStoreName != null ? queryableStoreName : builder.newStoreName(prefix);
-        Topic.validate(returnName);
-        return returnName;
-    }
-
     static <T2, T1, R> ValueJoiner<T2, T1, R> reverseJoiner(final ValueJoiner<T1, T2, R> joiner) {
         return new ValueJoiner<T2, T1, R>() {
             @Override
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/GroupedStreamAggregateBuilder.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/GroupedStreamAggregateBuilder.java
index 24ed8a0..715c291 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/GroupedStreamAggregateBuilder.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/GroupedStreamAggregateBuilder.java
@@ -39,6 +39,7 @@ class GroupedStreamAggregateBuilder<K, V> {
             return 0L;
         }
     };
+
     final Aggregator<K, V, Long> countAggregator = new Aggregator<K, V, Long>() {
         @Override
         public Long apply(K aggKey, V value, Long aggregate) {
@@ -46,6 +47,13 @@ class GroupedStreamAggregateBuilder<K, V> {
         }
     };
 
+    final Initializer<V> reduceInitializer = new Initializer<V>() {
+        @Override
+        public V apply() {
+            return null;
+        }
+    };
+
     GroupedStreamAggregateBuilder(final InternalStreamsBuilder builder,
                                   final Serde<K> keySerde,
                                   final Serde<V> valueSerde,
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImpl.java
index 82e2823..3a9f919 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImpl.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImpl.java
@@ -24,20 +24,15 @@ import org.apache.kafka.streams.kstream.Initializer;
 import org.apache.kafka.streams.kstream.KGroupedStream;
 import org.apache.kafka.streams.kstream.KTable;
 import org.apache.kafka.streams.kstream.Materialized;
-import org.apache.kafka.streams.kstream.Merger;
 import org.apache.kafka.streams.kstream.Reducer;
 import org.apache.kafka.streams.kstream.SessionWindowedKStream;
 import org.apache.kafka.streams.kstream.SessionWindows;
 import org.apache.kafka.streams.kstream.TimeWindowedKStream;
 import org.apache.kafka.streams.kstream.Window;
-import org.apache.kafka.streams.kstream.Windowed;
 import org.apache.kafka.streams.kstream.Windows;
 import org.apache.kafka.streams.state.KeyValueStore;
-import org.apache.kafka.streams.state.SessionStore;
 import org.apache.kafka.streams.state.StoreBuilder;
-import org.apache.kafka.streams.state.WindowStore;
 
-import java.util.Collections;
 import java.util.Objects;
 import java.util.Set;
 
@@ -50,7 +45,6 @@ class KGroupedStreamImpl<K, V> extends AbstractStream<K> implements KGroupedStre
     private final Serde<V> valSerde;
     private final boolean repartitionRequired;
     private final GroupedStreamAggregateBuilder<K, V> aggregateBuilder;
-    private boolean isQueryable = true;
 
     KGroupedStreamImpl(final InternalStreamsBuilder builder,
                        final String name,
@@ -68,39 +62,11 @@ class KGroupedStreamImpl<K, V> extends AbstractStream<K> implements KGroupedStre
         this.keySerde = keySerde;
         this.valSerde = valSerde;
         this.repartitionRequired = repartitionRequired;
-        this.isQueryable = true;
-    }
-
-    private void determineIsQueryable(final String queryableStoreName) {
-        if (queryableStoreName == null) {
-            isQueryable = false;
-        } // no need for else {} since isQueryable is true by default
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public KTable<K, V> reduce(final Reducer<V> reducer,
-                               final String queryableStoreName) {
-        determineIsQueryable(queryableStoreName);
-        return reduce(reducer, keyValueStore(keySerde, valSerde, getOrCreateName(queryableStoreName, REDUCE_NAME)));
     }
 
     @Override
     public KTable<K, V> reduce(final Reducer<V> reducer) {
-        determineIsQueryable(null);
-        return reduce(reducer, (String) null);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public KTable<K, V> reduce(final Reducer<V> reducer,
-                               final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        Objects.requireNonNull(reducer, "reducer can't be null");
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-        return doAggregate(
-                new KStreamReduce<K, V>(storeSupplier.name(), reducer),
-                REDUCE_NAME,
-                storeSupplier);
+        return reduce(reducer, Materialized.<K, V, KeyValueStore<Bytes, byte[]>>with(keySerde, valSerde));
     }
 
     @Override
@@ -110,53 +76,19 @@ class KGroupedStreamImpl<K, V> extends AbstractStream<K> implements KGroupedStre
         Objects.requireNonNull(materialized, "materialized can't be null");
         final MaterializedInternal<K, V, KeyValueStore<Bytes, byte[]>> materializedInternal
                 = new MaterializedInternal<>(materialized, builder, REDUCE_NAME);
+        if (materializedInternal.keySerde() == null) {
+            materializedInternal.withKeySerde(keySerde);
+        }
+        if (materializedInternal.valueSerde() == null) {
+            materializedInternal.withValueSerde(valSerde);
+        }
+
         return doAggregate(
                 new KStreamReduce<K, V>(materializedInternal.storeName(), reducer),
                 REDUCE_NAME,
                 materializedInternal);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public <W extends Window> KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
-                                                            final Windows<W> windows,
-                                                            final String queryableStoreName) {
-        determineIsQueryable(queryableStoreName);
-        return reduce(reducer, windows, windowedStore(keySerde, valSerde, windows, getOrCreateName(queryableStoreName, REDUCE_NAME)));
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <W extends Window> KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
-                                                            final Windows<W> windows) {
-        return windowedBy(windows).reduce(reducer);
-    }
-
-    @SuppressWarnings({"unchecked", "deprecation"})
-    @Override
-    public <W extends Window> KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
-                                                            final Windows<W> windows,
-                                                            final org.apache.kafka.streams.processor.StateStoreSupplier<WindowStore> storeSupplier) {
-        Objects.requireNonNull(reducer, "reducer can't be null");
-        Objects.requireNonNull(windows, "windows can't be null");
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-        return (KTable<Windowed<K>, V>) doAggregate(
-                new KStreamWindowReduce<K, V, W>(windows, storeSupplier.name(), reducer),
-                REDUCE_NAME,
-                storeSupplier
-        );
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <T> KTable<K, T> aggregate(final Initializer<T> initializer,
-                                      final Aggregator<? super K, ? super V, T> aggregator,
-                                      final Serde<T> aggValueSerde,
-                                      final String queryableStoreName) {
-        determineIsQueryable(queryableStoreName);
-        return aggregate(initializer, aggregator, keyValueStore(keySerde, aggValueSerde, getOrCreateName(queryableStoreName, AGGREGATE_NAME)));
-    }
-
     @Override
     public <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
                                         final Aggregator<? super K, ? super V, VR> aggregator,
@@ -164,14 +96,13 @@ class KGroupedStreamImpl<K, V> extends AbstractStream<K> implements KGroupedStre
         Objects.requireNonNull(initializer, "initializer can't be null");
         Objects.requireNonNull(aggregator, "aggregator can't be null");
         Objects.requireNonNull(materialized, "materialized can't be null");
-        return aggregateMaterialized(initializer, aggregator, materialized);
-    }
 
-    private <VR> KTable<K, VR> aggregateMaterialized(final Initializer<VR> initializer,
-                                                     final Aggregator<? super K, ? super V, VR> aggregator,
-                                                     final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
         final MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materializedInternal
                 = new MaterializedInternal<>(materialized, builder, AGGREGATE_NAME);
+        if (materializedInternal.keySerde() == null) {
+            materializedInternal.withKeySerde(keySerde);
+        }
+
         return doAggregate(
                 new KStreamAggregate<>(materializedInternal.storeName(), initializer, aggregator),
                 AGGREGATE_NAME,
@@ -181,96 +112,12 @@ class KGroupedStreamImpl<K, V> extends AbstractStream<K> implements KGroupedStre
     @Override
     public <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
                                         final Aggregator<? super K, ? super V, VR> aggregator) {
-        Objects.requireNonNull(initializer, "initializer can't be null");
-        Objects.requireNonNull(aggregator, "aggregator can't be null");
-        MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materializedInternal =
-                new MaterializedInternal<>(Materialized.<K, VR, KeyValueStore<Bytes, byte[]>>with(keySerde, null),
-                                           builder,
-                                           AGGREGATE_NAME);
-        return doAggregate(new KStreamAggregate<>(materializedInternal.storeName(), initializer, aggregator),
-                           AGGREGATE_NAME,
-                           materializedInternal);
-
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <T> KTable<K, T> aggregate(final Initializer<T> initializer,
-                                      final Aggregator<? super K, ? super V, T> aggregator,
-                                      final Serde<T> aggValueSerde) {
-        return aggregate(initializer, aggregator, aggValueSerde, null);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <T> KTable<K, T> aggregate(final Initializer<T> initializer,
-                                      final Aggregator<? super K, ? super V, T> aggregator,
-                                      final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        Objects.requireNonNull(initializer, "initializer can't be null");
-        Objects.requireNonNull(aggregator, "aggregator can't be null");
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-        return doAggregate(
-                new KStreamAggregate<>(storeSupplier.name(), initializer, aggregator),
-                AGGREGATE_NAME,
-                storeSupplier);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <W extends Window, T> KTable<Windowed<K>, T> aggregate(final Initializer<T> initializer,
-                                                                  final Aggregator<? super K, ? super V, T> aggregator,
-                                                                  final Windows<W> windows,
-                                                                  final Serde<T> aggValueSerde,
-                                                                  final String queryableStoreName) {
-        determineIsQueryable(queryableStoreName);
-        return aggregate(initializer, aggregator, windows, windowedStore(keySerde, aggValueSerde, windows, getOrCreateName(queryableStoreName, AGGREGATE_NAME)));
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <W extends Window, T> KTable<Windowed<K>, T> aggregate(final Initializer<T> initializer,
-                                                                  final Aggregator<? super K, ? super V, T> aggregator,
-                                                                  final Windows<W> windows,
-                                                                  final Serde<T> aggValueSerde) {
-        return windowedBy(windows).aggregate(initializer, aggregator,
-                                             Materialized.<K, T, WindowStore<Bytes, byte[]>>as(builder.newStoreName(AGGREGATE_NAME))
-                                                     .withKeySerde(keySerde)
-                                                     .withValueSerde(aggValueSerde));
-    }
-
-    @SuppressWarnings({"unchecked", "deprecation"})
-    @Override
-    public <W extends Window, T> KTable<Windowed<K>, T> aggregate(final Initializer<T> initializer,
-                                                                  final Aggregator<? super K, ? super V, T> aggregator,
-                                                                  final Windows<W> windows,
-                                                                  final org.apache.kafka.streams.processor.StateStoreSupplier<WindowStore> storeSupplier) {
-        Objects.requireNonNull(initializer, "initializer can't be null");
-        Objects.requireNonNull(aggregator, "aggregator can't be null");
-        Objects.requireNonNull(windows, "windows can't be null");
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-        return (KTable<Windowed<K>, T>) doAggregate(
-                new KStreamWindowAggregate<>(windows, storeSupplier.name(), initializer, aggregator),
-                AGGREGATE_NAME,
-                storeSupplier
-        );
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public KTable<K, Long> count(final String queryableStoreName) {
-        determineIsQueryable(queryableStoreName);
-        return count(keyValueStore(keySerde, Serdes.Long(), getOrCreateName(queryableStoreName, AGGREGATE_NAME)));
+        return aggregate(initializer, aggregator, Materialized.<K, VR, KeyValueStore<Bytes, byte[]>>with(keySerde, null));
     }
 
     @Override
     public KTable<K, Long> count() {
-        return count((String) null);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public KTable<K, Long> count(final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        return aggregate(aggregateBuilder.countInitializer, aggregateBuilder.countAggregator, storeSupplier);
+        return count(Materialized.<K, Long, KeyValueStore<Bytes, byte[]>>with(keySerde, Serdes.Long()));
     }
 
     @Override
@@ -278,91 +125,17 @@ class KGroupedStreamImpl<K, V> extends AbstractStream<K> implements KGroupedStre
         Objects.requireNonNull(materialized, "materialized can't be null");
         final MaterializedInternal<K, Long, KeyValueStore<Bytes, byte[]>> materializedInternal
                 = new MaterializedInternal<>(materialized, builder, AGGREGATE_NAME);
+        if (materializedInternal.keySerde() == null) {
+            materializedInternal.withKeySerde(keySerde);
+        }
         if (materializedInternal.valueSerde() == null) {
-            materialized.withValueSerde(Serdes.Long());
+            materializedInternal.withValueSerde(Serdes.Long());
         }
-        return aggregate(aggregateBuilder.countInitializer, aggregateBuilder.countAggregator, materialized);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <W extends Window> KTable<Windowed<K>, Long> count(final Windows<W> windows,
-                                                              final String queryableStoreName) {
-        determineIsQueryable(queryableStoreName);
-        return count(windows, windowedStore(keySerde, Serdes.Long(), windows, getOrCreateName(queryableStoreName, AGGREGATE_NAME)));
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <W extends Window> KTable<Windowed<K>, Long> count(final Windows<W> windows) {
-        return windowedBy(windows).count();
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <W extends Window> KTable<Windowed<K>, Long> count(final Windows<W> windows,
-                                                              final org.apache.kafka.streams.processor.StateStoreSupplier<WindowStore> storeSupplier) {
-        return aggregate(
-                aggregateBuilder.countInitializer,
-                aggregateBuilder.countAggregator,
-                windows,
-                storeSupplier);
-    }
-
-    @SuppressWarnings({"unchecked", "deprecation"})
-    @Override
-    public <T> KTable<Windowed<K>, T> aggregate(final Initializer<T> initializer,
-                                                final Aggregator<? super K, ? super V, T> aggregator,
-                                                final Merger<? super K, T> sessionMerger,
-                                                final SessionWindows sessionWindows,
-                                                final Serde<T> aggValueSerde,
-                                                final String queryableStoreName) {
-        determineIsQueryable(queryableStoreName);
-        return aggregate(initializer,
-                         aggregator,
-                         sessionMerger,
-                         sessionWindows,
-                         aggValueSerde,
-                         storeFactory(keySerde, aggValueSerde, getOrCreateName(queryableStoreName, AGGREGATE_NAME))
-                          .sessionWindowed(sessionWindows.maintainMs()).build());
-
 
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <T> KTable<Windowed<K>, T> aggregate(final Initializer<T> initializer,
-                                                final Aggregator<? super K, ? super V, T> aggregator,
-                                                final Merger<? super K, T> sessionMerger,
-                                                final SessionWindows sessionWindows,
-                                                final Serde<T> aggValueSerde) {
-        return windowedBy(sessionWindows).aggregate(initializer,
-                                                    aggregator,
-                                                    sessionMerger,
-                                                    Materialized.<K, T, SessionStore<Bytes, byte[]>>as(builder.newStoreName(AGGREGATE_NAME))
-                                                            .withKeySerde(keySerde)
-                                                            .withValueSerde(aggValueSerde));
-    }
-
-    @SuppressWarnings({"unchecked", "deprecation"})
-    @Override
-    public <T> KTable<Windowed<K>, T> aggregate(final Initializer<T> initializer,
-                                                final Aggregator<? super K, ? super V, T> aggregator,
-                                                final Merger<? super K, T> sessionMerger,
-                                                final SessionWindows sessionWindows,
-                                                final Serde<T> aggValueSerde,
-                                                final org.apache.kafka.streams.processor.StateStoreSupplier<SessionStore> storeSupplier) {
-        Objects.requireNonNull(initializer, "initializer can't be null");
-        Objects.requireNonNull(aggregator, "aggregator can't be null");
-        Objects.requireNonNull(sessionWindows, "sessionWindows can't be null");
-        Objects.requireNonNull(sessionMerger, "sessionMerger can't be null");
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-
-        return (KTable<Windowed<K>, T>) doAggregate(
-                new KStreamSessionWindowAggregate<>(sessionWindows, storeSupplier.name(), initializer, aggregator, sessionMerger),
+        return doAggregate(
+                new KStreamAggregate<>(materializedInternal.storeName(), aggregateBuilder.countInitializer, aggregateBuilder.countAggregator),
                 AGGREGATE_NAME,
-                storeSupplier);
-
+                materializedInternal);
     }
 
     @Override
@@ -387,98 +160,6 @@ class KGroupedStreamImpl<K, V> extends AbstractStream<K> implements KGroupedStre
                                                 aggregateBuilder);
     }
 
-    @SuppressWarnings({"unchecked", "deprecation"})
-    public KTable<Windowed<K>, Long> count(final SessionWindows sessionWindows, final String queryableStoreName) {
-        Materialized<K, Long, SessionStore<Bytes, byte[]>> materialized = Materialized.<K, Long, SessionStore<Bytes, byte[]>>as(getOrCreateName(queryableStoreName, AGGREGATE_NAME))
-                .withKeySerde(keySerde)
-                .withValueSerde(Serdes.Long());
-        return windowedBy(sessionWindows).count(materialized);
-    }
-
-    @SuppressWarnings("deprecation")
-    public KTable<Windowed<K>, Long> count(final SessionWindows sessionWindows) {
-        return windowedBy(sessionWindows).count();
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public KTable<Windowed<K>, Long> count(final SessionWindows sessionWindows,
-                                           final org.apache.kafka.streams.processor.StateStoreSupplier<SessionStore> storeSupplier) {
-        Objects.requireNonNull(sessionWindows, "sessionWindows can't be null");
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-        final Merger<K, Long> sessionMerger = new Merger<K, Long>() {
-            @Override
-            public Long apply(final K aggKey, final Long aggOne, final Long aggTwo) {
-                return aggOne + aggTwo;
-            }
-        };
-
-        return aggregate(aggregateBuilder.countInitializer,
-                         aggregateBuilder.countAggregator,
-                         sessionMerger,
-                         sessionWindows,
-                         Serdes.Long(),
-                         storeSupplier);
-    }
-
-
-    @SuppressWarnings({"unchecked", "deprecation"})
-    @Override
-    public KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
-                                         final SessionWindows sessionWindows,
-                                         final String queryableStoreName) {
-        determineIsQueryable(queryableStoreName);
-
-        return reduce(reducer, sessionWindows,
-                      storeFactory(keySerde, valSerde, getOrCreateName(queryableStoreName, AGGREGATE_NAME))
-                              .sessionWindowed(sessionWindows.maintainMs()).build());
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
-                                         final SessionWindows sessionWindows) {
-
-        return windowedBy(sessionWindows).reduce(reducer);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
-                                         final SessionWindows sessionWindows,
-                                         final org.apache.kafka.streams.processor.StateStoreSupplier<SessionStore> storeSupplier) {
-        Objects.requireNonNull(reducer, "reducer can't be null");
-        Objects.requireNonNull(sessionWindows, "sessionWindows can't be null");
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-
-        final Initializer<V> initializer = new Initializer<V>() {
-            @Override
-            public V apply() {
-                return null;
-            }
-        };
-
-        final Aggregator<K, V, V> aggregator = new Aggregator<K, V, V>() {
-            @Override
-            public V apply(final K aggKey, final V value, final V aggregate) {
-                if (aggregate == null) {
-                    return value;
-                }
-                return reducer.apply(aggregate, value);
-            }
-        };
-
-        final Merger<K, V> sessionMerger = new Merger<K, V>() {
-            @Override
-            public V apply(final K aggKey, final V aggOne, final V aggTwo) {
-                return aggregator.apply(aggKey, aggTwo, aggOne);
-            }
-        };
-
-        return aggregate(initializer, aggregator, sessionMerger, sessionWindows, valSerde, storeSupplier);
-    }
-
-
     private <T> KTable<K, T> doAggregate(final KStreamAggProcessorSupplier<K, ?, V, T> aggregateSupplier,
                                          final String functionName,
                                          final MaterializedInternal<K, T, KeyValueStore<Bytes, byte[]>> materializedInternal) {
@@ -488,37 +169,4 @@ class KGroupedStreamImpl<K, V> extends AbstractStream<K> implements KGroupedStre
         return aggregateBuilder.build(aggregateSupplier, functionName, storeBuilder, materializedInternal.isQueryable());
 
     }
-
-    @SuppressWarnings("deprecation")
-    private <T> KTable<K, T> doAggregate(
-            final KStreamAggProcessorSupplier<K, ?, V, T> aggregateSupplier,
-            final String functionName,
-            final org.apache.kafka.streams.processor.StateStoreSupplier storeSupplier) {
-
-        final String aggFunctionName = builder.newProcessorName(functionName);
-
-        final String sourceName = repartitionIfRequired(storeSupplier.name());
-
-        builder.internalTopologyBuilder.addProcessor(aggFunctionName, aggregateSupplier, sourceName);
-        builder.internalTopologyBuilder.addStateStore(storeSupplier, aggFunctionName);
-
-        return new KTableImpl<>(
-            builder,
-            aggFunctionName,
-            aggregateSupplier,
-            sourceName.equals(this.name) ? sourceNodes
-                    : Collections.singleton(sourceName),
-            storeSupplier.name(),
-            isQueryable);
-    }
-
-    /**
-     * @return the new sourceName if repartitioned. Otherwise the name of this stream
-     */
-    private String repartitionIfRequired(final String queryableStoreName) {
-        if (!repartitionRequired) {
-            return this.name;
-        }
-        return KStreamImpl.createRepartitionedSource(builder, keySerde, valSerde, queryableStoreName, name);
-    }
 }
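
The KGroupedStreamImpl changes above collapse the String- and StateStoreSupplier-based overloads into the Materialized variants: the no-argument reduce()/count()/aggregate() now delegate via Materialized.with(...), reusing the grouped stream's key and value serdes where the types are known (and passing a null value serde for aggregate(), whose result type VR cannot be inferred), while the windowed overloads are gone in favor of windowedBy(...). A minimal sketch of the surviving call paths; the topic name, store name, and serdes below are illustrative, not taken from the patch:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KGroupedStream;
    import org.apache.kafka.streams.kstream.KStream;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.Serialized;
    import org.apache.kafka.streams.kstream.TimeWindows;
    import org.apache.kafka.streams.kstream.Windowed;
    import org.apache.kafka.streams.state.KeyValueStore;

    public class GroupedStreamSketch {
        public static void main(final String[] args) {
            // Sketch only: topic name "words" and store name "counts-store"
            // are hypothetical.
            final StreamsBuilder builder = new StreamsBuilder();
            final KStream<String, String> words = builder.stream("words");

            final KGroupedStream<String, String> grouped =
                    words.groupByKey(Serialized.with(Serdes.String(), Serdes.String()));

            // count() now fills in the key serde and Serdes.Long() itself; name
            // the store via Materialized.as(...) only when it should be queryable.
            final KTable<String, Long> counts = grouped.count(
                    Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store"));

            // Windowed aggregations go through windowedBy(...) instead of the
            // removed reduce/count/aggregate(..., Windows, ...) overloads.
            final KTable<Windowed<String>, Long> windowedCounts = grouped
                    .windowedBy(TimeWindows.of(60_000L))
                    .count();
        }
    }

Note that Materialized.with(keySerde, valSerde) names no store, so the result is not queryable by name; the isQueryable bookkeeping deleted above now lives in MaterializedInternal and is read in doAggregate().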
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedTableImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedTableImpl.java
index 6e33251..db119f3 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedTableImpl.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedTableImpl.java
@@ -48,7 +48,6 @@ public class KGroupedTableImpl<K, V> extends AbstractStream<K> implements KGroup
 
     protected final Serde<K> keySerde;
     protected final Serde<V> valSerde;
-    private boolean isQueryable;
     private final Initializer<Long> countInitializer = new Initializer<Long>() {
         @Override
         public Long apply() {
@@ -78,83 +77,6 @@ public class KGroupedTableImpl<K, V> extends AbstractStream<K> implements KGroup
         super(builder, name, Collections.singleton(sourceName));
         this.keySerde = keySerde;
         this.valSerde = valSerde;
-        this.isQueryable = true;
-    }
-
-    private void determineIsQueryable(final String queryableStoreName) {
-        if (queryableStoreName == null) {
-            isQueryable = false;
-        } // no need for else {} since isQueryable is true by default
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <T> KTable<K, T> aggregate(final Initializer<T> initializer,
-                                      final Aggregator<? super K, ? super V, T> adder,
-                                      final Aggregator<? super K, ? super V, T> subtractor,
-                                      final Serde<T> aggValueSerde,
-                                      final String queryableStoreName) {
-        determineIsQueryable(queryableStoreName);
-        return aggregate(initializer, adder, subtractor, keyValueStore(keySerde, aggValueSerde, getOrCreateName(queryableStoreName, AGGREGATE_NAME)));
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <T> KTable<K, T> aggregate(final Initializer<T> initializer,
-                                      final Aggregator<? super K, ? super V, T> adder,
-                                      final Aggregator<? super K, ? super V, T> subtractor,
-                                      final Serde<T> aggValueSerde) {
-        return aggregate(initializer, adder, subtractor, aggValueSerde, null);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <T> KTable<K, T> aggregate(final Initializer<T> initializer,
-                                      final Aggregator<? super K, ? super V, T> adder,
-                                      final Aggregator<? super K, ? super V, T> subtractor,
-                                      final String queryableStoreName) {
-        determineIsQueryable(queryableStoreName);
-        return aggregate(initializer, adder, subtractor, null, getOrCreateName(queryableStoreName, AGGREGATE_NAME));
-    }
-
-    @Override
-    public <T> KTable<K, T> aggregate(final Initializer<T> initializer,
-                                      final Aggregator<? super K, ? super V, T> adder,
-                                      final Aggregator<? super K, ? super V, T> subtractor) {
-        return aggregate(initializer, adder, subtractor, (String) null);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <T> KTable<K, T> aggregate(final Initializer<T> initializer,
-                                      final Aggregator<? super K, ? super V, T> adder,
-                                      final Aggregator<? super K, ? super V, T> subtractor,
-                                      final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        Objects.requireNonNull(initializer, "initializer can't be null");
-        Objects.requireNonNull(adder, "adder can't be null");
-        Objects.requireNonNull(subtractor, "subtractor can't be null");
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-        ProcessorSupplier<K, Change<V>> aggregateSupplier = new KTableAggregate<>(storeSupplier.name(), initializer, adder, subtractor);
-        return doAggregate(aggregateSupplier, AGGREGATE_NAME, storeSupplier);
-    }
-
-    @SuppressWarnings("deprecation")
-    private <T> KTable<K, T> doAggregate(final ProcessorSupplier<K, Change<V>> aggregateSupplier,
-                                         final String functionName,
-                                         final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        final String sinkName = builder.newProcessorName(KStreamImpl.SINK_NAME);
-        final String sourceName = builder.newProcessorName(KStreamImpl.SOURCE_NAME);
-        final String funcName = builder.newProcessorName(functionName);
-
-        buildAggregate(aggregateSupplier,
-                       storeSupplier.name() + KStreamImpl.REPARTITION_TOPIC_SUFFIX,
-                       funcName,
-                       sourceName,
-                       sinkName);
-        builder.internalTopologyBuilder.addStateStore(storeSupplier, funcName);
-
-        // return the KTable representation with the intermediate topic as the sources
-        return new KTableImpl<>(builder, funcName, aggregateSupplier, Collections.singleton(sourceName), storeSupplier.name(), isQueryable);
     }
 
     private void buildAggregate(final ProcessorSupplier<K, Change<V>> aggregateSupplier,
@@ -196,16 +118,7 @@ public class KGroupedTableImpl<K, V> extends AbstractStream<K> implements KGroup
                                                               .materialize(), funcName);
 
         // return the KTable representation with the intermediate topic as the sources
-        return new KTableImpl<>(builder, funcName, aggregateSupplier, Collections.singleton(sourceName), materialized.storeName(), isQueryable);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public KTable<K, V> reduce(final Reducer<V> adder,
-                               final Reducer<V> subtractor,
-                               final String queryableStoreName) {
-        determineIsQueryable(queryableStoreName);
-        return reduce(adder, subtractor, keyValueStore(keySerde, valSerde, getOrCreateName(queryableStoreName, REDUCE_NAME)));
+        return new KTableImpl<>(builder, funcName, aggregateSupplier, Collections.singleton(sourceName), materialized.storeName(), materialized.isQueryable());
     }
 
     @Override
@@ -216,7 +129,13 @@ public class KGroupedTableImpl<K, V> extends AbstractStream<K> implements KGroup
         Objects.requireNonNull(subtractor, "subtractor can't be null");
         Objects.requireNonNull(materialized, "materialized can't be null");
         final MaterializedInternal<K, V, KeyValueStore<Bytes, byte[]>> materializedInternal
-                = new MaterializedInternal<>(materialized, builder, REDUCE_NAME);
+                = new MaterializedInternal<>(materialized, builder, AGGREGATE_NAME);
+        if (materializedInternal.keySerde() == null) {
+            materializedInternal.withKeySerde(keySerde);
+        }
+        if (materializedInternal.valueSerde() == null) {
+            materializedInternal.withValueSerde(valSerde);
+        }
         final ProcessorSupplier<K, Change<V>> aggregateSupplier = new KTableReduce<>(materializedInternal.storeName(),
                                                                                      adder,
                                                                                      subtractor);
@@ -226,37 +145,33 @@ public class KGroupedTableImpl<K, V> extends AbstractStream<K> implements KGroup
     @Override
     public KTable<K, V> reduce(final Reducer<V> adder,
                                final Reducer<V> subtractor) {
-        return reduce(adder, subtractor, (String) null);
+        return reduce(adder, subtractor, Materialized.<K, V, KeyValueStore<Bytes, byte[]>>with(keySerde, valSerde));
     }
 
-    @SuppressWarnings("deprecation")
     @Override
-    public KTable<K, V> reduce(final Reducer<V> adder,
-                               final Reducer<V> subtractor,
-                               final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        Objects.requireNonNull(adder, "adder can't be null");
-        Objects.requireNonNull(subtractor, "subtractor can't be null");
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-        ProcessorSupplier<K, Change<V>> aggregateSupplier = new KTableReduce<>(storeSupplier.name(), adder, subtractor);
-        return doAggregate(aggregateSupplier, REDUCE_NAME, storeSupplier);
-    }
+    public KTable<K, Long> count(final Materialized<K, Long, KeyValueStore<Bytes, byte[]>> materialized) {
+        final MaterializedInternal<K, Long, KeyValueStore<Bytes, byte[]>> materializedInternal
+                = new MaterializedInternal<>(materialized, builder, AGGREGATE_NAME);
+        if (materializedInternal.keySerde() == null) {
+            materializedInternal.withKeySerde(keySerde);
+        }
+        if (materializedInternal.valueSerde() == null) {
+            materializedInternal.withValueSerde(Serdes.Long());
+        }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public KTable<K, Long> count(final String queryableStoreName) {
-        determineIsQueryable(queryableStoreName);
-        return count(keyValueStore(keySerde, Serdes.Long(), getOrCreateName(queryableStoreName, AGGREGATE_NAME)));
+        final ProcessorSupplier<K, Change<V>> aggregateSupplier = new KTableAggregate<>(materializedInternal.storeName(),
+                countInitializer,
+                countAdder,
+                countSubtractor);
+
+        return doAggregate(aggregateSupplier, AGGREGATE_NAME, materializedInternal);
     }
 
     @Override
-    public KTable<K, Long> count(final Materialized<K, Long, KeyValueStore<Bytes, byte[]>> materialized) {
-        return aggregate(countInitializer,
-                         countAdder,
-                         countSubtractor,
-                         materialized);
+    public KTable<K, Long> count() {
+        return count(Materialized.<K, Long, KeyValueStore<Bytes, byte[]>>with(keySerde, Serdes.Long()));
     }
 
-    @SuppressWarnings("unchecked")
     @Override
     public <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
                                         final Aggregator<? super K, ? super V, VR> adder,
@@ -266,6 +181,7 @@ public class KGroupedTableImpl<K, V> extends AbstractStream<K> implements KGroup
         Objects.requireNonNull(adder, "adder can't be null");
         Objects.requireNonNull(subtractor, "subtractor can't be null");
         Objects.requireNonNull(materialized, "materialized can't be null");
+
         final MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materializedInternal =
                 new MaterializedInternal<>(materialized, builder, AGGREGATE_NAME);
         if (materializedInternal.keySerde() == null) {
@@ -279,18 +195,10 @@ public class KGroupedTableImpl<K, V> extends AbstractStream<K> implements KGroup
     }
 
     @Override
-    public KTable<K, Long> count() {
-        return count((String) null);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public KTable<K, Long> count(final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        return this.aggregate(
-                countInitializer,
-                countAdder,
-                countSubtractor,
-                storeSupplier);
+    public <T> KTable<K, T> aggregate(final Initializer<T> initializer,
+                                      final Aggregator<? super K, ? super V, T> adder,
+                                      final Aggregator<? super K, ? super V, T> subtractor) {
+        return aggregate(initializer, adder, subtractor, Materialized.<K, T, KeyValueStore<Bytes, byte[]>>with(keySerde, null));
     }
 
 }
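
KGroupedTableImpl gets the same treatment: count() is now implemented directly against its own MaterializedInternal rather than being routed through aggregate(), and reduce()/aggregate() keep only the Materialized overloads, filling in the table's serdes when the caller leaves them unset (again with a null value serde for aggregate(), since VR is unknown). A sketch under the same assumptions, with a hypothetical topic and store name:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.KeyValue;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KGroupedTable;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.KeyValueMapper;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.state.KeyValueStore;

    public class GroupedTableSketch {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            final KTable<String, String> table = builder.table("input-table");

            // Regroup the table by its value; the anonymous-class style
            // mirrors the Merger/Initializer classes in the code above.
            final KGroupedTable<String, String> grouped = table.groupBy(
                    new KeyValueMapper<String, String, KeyValue<String, String>>() {
                        @Override
                        public KeyValue<String, String> apply(final String key, final String value) {
                            return KeyValue.pair(value, value);
                        }
                    });

            // Only the Materialized overload remains; the no-argument count()
            // now delegates to it with the key serde and Serdes.Long().
            final KTable<String, Long> counts = grouped.count(
                    Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("group-counts"));
        }
    }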
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImpl.java
index 857abf7..2ddd5ff 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImpl.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImpl.java
@@ -104,8 +104,6 @@ public class KStreamImpl<K, V> extends AbstractStream<K> implements KStream<K, V
 
     private static final String FOREACH_NAME = "KSTREAM-FOREACH-";
 
-    private final KeyValueMapper<K, V, String> defaultKeyValueMapper;
-
     private final boolean repartitionRequired;
 
     public KStreamImpl(final InternalStreamsBuilder builder,
@@ -114,12 +112,6 @@ public class KStreamImpl<K, V> extends AbstractStream<K> implements KStream<K, V
                        final boolean repartitionRequired) {
         super(builder, name, sourceNodes);
         this.repartitionRequired = repartitionRequired;
-        this.defaultKeyValueMapper = new KeyValueMapper<K, V, String>() {
-            @Override
-            public String apply(K key, V value) {
-                return String.format("%s, %s", key, value);
-            }
-        };
     }
 
     @Override
@@ -191,65 +183,6 @@ public class KStreamImpl<K, V> extends AbstractStream<K> implements KStream<K, V
         return new KStreamImpl<>(builder, name, sourceNodes, this.repartitionRequired);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public void print() {
-        print(defaultKeyValueMapper, null, null, this.name);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void print(final String label) {
-        print(defaultKeyValueMapper, null, null, label);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void print(final Serde<K> keySerde,
-                      final Serde<V> valSerde) {
-        print(defaultKeyValueMapper, keySerde, valSerde, this.name);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void print(final Serde<K> keySerde,
-                      final Serde<V> valSerde,
-                      final String label) {
-        print(defaultKeyValueMapper, keySerde, valSerde, label);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void print(final KeyValueMapper<? super K, ? super V, String> mapper) {
-        print(mapper, null, null, this.name);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void print(final KeyValueMapper<? super K, ? super V, String> mapper,
-                      final String label) {
-        print(mapper, null, null, label);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void print(final KeyValueMapper<? super K, ? super V, String> mapper,
-                      final Serde<K> keySerde,
-                      final Serde<V> valSerde) {
-        print(mapper, keySerde, valSerde, this.name);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void print(final KeyValueMapper<? super K, ? super V, String> mapper,
-                      final Serde<K> keySerde,
-                      final Serde<V> valSerde,
-                      final String label) {
-        Objects.requireNonNull(mapper, "mapper can't be null");
-        Objects.requireNonNull(label, "label can't be null");
-        print(Printed.<K, V>toSysOut().withLabel(label).withKeyValueMapper(mapper));
-    }
-
     @Override
     public void print(final Printed<K, V> printed) {
         Objects.requireNonNull(printed, "printed can't be null");
@@ -258,72 +191,6 @@ public class KStreamImpl<K, V> extends AbstractStream<K> implements KStream<K, V
         builder.internalTopologyBuilder.addProcessor(name, printedInternal.build(this.name), this.name);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public void writeAsText(final String filePath) {
-        writeAsText(filePath, this.name, null, null, defaultKeyValueMapper);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void writeAsText(final String filePath,
-                            final String label) {
-        writeAsText(filePath, label, null, null, defaultKeyValueMapper);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void writeAsText(final String filePath,
-                            final Serde<K> keySerde,
-                            final Serde<V> valSerde) {
-        writeAsText(filePath, this.name, keySerde, valSerde, defaultKeyValueMapper);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void writeAsText(final String filePath,
-                            final String label,
-                            final Serde<K> keySerde,
-                            final Serde<V> valSerde) {
-        writeAsText(filePath, label, keySerde, valSerde, defaultKeyValueMapper);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void writeAsText(final String filePath,
-                            final KeyValueMapper<? super K, ? super V, String> mapper) {
-        writeAsText(filePath, this.name, null, null, mapper);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void writeAsText(final String filePath,
-                            final String label,
-                            final KeyValueMapper<? super K, ? super V, String> mapper) {
-        writeAsText(filePath, label, null, null, mapper);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void writeAsText(final String filePath,
-                            final Serde<K> keySerde,
-                            final Serde<V> valSerde,
-                            final KeyValueMapper<? super K, ? super V, String> mapper) {
-        writeAsText(filePath, this.name, keySerde, valSerde, mapper);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void writeAsText(final String filePath,
-                            final String label,
-                            final Serde<K> keySerde,
-                            final Serde<V> valSerde, KeyValueMapper<? super K, ? super V, String> mapper) {
-        Objects.requireNonNull(filePath, "filePath can't be null");
-        Objects.requireNonNull(label, "label can't be null");
-        Objects.requireNonNull(mapper, "mapper can't be null");
-        print(Printed.<K, V>toFile(filePath).withKeyValueMapper(mapper).withLabel(label));
-    }
-
     @Override
     public <K1, V1> KStream<K1, V1> flatMap(final KeyValueMapper<? super K, ? super V, ? extends Iterable<? extends KeyValue<? extends K1, ? extends V1>>> mapper) {
         Objects.requireNonNull(mapper, "mapper can't be null");
@@ -399,14 +266,6 @@ public class KStreamImpl<K, V> extends AbstractStream<K> implements KStream<K, V
         return new KStreamImpl<>(builder, name, allSourceNodes, requireRepartitioning);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public KStream<K, V> through(final Serde<K> keySerde,
-                                 final Serde<V> valSerde,
-                                 final StreamPartitioner<? super K, ? super V> partitioner, String topic) {
-        return through(topic, Produced.with(keySerde, valSerde, partitioner));
-    }
-
     @Override
     public KStream<K, V> through(final String topic, final Produced<K, V> produced) {
         final ProducedInternal<K, V> producedInternal = new ProducedInternal<>(produced);
@@ -436,24 +295,9 @@ public class KStreamImpl<K, V> extends AbstractStream<K> implements KStream<K, V
         return new KStreamImpl<>(builder, name, sourceNodes, repartitionRequired);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public KStream<K, V> through(final Serde<K> keySerde,
-                                 final Serde<V> valSerde,
-                                 final String topic) {
-        return through(topic, Produced.with(keySerde, valSerde));
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public KStream<K, V> through(final StreamPartitioner<? super K, ? super V> partitioner,
-                                 final String topic) {
-        return through(topic, Produced.streamPartitioner(partitioner));
-    }
-
     @Override
     public KStream<K, V> through(final String topic) {
-        return through(null, null, null, topic);
+        return through(topic, Produced.<K, V>with(null, null, null));
     }
 
     @Override
@@ -461,31 +305,6 @@ public class KStreamImpl<K, V> extends AbstractStream<K> implements KStream<K, V
         to(topic, Produced.<K, V>with(null, null, null));
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public void to(final StreamPartitioner<? super K, ? super V> partitioner,
-                   final String topic) {
-        to(topic, Produced.streamPartitioner(partitioner));
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public void to(final Serde<K> keySerde,
-                   final Serde<V> valSerde,
-                   final String topic) {
-        to(topic, Produced.with(keySerde, valSerde));
-    }
-
-    @SuppressWarnings({"unchecked", "deprecation"})
-    @Override
-    public void to(final Serde<K> keySerde,
-                   final Serde<V> valSerde,
-                   final StreamPartitioner<? super K, ? super V> partitioner,
-                   final String topic) {
-        Objects.requireNonNull(topic, "topic can't be null");
-        to(topic, Produced.with(keySerde, valSerde, partitioner));
-    }
-
     @SuppressWarnings("unchecked")
     @Override
     public void to(final String topic, final Produced<K, V> produced) {
@@ -562,18 +381,6 @@ public class KStreamImpl<K, V> extends AbstractStream<K> implements KStream<K, V
         }
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public <V1, R> KStream<K, R> join(final KStream<K, V1> other,
-                                      final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
-                                      final JoinWindows windows,
-                                      final Serde<K> keySerde,
-                                      final Serde<V> thisValueSerde,
-                                      final Serde<V1> otherValueSerde) {
-        return doJoin(other, joiner, windows, Joined.with(keySerde, thisValueSerde, otherValueSerde),
-            new KStreamImplJoin(false, false));
-    }
-
     @Override
     public <V1, R> KStream<K, R> join(final KStream<K, V1> other,
                                       final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
@@ -590,17 +397,6 @@ public class KStreamImpl<K, V> extends AbstractStream<K> implements KStream<K, V
                              new KStreamImplJoin(false, false));
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public <V1, R> KStream<K, R> outerJoin(final KStream<K, V1> other,
-                                           final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
-                                           final JoinWindows windows,
-                                           final Serde<K> keySerde,
-                                           final Serde<V> thisValueSerde,
-                                           final Serde<V1> otherValueSerde) {
-        return outerJoin(other, joiner, windows, Joined.with(keySerde, thisValueSerde, otherValueSerde));
-    }
-
     @Override
     public <V1, R> KStream<K, R> outerJoin(final KStream<K, V1> other,
                                            final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
@@ -692,21 +488,6 @@ public class KStreamImpl<K, V> extends AbstractStream<K> implements KStream<K, V
         return sourceName;
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public <V1, R> KStream<K, R> leftJoin(final KStream<K, V1> other,
-                                          final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
-                                          final JoinWindows windows,
-                                          final Serde<K> keySerde,
-                                          final Serde<V> thisValSerde,
-                                          final Serde<V1> otherValueSerde) {
-        return doJoin(other,
-            joiner,
-            windows,
-            Joined.with(keySerde, thisValSerde, otherValueSerde),
-            new KStreamImplJoin(true, false));
-    }
-
     @Override
     public <V1, R> KStream<K, R> leftJoin(final KStream<K, V1> other,
                                           final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
@@ -748,15 +529,6 @@ public class KStreamImpl<K, V> extends AbstractStream<K> implements KStream<K, V
         }
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public <V1, R> KStream<K, R> join(final KTable<K, V1> other,
-                                      final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
-                                      final Serde<K> keySerde,
-                                      final Serde<V> valueSerde) {
-        return join(other, joiner, Joined.<K, V, V1>with(keySerde, valueSerde, null));
-    }
-
     @Override
     public <K1, V1, R> KStream<K, R> leftJoin(final GlobalKTable<K1, V1> globalTable,
                                               final KeyValueMapper<? super K, ? super V, ? extends K1> keyMapper,
@@ -822,15 +594,6 @@ public class KStreamImpl<K, V> extends AbstractStream<K> implements KStream<K, V
         }
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public <V1, R> KStream<K, R> leftJoin(final KTable<K, V1> other,
-                                          final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
-                                          final Serde<K> keySerde,
-                                          final Serde<V> valueSerde) {
-        return leftJoin(other, joiner, Joined.<K, V, V1>with(keySerde, valueSerde, null));
-    }
-
     @Override
     public <K1> KGroupedStream<K1, V> groupBy(final KeyValueMapper<? super K, ? super V, K1> selector) {
         return groupBy(selector, Serialized.<K1, V>with(null, null));
@@ -851,15 +614,6 @@ public class KStreamImpl<K, V> extends AbstractStream<K> implements KStream<K, V
                                         true);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public <K1> KGroupedStream<K1, V> groupBy(final KeyValueMapper<? super K, ? super V, K1> selector,
-                                              final Serde<K1> keySerde,
-                                              final Serde<V> valSerde) {
-        Objects.requireNonNull(selector, "selector can't be null");
-        return groupBy(selector, Serialized.with(keySerde, valSerde));
-    }
-
     @Override
     public KGroupedStream<K, V> groupByKey() {
         return groupByKey(Serialized.<K, V>with(null, null));
@@ -877,13 +631,6 @@ public class KStreamImpl<K, V> extends AbstractStream<K> implements KStream<K, V
 
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public KGroupedStream<K, V> groupByKey(final Serde<K> keySerde,
-                                           final Serde<V> valSerde) {
-        return groupByKey(Serialized.with(keySerde, valSerde));
-    }
-
     private static <K, V> StoreBuilder<WindowStore<K, V>> createWindowedStateStore(final JoinWindows windows,
                                                                                    final Serde<K> keySerde,
                                                                                    final Serde<V> valueSerde,
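
On KStreamImpl the removals are the print()/writeAsText() families and the serde- and partitioner-taking overloads of through(), to(), join(), leftJoin(), outerJoin(), groupBy() and groupByKey(); each of them was already a one-line delegate to a Printed, Produced, Joined or Serialized config object, which now becomes the only entry point. A sketch of the replacements, with illustrative topic names and a hypothetical file path:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KGroupedStream;
    import org.apache.kafka.streams.kstream.KStream;
    import org.apache.kafka.streams.kstream.Printed;
    import org.apache.kafka.streams.kstream.Produced;
    import org.apache.kafka.streams.kstream.Serialized;

    public class StreamConfigObjectSketch {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            final KStream<String, String> stream = builder.stream("input");

            // print(label) / writeAsText(filePath) -> print(Printed)
            stream.print(Printed.<String, String>toSysOut().withLabel("input"));
            stream.print(Printed.<String, String>toFile("/tmp/input.txt"));

            // through(keySerde, valSerde, topic) -> through(topic, Produced)
            final KStream<String, String> repartitioned =
                    stream.through("intermediate", Produced.with(Serdes.String(), Serdes.String()));

            // to(keySerde, valSerde, partitioner, topic) -> to(topic, Produced)
            repartitioned.to("output", Produced.with(Serdes.String(), Serdes.String()));

            // groupByKey(keySerde, valSerde) -> groupByKey(Serialized)
            final KGroupedStream<String, String> grouped =
                    stream.groupByKey(Serialized.with(Serdes.String(), Serdes.String()));
        }
    }

Joined.with(keySerde, thisValueSerde, otherValueSerde) plays the same role for the deleted join/leftJoin/outerJoin overloads shown above.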
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableImpl.java
index 785f73a..1c5ad4d 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableImpl.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableImpl.java
@@ -107,57 +107,44 @@ public class KTableImpl<K, S, V> extends AbstractStream<K> implements KTable<K,
     public String queryableStoreName() {
         if (!isQueryable) {
             return null;
-        }
-        return this.queryableStoreName;
-    }
-
-    @SuppressWarnings("deprecation")
-    private KTable<K, V> doFilter(final Predicate<? super K, ? super V> predicate,
-                                  final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier,
-                                  final boolean isFilterNot) {
-        Objects.requireNonNull(predicate, "predicate can't be null");
-        String name = builder.newProcessorName(FILTER_NAME);
-        String internalStoreName = null;
-        if (storeSupplier != null) {
-            internalStoreName = storeSupplier.name();
-        }
-        KTableProcessorSupplier<K, V, V> processorSupplier = new KTableFilter<>(this, predicate, isFilterNot, internalStoreName);
-        builder.internalTopologyBuilder.addProcessor(name, processorSupplier, this.name);
-        if (storeSupplier != null) {
-            builder.internalTopologyBuilder.addStateStore(storeSupplier, name);
-            return new KTableImpl<>(builder, name, processorSupplier, this.keySerde, this.valSerde, sourceNodes, internalStoreName, true);
         } else {
-            return new KTableImpl<>(builder, name, processorSupplier, sourceNodes, this.queryableStoreName, false);
+            return this.queryableStoreName;
         }
     }
 
     private KTable<K, V> doFilter(final Predicate<? super K, ? super V> predicate,
                                   final MaterializedInternal<K, V, KeyValueStore<Bytes, byte[]>> materialized,
                                   final boolean filterNot) {
-        String name = builder.newProcessorName(FILTER_NAME);
+        final String name = builder.newProcessorName(FILTER_NAME);
+
+        // only materialize if the state store is queryable
+        final boolean shouldMaterialize = materialized != null && materialized.isQueryable();
 
         KTableProcessorSupplier<K, V, V> processorSupplier = new KTableFilter<>(this,
-                                                                                predicate,
-                                                                                filterNot,
-                                                                                materialized.storeName());
+                predicate,
+                filterNot,
+                shouldMaterialize ? materialized.storeName() : null);
+
         builder.internalTopologyBuilder.addProcessor(name, processorSupplier, this.name);
 
-        final StoreBuilder builder = new KeyValueStoreMaterializer<>(materialized).materialize();
-        this.builder.internalTopologyBuilder.addStateStore(builder, name);
-
-        return new KTableImpl<>(this.builder,
-                                name,
-                                processorSupplier,
-                                this.keySerde,
-                                this.valSerde,
-                                sourceNodes,
-                                builder.name(),
-                                true);
+        if (shouldMaterialize) {
+            this.builder.internalTopologyBuilder.addStateStore(new KeyValueStoreMaterializer<>(materialized).materialize(), name);
+        }
+
+        return new KTableImpl<>(builder,
+                name,
+                processorSupplier,
+                this.keySerde,
+                this.valSerde,
+                sourceNodes,
+                shouldMaterialize ? materialized.storeName() : this.queryableStoreName,
+                shouldMaterialize);
     }
 
     @Override
     public KTable<K, V> filter(final Predicate<? super K, ? super V> predicate) {
-        return filter(predicate, (String) null);
+        Objects.requireNonNull(predicate, "predicate can't be null");
+        return doFilter(predicate, null, false);
     }
 
     @Override
@@ -168,28 +155,10 @@ public class KTableImpl<K, S, V> extends AbstractStream<K> implements KTable<K,
         return doFilter(predicate, new MaterializedInternal<>(materialized, builder, FILTER_NAME), false);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public KTable<K, V> filter(final Predicate<? super K, ? super V> predicate,
-                               final String queryableStoreName) {
-        org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier = null;
-        if (queryableStoreName != null) {
-            storeSupplier = keyValueStore(this.keySerde, this.valSerde, queryableStoreName);
-        }
-        return doFilter(predicate, storeSupplier, false);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public KTable<K, V> filter(final Predicate<? super K, ? super V> predicate,
-                               final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-        return doFilter(predicate, storeSupplier, false);
-    }
-
     @Override
     public KTable<K, V> filterNot(final Predicate<? super K, ? super V> predicate) {
-        return filterNot(predicate, (String) null);
+        Objects.requireNonNull(predicate, "predicate can't be null");
+        return doFilter(predicate, null, true);
     }
 
     @Override
@@ -200,60 +169,46 @@ public class KTableImpl<K, S, V> extends AbstractStream<K> implements KTable<K,
         return doFilter(predicate, new MaterializedInternal<>(materialized, builder, FILTER_NAME), true);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public KTable<K, V> filterNot(final Predicate<? super K, ? super V> predicate,
-                                  final String queryableStoreName) {
-        org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier = null;
-        if (queryableStoreName != null) {
-            storeSupplier = keyValueStore(this.keySerde, this.valSerde, queryableStoreName);
-        }
-        return doFilter(predicate, storeSupplier, true);
-    }
+    private <VR> KTable<K, VR> doMapValues(final ValueMapperWithKey<? super K, ? super V, ? extends VR> mapper,
+                                           final MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
+        final String name = builder.newProcessorName(MAPVALUES_NAME);
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public KTable<K, V> filterNot(final Predicate<? super K, ? super V> predicate,
-                                  final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-        return doFilter(predicate, storeSupplier, true);
-    }
+        // only materialize if the state store is queryable
+        final boolean shouldMaterialize = materialized != null && materialized.isQueryable();
+
+        final KTableProcessorSupplier<K, V, VR> processorSupplier = new KTableMapValues<>(
+                this,
+                mapper,
+                shouldMaterialize ? materialized.storeName() : null);
 
-    @SuppressWarnings("deprecation")
-    private <V1> KTable<K, V1> doMapValues(final ValueMapperWithKey<? super K, ? super V, ? extends V1> mapper,
-                                           final Serde<V1> valueSerde,
-                                           final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        Objects.requireNonNull(mapper);
-        String name = builder.newProcessorName(MAPVALUES_NAME);
-        String internalStoreName = null;
-        if (storeSupplier != null) {
-            internalStoreName = storeSupplier.name();
-        }
-        KTableProcessorSupplier<K, V, V1> processorSupplier = new KTableMapValues<>(this, mapper, internalStoreName);
         builder.internalTopologyBuilder.addProcessor(name, processorSupplier, this.name);
-        if (storeSupplier != null) {
-            builder.internalTopologyBuilder.addStateStore(storeSupplier, name);
-            return new KTableImpl<>(builder, name, processorSupplier, this.keySerde, valueSerde, sourceNodes, internalStoreName, true);
-        } else {
-            return new KTableImpl<>(builder, name, processorSupplier, sourceNodes, this.queryableStoreName, false);
+
+        if (shouldMaterialize) {
+            this.builder.internalTopologyBuilder.addStateStore(new KeyValueStoreMaterializer<>(materialized).materialize(), name);
         }
+
+        return new KTableImpl<>(builder, name, processorSupplier, sourceNodes, shouldMaterialize ? materialized.storeName() : this.queryableStoreName, shouldMaterialize);
     }
 
     @Override
-    public <V1> KTable<K, V1> mapValues(final ValueMapper<? super V, ? extends V1> mapper) {
-        return doMapValues(withKey(mapper), null, null);
+    public <VR> KTable<K, VR> mapValues(final ValueMapper<? super V, ? extends VR> mapper) {
+        Objects.requireNonNull(mapper, "mapper can't be null");
+        return doMapValues(withKey(mapper), null);
     }
 
     @Override
     public <VR> KTable<K, VR> mapValues(final ValueMapperWithKey<? super K, ? super V, ? extends VR> mapper) {
-        return doMapValues(mapper, null, null);
-
+        Objects.requireNonNull(mapper, "mapper can't be null");
+        return doMapValues(mapper, null);
     }
 
     @Override
     public <VR> KTable<K, VR> mapValues(final ValueMapper<? super V, ? extends VR> mapper,
                                         final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
-        return mapValues(withKey(mapper), materialized);
+        Objects.requireNonNull(mapper, "mapper can't be null");
+        Objects.requireNonNull(materialized, "materialized can't be null");
+
+        return doMapValues(withKey(mapper), new MaterializedInternal<>(materialized, builder, MAPVALUES_NAME));
     }
 
     @Override
@@ -261,36 +216,8 @@ public class KTableImpl<K, S, V> extends AbstractStream<K> implements KTable<K,
                                         final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
         Objects.requireNonNull(mapper, "mapper can't be null");
         Objects.requireNonNull(materialized, "materialized can't be null");
-        final MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materializedInternal
-                = new MaterializedInternal<>(materialized, builder, MAPVALUES_NAME);
-        final String name = builder.newProcessorName(MAPVALUES_NAME);
-        final KTableProcessorSupplier<K, V, VR> processorSupplier = new KTableMapValues<>(
-                this,
-                mapper,
-                materializedInternal.storeName());
-        builder.internalTopologyBuilder.addProcessor(name, processorSupplier, this.name);
-        builder.internalTopologyBuilder.addStateStore(
-                new KeyValueStoreMaterializer<>(materializedInternal).materialize(),
-                name);
-        return new KTableImpl<>(builder, name, processorSupplier, sourceNodes, this.queryableStoreName, true);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <V1> KTable<K, V1> mapValues(final ValueMapper<? super V, ? extends V1> mapper,
-                                        final Serde<V1> valueSerde,
-                                        final String queryableStoreName) {
-        return mapValues(withKey(mapper), Materialized.<K, V1, KeyValueStore<Bytes, byte[]>>as(queryableStoreName).
-                withValueSerde(valueSerde).withKeySerde(this.keySerde));
-    }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public  <V1> KTable<K, V1> mapValues(final ValueMapper<? super V, ? extends V1> mapper,
-                                         final Serde<V1> valueSerde,
-                                         final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-        return doMapValues(withKey(mapper), valueSerde, storeSupplier);
+        return doMapValues(mapper, new MaterializedInternal<>(materialized, builder, MAPVALUES_NAME));
     }
 
     @Override
@@ -328,24 +255,6 @@ public class KTableImpl<K, S, V> extends AbstractStream<K> implements KTable<K,
         return doJoin(other, joiner, new MaterializedInternal<>(materialized, builder, MERGE_NAME), false, false);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public <V1, R> KTable<K, R> join(final KTable<K, V1> other,
-                                     final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
-                                     final Serde<R> joinSerde,
-                                     final String queryableStoreName) {
-        return doJoin(other, joiner, false, false, joinSerde, queryableStoreName);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <V1, R> KTable<K, R> join(final KTable<K, V1> other,
-                                     final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
-                                     final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-        return doJoin(other, joiner, false, false, storeSupplier);
-    }
-
     @Override
     public <V1, R> KTable<K, R> outerJoin(final KTable<K, V1> other,
                                           final ValueJoiner<? super V, ? super V1, ? extends R> joiner) {
@@ -359,24 +268,6 @@ public class KTableImpl<K, S, V> extends AbstractStream<K> implements KTable<K,
         return doJoin(other, joiner, new MaterializedInternal<>(materialized, builder, MERGE_NAME), true, true);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public <V1, R> KTable<K, R> outerJoin(final KTable<K, V1> other,
-                                          final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
-                                          final Serde<R> joinSerde,
-                                          final String queryableStoreName) {
-        return doJoin(other, joiner, true, true, joinSerde, queryableStoreName);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <V1, R> KTable<K, R> outerJoin(final KTable<K, V1> other,
-                                          final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
-                                          final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-        return doJoin(other, joiner, true, true, storeSupplier);
-    }
-
     @Override
     public <V1, R> KTable<K, R> leftJoin(final KTable<K, V1> other,
                                          final ValueJoiner<? super V, ? super V1, ? extends R> joiner) {
@@ -394,64 +285,6 @@ public class KTableImpl<K, S, V> extends AbstractStream<K> implements KTable<K,
                       false);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public <V1, R> KTable<K, R> leftJoin(final KTable<K, V1> other,
-                                         final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
-                                         final Serde<R> joinSerde,
-                                         final String queryableStoreName) {
-        return doJoin(other, joiner, true, false, joinSerde, queryableStoreName);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Override
-    public <V1, R> KTable<K, R> leftJoin(final KTable<K, V1> other,
-                                         final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
-                                         final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
-        return doJoin(other, joiner, true, false, storeSupplier);
-    }
-
-    @SuppressWarnings({"unchecked", "deprecation"})
-    private <V1, R> KTable<K, R> doJoin(final KTable<K, V1> other,
-                                        final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
-                                        final boolean leftOuter,
-                                        final boolean rightOuter,
-                                        final Serde<R> joinSerde,
-                                        final String queryableStoreName) {
-        Objects.requireNonNull(other, "other can't be null");
-        Objects.requireNonNull(joiner, "joiner can't be null");
-
-        final org.apache.kafka.streams.processor.StateStoreSupplier storeSupplier
-            = queryableStoreName == null ? null : keyValueStore(this.keySerde, joinSerde, queryableStoreName);
-
-        return doJoin(other, joiner, leftOuter, rightOuter, storeSupplier);
-    }
-
-    @SuppressWarnings({"unchecked", "deprecation"})
-    private <V1, R> KTable<K, R> doJoin(final KTable<K, V1> other,
-                                        final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
-                                        final boolean leftOuter,
-                                        final boolean rightOuter,
-                                        final org.apache.kafka.streams.processor.StateStoreSupplier<KeyValueStore> storeSupplier) {
-        Objects.requireNonNull(other, "other can't be null");
-        Objects.requireNonNull(joiner, "joiner can't be null");
-        final String joinMergeName = builder.newProcessorName(MERGE_NAME);
-        final String internalQueryableName = storeSupplier == null ? null : storeSupplier.name();
-        final KTable<K, R> result = buildJoin((AbstractStream<K>) other,
-                                              joiner,
-                                              leftOuter,
-                                              rightOuter,
-                                              joinMergeName,
-                                              internalQueryableName);
-
-        if (internalQueryableName != null) {
-            builder.internalTopologyBuilder.addStateStore(storeSupplier, joinMergeName);
-        }
-
-        return result;
-    }
-
     @SuppressWarnings("unchecked")
     private <VO, VR> KTable<K, VR> doJoin(final KTable<K, VO> other,
                                           final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
@@ -469,6 +302,7 @@ public class KTableImpl<K, S, V> extends AbstractStream<K> implements KTable<K,
                                                joinMergeName,
                                                internalQueryableName);
 
+        // only materialize if specified in Materialized
         if (materialized != null) {
             final StoreBuilder<KeyValueStore<K, VR>> storeBuilder
                     = new KeyValueStoreMaterializer<>(materialized).materialize();
@@ -526,14 +360,6 @@ public class KTableImpl<K, S, V> extends AbstractStream<K> implements KTable<K,
         return new KTableImpl<>(builder, joinMergeName, joinMerge, allSourceNodes, internalQueryableName, internalQueryableName != null);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public <K1, V1> KGroupedTable<K1, V1> groupBy(final KeyValueMapper<? super K, ? super V, KeyValue<K1, V1>> selector,
-                                                  final Serde<K1> keySerde,
-                                                  final Serde<V1> valueSerde) {
-        return groupBy(selector, Serialized.with(keySerde, valueSerde));
-    }
-
     @Override
     public <K1, V1> KGroupedTable<K1, V1> groupBy(final KeyValueMapper<? super K, ? super V, KeyValue<K1, V1>> selector) {
         return this.groupBy(selector, Serialized.<K1, V1>with(null, null));
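
For callers migrating off the overloads removed above, everything now routes through
Materialized, and the reworked doFilter/doMapValues paths only materialize a store when
the Materialized is queryable (the shouldMaterialize check above). A minimal sketch of
the surviving call shapes, assuming the usual org.apache.kafka.streams imports; the
topic, store names, predicate, and mapper are illustrative, not taken from this commit:

    final StreamsBuilder builder = new StreamsBuilder();
    final KTable<String, String> table = builder.table("input-topic");

    // previously: table.filter(predicate, "filtered-store")
    final KTable<String, String> filtered = table.filter(
        (key, value) -> value != null,
        Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("filtered-store"));

    // previously: table.mapValues(mapper, Serdes.Integer(), "lengths-store")
    final KTable<String, Integer> lengths = table.mapValues(
        value -> value.length(),
        Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("lengths-store")
            .withValueSerde(Serdes.Integer()));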
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImpl.java
index 34e5bd7..c29c656 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImpl.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImpl.java
@@ -37,6 +37,7 @@ import java.util.Objects;
 import java.util.Set;
 
 import static org.apache.kafka.streams.kstream.internals.KGroupedStreamImpl.AGGREGATE_NAME;
+import static org.apache.kafka.streams.kstream.internals.KGroupedStreamImpl.REDUCE_NAME;
 
 public class SessionWindowedKStreamImpl<K, V> extends AbstractStream<K> implements SessionWindowedKStream<K, V> {
     private final SessionWindows windows;
@@ -49,13 +50,6 @@ public class SessionWindowedKStreamImpl<K, V> extends AbstractStream<K> implemen
             return aggOne + aggTwo;
         }
     };
-    private final Initializer<V> reduceInitializer = new Initializer<V>() {
-        @Override
-        public V apply() {
-            return null;
-        }
-    };
-
 
     SessionWindowedKStreamImpl(final SessionWindows windows,
                                final InternalStreamsBuilder builder,
@@ -65,6 +59,7 @@ public class SessionWindowedKStreamImpl<K, V> extends AbstractStream<K> implemen
                                final Serde<V> valSerde,
                                final GroupedStreamAggregateBuilder<K, V> aggregateBuilder) {
         super(builder, name, sourceNodes);
+        Objects.requireNonNull(windows, "windows can't be null");
         this.windows = windows;
         this.keySerde = keySerde;
         this.valSerde = valSerde;
@@ -73,35 +68,62 @@ public class SessionWindowedKStreamImpl<K, V> extends AbstractStream<K> implemen
 
     @Override
     public KTable<Windowed<K>, Long> count() {
-        return doAggregate(aggregateBuilder.countInitializer,
-                           aggregateBuilder.countAggregator,
-                           countMerger,
-                           Serdes.Long());
+        return count(Materialized.<K, Long, SessionStore<Bytes, byte[]>>with(keySerde, Serdes.Long()));
     }
 
+    @SuppressWarnings("unchecked")
     @Override
     public KTable<Windowed<K>, Long> count(final Materialized<K, Long, SessionStore<Bytes, byte[]>> materialized) {
         Objects.requireNonNull(materialized, "materialized can't be null");
         final MaterializedInternal<K, Long, SessionStore<Bytes, byte[]>> materializedInternal
                 = new MaterializedInternal<>(materialized, builder, AGGREGATE_NAME);
+        if (materializedInternal.keySerde() == null) {
+            materializedInternal.withKeySerde(keySerde);
+        }
         if (materializedInternal.valueSerde() == null) {
-            materialized.withValueSerde(Serdes.Long());
+            materializedInternal.withValueSerde(Serdes.Long());
         }
-        return aggregate(aggregateBuilder.countInitializer,
-                         aggregateBuilder.countAggregator,
-                         countMerger,
-                         materialized);
+
+        return (KTable<Windowed<K>, Long>) aggregateBuilder.build(
+                new KStreamSessionWindowAggregate<>(windows, materializedInternal.storeName(), aggregateBuilder.countInitializer, aggregateBuilder.countAggregator, countMerger),
+                AGGREGATE_NAME,
+                materialize(materializedInternal),
+                materializedInternal.isQueryable());
+    }
+
+    @Override
+    public KTable<Windowed<K>, V> reduce(final Reducer<V> reducer) {
+        return reduce(reducer, Materialized.<K, V, SessionStore<Bytes, byte[]>>with(keySerde, valSerde));
     }
 
     @SuppressWarnings("unchecked")
     @Override
+    public KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
+                                         final Materialized<K, V, SessionStore<Bytes, byte[]>> materialized) {
+        Objects.requireNonNull(reducer, "reducer can't be null");
+        Objects.requireNonNull(materialized, "materialized can't be null");
+        final Aggregator<K, V, V> reduceAggregator = aggregatorForReducer(reducer);
+        final MaterializedInternal<K, V, SessionStore<Bytes, byte[]>> materializedInternal
+                = new MaterializedInternal<>(materialized, builder, REDUCE_NAME);
+        if (materializedInternal.keySerde() == null) {
+            materializedInternal.withKeySerde(keySerde);
+        }
+        if (materializedInternal.valueSerde() == null) {
+            materializedInternal.withValueSerde(valSerde);
+        }
+
+        return (KTable<Windowed<K>, V>) aggregateBuilder.build(
+                new KStreamSessionWindowAggregate<>(windows, materializedInternal.storeName(), aggregateBuilder.reduceInitializer, reduceAggregator, mergerForAggregator(reduceAggregator)),
+                REDUCE_NAME,
+                materialize(materializedInternal),
+                materializedInternal.isQueryable());
+    }
+
+    @Override
     public <T> KTable<Windowed<K>, T> aggregate(final Initializer<T> initializer,
                                                 final Aggregator<? super K, ? super V, T> aggregator,
                                                 final Merger<? super K, T> sessionMerger) {
-        Objects.requireNonNull(initializer, "initializer can't be null");
-        Objects.requireNonNull(aggregator, "aggregator can't be null");
-        Objects.requireNonNull(sessionMerger, "sessionMerger can't be null");
-        return doAggregate(initializer, aggregator, sessionMerger, (Serde<T>) valSerde);
+        return aggregate(initializer, aggregator, sessionMerger, Materialized.<K, T, SessionStore<Bytes, byte[]>>with(keySerde, null));
     }
 
     @SuppressWarnings("unchecked")
@@ -123,25 +145,9 @@ public class SessionWindowedKStreamImpl<K, V> extends AbstractStream<K> implemen
                 new KStreamSessionWindowAggregate<>(windows, materializedInternal.storeName(), initializer, aggregator, sessionMerger),
                 AGGREGATE_NAME,
                 materialize(materializedInternal),
-                true);
-    }
-
-    @Override
-    public KTable<Windowed<K>, V> reduce(final Reducer<V> reducer) {
-        Objects.requireNonNull(reducer, "reducer can't be null");
-        return doAggregate(reduceInitializer, aggregatorForReducer(reducer), mergerForAggregator(aggregatorForReducer(reducer)), valSerde);
-    }
-
-    @Override
-    public KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
-                                         final Materialized<K, V, SessionStore<Bytes, byte[]>> materialized) {
-        Objects.requireNonNull(reducer, "reducer can't be null");
-        Objects.requireNonNull(materialized, "materialized can't be null");
-        final Aggregator<K, V, V> reduceAggregator = aggregatorForReducer(reducer);
-        return aggregate(reduceInitializer, reduceAggregator, mergerForAggregator(reduceAggregator), materialized);
+                materializedInternal.isQueryable());
     }
 
-
     private <VR> StoreBuilder<SessionStore<K, VR>> materialize(final MaterializedInternal<K, VR, SessionStore<Bytes, byte[]>> materialized) {
         SessionBytesStoreSupplier supplier = (SessionBytesStoreSupplier) materialized.storeSupplier();
         if (supplier == null) {
@@ -184,26 +190,4 @@ public class SessionWindowedKStreamImpl<K, V> extends AbstractStream<K> implemen
             }
         };
     }
-
-    private <VR> StoreBuilder<SessionStore<K, VR>> storeBuilder(final String storeName, final Serde<VR> aggValueSerde) {
-        return Stores.sessionStoreBuilder(
-                Stores.persistentSessionStore(
-                        storeName,
-                        windows.maintainMs()),
-                keySerde,
-                aggValueSerde).withCachingEnabled();
-    }
-
-
-    @SuppressWarnings("unchecked")
-    private <VR> KTable<Windowed<K>, VR> doAggregate(final Initializer<VR> initializer,
-                                                     final Aggregator<? super K, ? super V, VR> aggregator,
-                                                     final Merger<? super K, VR> merger,
-                                                     final Serde<VR> serde) {
-        final String storeName = builder.newStoreName(AGGREGATE_NAME);
-        return (KTable<Windowed<K>, VR>) aggregateBuilder.build(new KStreamSessionWindowAggregate<>(windows, storeName, initializer, aggregator, merger),
-                                                                AGGREGATE_NAME,
-                                                                storeBuilder(storeName, serde),
-                                                                false);
-    }
 }
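
With the private doAggregate/storeBuilder path deleted, count() and reduce() now funnel
into the same Materialized-based build as aggregate(), defaulting to
Materialized.with(keySerde, Serdes.Long()) and Materialized.with(keySerde, valSerde)
respectively, so the serdes the grouped stream already carries are reused instead of
being dropped. A minimal sketch of the no-argument form; the topic name and session gap
are illustrative:

    final KTable<Windowed<String>, Long> counts = builder
        .stream("user-clicks", Consumed.with(Serdes.String(), Serdes.String()))
        .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
        .windowedBy(SessionWindows.with(5 * 60 * 1000L))
        .count();   // key serde and Serdes.Long() are filled in internally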
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImpl.java
index 5e54770..d1e5a17 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImpl.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImpl.java
@@ -63,42 +63,33 @@ public class TimeWindowedKStreamImpl<K, V, W extends Window> extends AbstractStr
 
     @Override
     public KTable<Windowed<K>, Long> count() {
-        return doAggregate(
-                aggregateBuilder.countInitializer,
-                aggregateBuilder.countAggregator,
-                Serdes.Long());
+        return count(Materialized.<K, Long, WindowStore<Bytes, byte[]>>with(keySerde, Serdes.Long()));
     }
 
+    @SuppressWarnings("unchecked")
     @Override
     public KTable<Windowed<K>, Long> count(final Materialized<K, Long, WindowStore<Bytes, byte[]>> materialized) {
         Objects.requireNonNull(materialized, "materialized can't be null");
         final MaterializedInternal<K, Long, WindowStore<Bytes, byte[]>> materializedInternal
                 = new MaterializedInternal<>(materialized, builder, AGGREGATE_NAME);
+        if (materializedInternal.keySerde() == null) {
+            materializedInternal.withKeySerde(keySerde);
+        }
         if (materializedInternal.valueSerde() == null) {
-            materialized.withValueSerde(Serdes.Long());
+            materializedInternal.withValueSerde(Serdes.Long());
         }
-        return aggregate(aggregateBuilder.countInitializer, aggregateBuilder.countAggregator, materialized);
+
+        return (KTable<Windowed<K>, Long>) aggregateBuilder.build(new KStreamWindowAggregate<>(windows, materializedInternal.storeName(), aggregateBuilder.countInitializer, aggregateBuilder.countAggregator),
+                AGGREGATE_NAME,
+                materialize(materializedInternal),
+                materializedInternal.isQueryable());
     }
 
 
-    @SuppressWarnings("unchecked")
     @Override
     public <VR> KTable<Windowed<K>, VR> aggregate(final Initializer<VR> initializer,
                                                   final Aggregator<? super K, ? super V, VR> aggregator) {
-        Objects.requireNonNull(initializer, "initializer can't be null");
-        Objects.requireNonNull(aggregator, "aggregator can't be null");
-        return doAggregate(initializer, aggregator, (Serde<VR>) valSerde);
-    }
-
-    @SuppressWarnings("unchecked")
-    private <VR> KTable<Windowed<K>, VR> doAggregate(final Initializer<VR> initializer,
-                                                     final Aggregator<? super K, ? super V, VR> aggregator,
-                                                     final Serde<VR> serde) {
-        final String storeName = builder.newStoreName(AGGREGATE_NAME);
-        return (KTable<Windowed<K>, VR>) aggregateBuilder.build(new KStreamWindowAggregate<>(windows, storeName, initializer, aggregator),
-                                                                AGGREGATE_NAME,
-                                                                windowStoreBuilder(storeName, serde),
-                                                                false);
+        return aggregate(initializer, aggregator, Materialized.<K, VR, WindowStore<Bytes, byte[]>>with(keySerde, null));
     }
 
     @SuppressWarnings("unchecked")
@@ -114,25 +105,15 @@ public class TimeWindowedKStreamImpl<K, V, W extends Window> extends AbstractStr
         if (materializedInternal.keySerde() == null) {
             materializedInternal.withKeySerde(keySerde);
         }
-        return (KTable<Windowed<K>, VR>) aggregateBuilder.build(new KStreamWindowAggregate<>(windows,
-                                                                                             materializedInternal.storeName(),
-                                                                                             initializer,
-                                                                                             aggregator),
+        return (KTable<Windowed<K>, VR>) aggregateBuilder.build(new KStreamWindowAggregate<>(windows, materializedInternal.storeName(), initializer, aggregator),
                                                                 AGGREGATE_NAME,
                                                                 materialize(materializedInternal),
-                                                                true);
+                                                                materializedInternal.isQueryable());
     }
 
-    @SuppressWarnings("unchecked")
     @Override
     public KTable<Windowed<K>, V> reduce(final Reducer<V> reducer) {
-        Objects.requireNonNull(reducer, "reducer can't be null");
-        final String storeName = builder.newStoreName(REDUCE_NAME);
-        return (KTable<Windowed<K>, V>) aggregateBuilder.build(new KStreamWindowReduce<K, V, W>(windows, storeName, reducer),
-                                                               REDUCE_NAME,
-                                                               windowStoreBuilder(storeName, valSerde),
-                                                               true);
-
+        return reduce(reducer, Materialized.<K, V, WindowStore<Bytes, byte[]>>with(keySerde, valSerde));
     }
 
     @SuppressWarnings("unchecked")
@@ -140,13 +121,20 @@ public class TimeWindowedKStreamImpl<K, V, W extends Window> extends AbstractStr
     public KTable<Windowed<K>, V> reduce(final Reducer<V> reducer, final Materialized<K, V, WindowStore<Bytes, byte[]>> materialized) {
         Objects.requireNonNull(reducer, "reducer can't be null");
         Objects.requireNonNull(materialized, "materialized can't be null");
+
         final MaterializedInternal<K, V, WindowStore<Bytes, byte[]>> materializedInternal
                 = new MaterializedInternal<>(materialized, builder, REDUCE_NAME);
+        if (materializedInternal.keySerde() == null) {
+            materializedInternal.withKeySerde(keySerde);
+        }
+        if (materializedInternal.valueSerde() == null) {
+            materializedInternal.withValueSerde(valSerde);
+        }
 
         return (KTable<Windowed<K>, V>) aggregateBuilder.build(new KStreamWindowReduce<K, V, W>(windows, materializedInternal.storeName(), reducer),
                                                                REDUCE_NAME,
                                                                materialize(materializedInternal),
-                                                               false);
+                                                               materializedInternal.isQueryable());
     }
 
     private <VR> StoreBuilder<WindowStore<K, VR>> materialize(final MaterializedInternal<K, VR, WindowStore<Bytes, byte[]>> materialized) {
@@ -173,18 +161,4 @@ public class TimeWindowedKStreamImpl<K, V, W extends Window> extends AbstractStr
         }
         return builder;
     }
-
-
-    private <VR> StoreBuilder<WindowStore<K, VR>> windowStoreBuilder(final String storeName, final Serde<VR> aggValueSerde) {
-        return Stores.windowStoreBuilder(
-                Stores.persistentWindowStore(
-                        storeName,
-                        windows.maintainMs(),
-                        windows.segments,
-                        windows.size(),
-                        false),
-                keySerde,
-                aggValueSerde).withCachingEnabled();
-    }
-
 }
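
aggregate() is the one case where no value serde can be reused: the result type is not
known up front, so the no-Materialized overload above passes Materialized.with(keySerde,
null) and callers must supply a value serde whenever the configured default does not
apply. A minimal sketch; the topic, window size, and aggregation logic are illustrative:

    final KTable<Windowed<String>, Integer> totals = builder
        .stream("purchases", Consumed.with(Serdes.String(), Serdes.Integer()))
        .groupByKey(Serialized.with(Serdes.String(), Serdes.Integer()))
        .windowedBy(TimeWindows.of(500L))
        .aggregate(
            () -> 0,                            // initializer
            (key, value, agg) -> agg + value,   // aggregator
            Materialized.<String, Integer, WindowStore<Bytes, byte[]>>with(
                Serdes.String(), Serdes.Integer()));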
diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/KStreamAggregationDedupIntegrationTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/KStreamAggregationDedupIntegrationTest.java
index 51bbb95..6dc0db8 100644
--- a/streams/src/test/java/org/apache/kafka/streams/integration/KStreamAggregationDedupIntegrationTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/integration/KStreamAggregationDedupIntegrationTest.java
@@ -24,6 +24,7 @@ import org.apache.kafka.common.serialization.LongDeserializer;
 import org.apache.kafka.common.serialization.Serdes;
 import org.apache.kafka.common.serialization.StringDeserializer;
 import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.utils.Bytes;
 import org.apache.kafka.streams.Consumed;
 import org.apache.kafka.streams.KafkaStreams;
 import org.apache.kafka.streams.KeyValue;
@@ -34,10 +35,14 @@ import org.apache.kafka.streams.integration.utils.IntegrationTestUtils;
 import org.apache.kafka.streams.kstream.KGroupedStream;
 import org.apache.kafka.streams.kstream.KStream;
 import org.apache.kafka.streams.kstream.KeyValueMapper;
+import org.apache.kafka.streams.kstream.Materialized;
+import org.apache.kafka.streams.kstream.Produced;
 import org.apache.kafka.streams.kstream.Reducer;
 import org.apache.kafka.streams.kstream.Serialized;
 import org.apache.kafka.streams.kstream.TimeWindows;
 import org.apache.kafka.streams.kstream.Windowed;
+import org.apache.kafka.streams.state.KeyValueStore;
+import org.apache.kafka.streams.state.WindowStore;
 import org.apache.kafka.test.IntegrationTest;
 import org.apache.kafka.test.MockMapper;
 import org.apache.kafka.test.TestUtils;
@@ -99,7 +104,7 @@ public class KStreamAggregationDedupIntegrationTest {
         streamsConfiguration.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 10 * 1024 * 1024L);
         streamsConfiguration.put(IntegrationTestUtils.INTERNAL_LEAVE_GROUP_ON_CLOSE, true);
 
-        KeyValueMapper<Integer, String, String> mapper = MockMapper.<Integer, String>selectValueMapper();
+        KeyValueMapper<Integer, String, String> mapper = MockMapper.selectValueMapper();
         stream = builder.stream(streamOneInput, Consumed.with(Serdes.Integer(), Serdes.String()));
         groupedStream = stream
             .groupBy(
@@ -126,9 +131,10 @@ public class KStreamAggregationDedupIntegrationTest {
     @Test
     public void shouldReduce() throws Exception {
         produceMessages(System.currentTimeMillis());
-        groupedStream.reduce(reducer, "reduce-by-key")
+        groupedStream
+                .reduce(reducer, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("reduce-by-key"))
                 .toStream()
-                .to(Serdes.String(), Serdes.String(), outputTopic);
+                .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
 
         startStreams();
 
@@ -173,14 +179,15 @@ public class KStreamAggregationDedupIntegrationTest {
         produceMessages(secondBatchTimestamp);
 
         groupedStream
-            .reduce(reducer, TimeWindows.of(500L), "reduce-time-windows")
+            .windowedBy(TimeWindows.of(500L))
+            .reduce(reducer, Materialized.<String, String, WindowStore<Bytes, byte[]>>as("reduce-time-windows"))
             .toStream(new KeyValueMapper<Windowed<String>, String, String>() {
                 @Override
                 public String apply(Windowed<String> windowedKey, String value) {
                     return windowedKey.key() + "@" + windowedKey.window().start();
                 }
             })
-            .to(Serdes.String(), Serdes.String(), outputTopic);
+            .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
 
         startStreams();
 
@@ -226,13 +233,14 @@ public class KStreamAggregationDedupIntegrationTest {
         produceMessages(timestamp);
 
         stream.groupByKey(Serialized.with(Serdes.Integer(), Serdes.String()))
-            .count(TimeWindows.of(500L), "count-windows")
+            .windowedBy(TimeWindows.of(500L))
+            .count(Materialized.<Integer, Long, WindowStore<Bytes, byte[]>>as("count-windows"))
             .toStream(new KeyValueMapper<Windowed<Integer>, Long, String>() {
                 @Override
                 public String apply(final Windowed<Integer> windowedKey, final Long value) {
                     return windowedKey.key() + "@" + windowedKey.window().start();
                 }
-            }).to(Serdes.String(), Serdes.Long(), outputTopic);
+            }).to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));
 
         startStreams();
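
The test migrations above are mechanical: the window moves out of the operator into
windowedBy(), the store name into Materialized, and the output serdes into Produced.
Condensed from the changes above, with the names the test already uses:

    stream.groupByKey(Serialized.with(Serdes.Integer(), Serdes.String()))
        .windowedBy(TimeWindows.of(500L))
        .count(Materialized.<Integer, Long, WindowStore<Bytes, byte[]>>as("count-windows"))
        .toStream((windowedKey, value) -> windowedKey.key() + "@" + windowedKey.window().start())
        .to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));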
 
diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/KStreamAggregationIntegrationTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/KStreamAggregationIntegrationTest.java
index 2efe9f2..658bf33 100644
--- a/streams/src/test/java/org/apache/kafka/streams/integration/KStreamAggregationIntegrationTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/integration/KStreamAggregationIntegrationTest.java
@@ -54,8 +54,10 @@ import org.apache.kafka.streams.kstream.WindowedSerdes;
 import org.apache.kafka.streams.kstream.internals.SessionWindow;
 import org.apache.kafka.streams.kstream.internals.TimeWindow;
 import org.apache.kafka.streams.state.KeyValueIterator;
+import org.apache.kafka.streams.state.KeyValueStore;
 import org.apache.kafka.streams.state.QueryableStoreTypes;
 import org.apache.kafka.streams.state.ReadOnlySessionStore;
+import org.apache.kafka.streams.state.SessionStore;
 import org.apache.kafka.streams.state.WindowStore;
 import org.apache.kafka.test.IntegrationTest;
 import org.apache.kafka.test.MockMapper;
@@ -162,14 +164,13 @@ public class KStreamAggregationIntegrationTest {
         IntegrationTestUtils.purgeLocalStreamsState(streamsConfiguration);
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldReduce() throws Exception {
         produceMessages(mockTime.milliseconds());
         groupedStream
-            .reduce(reducer, "reduce-by-key")
+            .reduce(reducer, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("reduce-by-key"))
             .toStream()
-            .to(Serdes.String(), Serdes.String(), outputTopic);
+            .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
 
         startStreams();
 
@@ -286,17 +287,15 @@ public class KStreamAggregationIntegrationTest {
         }
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldAggregate() throws Exception {
         produceMessages(mockTime.milliseconds());
         groupedStream.aggregate(
             initializer,
             aggregator,
-            Serdes.Integer(),
-            "aggregate-by-selected-key")
+            Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("aggregate-by-selected-key"))
             .toStream()
-            .to(Serdes.String(), Serdes.Integer(), outputTopic);
+            .to(outputTopic, Produced.with(Serdes.String(), Serdes.Integer()));
 
         startStreams();
 
@@ -441,26 +440,24 @@ public class KStreamAggregationIntegrationTest {
         )));
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldCount() throws Exception {
         produceMessages(mockTime.milliseconds());
 
-        groupedStream.count("count-by-key")
+        groupedStream.count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("count-by-key"))
                 .toStream()
-                .to(Serdes.String(), Serdes.Long(), outputTopic);
+                .to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));
 
         shouldCountHelper();
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldCountWithInternalStore() throws Exception {
         produceMessages(mockTime.milliseconds());
 
         groupedStream.count()
                 .toStream()
-                .to(Serdes.String(), Serdes.Long(), outputTopic);
+                .to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));
 
         shouldCountHelper();
     }
@@ -510,7 +507,6 @@ public class KStreamAggregationIntegrationTest {
 
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldCountSessionWindows() throws Exception {
         final long sessionGap = 5 * 60 * 1000L;
@@ -577,7 +573,8 @@ public class KStreamAggregationIntegrationTest {
 
         builder.stream(userSessionsStream, Consumed.with(Serdes.String(), Serdes.String()))
                 .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
-                .count(SessionWindows.with(sessionGap).until(maintainMillis))
+                .windowedBy(SessionWindows.with(sessionGap).until(maintainMillis))
+                .count()
                 .toStream()
                 .foreach(new ForeachAction<Windowed<String>, Long>() {
                     @Override
@@ -598,7 +595,6 @@ public class KStreamAggregationIntegrationTest {
         assertThat(results.get(new Windowed<>("penny", new SessionWindow(t3, t3))), equalTo(1L));
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldReduceSessionWindows() throws Exception {
         final long sessionGap = 1000L; // inactivity gap (ms) separating two sessions

@@ -665,12 +661,13 @@ public class KStreamAggregationIntegrationTest {
         final String userSessionsStore = "UserSessionsStore";
         builder.stream(userSessionsStream, Consumed.with(Serdes.String(), Serdes.String()))
                 .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
+                .windowedBy(SessionWindows.with(sessionGap).until(maintainMillis))
                 .reduce(new Reducer<String>() {
                     @Override
                     public String apply(final String value1, final String value2) {
                         return value1 + ":" + value2;
                     }
-                }, SessionWindows.with(sessionGap).until(maintainMillis), userSessionsStore)
+                }, Materialized.<String, String, SessionStore<Bytes, byte[]>>as(userSessionsStore))
                 .toStream()
                 .foreach(new ForeachAction<Windowed<String>, String>() {
                     @Override
@@ -734,10 +731,8 @@ public class KStreamAggregationIntegrationTest {
         kafkaStreams.start();
     }
 
-    private <K, V> List<KeyValue<K, V>> receiveMessages(final Deserializer<K>
-                                                            keyDeserializer,
-                                                        final Deserializer<V>
-                                                            valueDeserializer,
+    private <K, V> List<KeyValue<K, V>> receiveMessages(final Deserializer<K> keyDeserializer,
+                                                        final Deserializer<V> valueDeserializer,
                                                         final int numMessages)
         throws InterruptedException {
         return receiveMessages(keyDeserializer, valueDeserializer, null, numMessages);
@@ -767,9 +762,9 @@ public class KStreamAggregationIntegrationTest {
     }
 
     private <K, V> String readWindowedKeyedMessagesViaConsoleConsumer(final Deserializer<K> keyDeserializer,
-                                                  final Deserializer<V> valueDeserializer,
-                                                  final Class innerClass,
-                                                  final int numMessages) {
+                                                                      final Deserializer<V> valueDeserializer,
+                                                                      final Class innerClass,
+                                                                      final int numMessages) {
         ByteArrayOutputStream newConsole = new ByteArrayOutputStream();
         PrintStream originalStream = System.out;
         try (PrintStream newStream = new PrintStream(newConsole)) {
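
For the session-window cases above, the store name that used to be a trailing String
argument now rides in on a SessionStore-typed Materialized. Condensed from the
shouldReduceSessionWindows changes, with the names the test already uses:

    builder.stream(userSessionsStream, Consumed.with(Serdes.String(), Serdes.String()))
        .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
        .windowedBy(SessionWindows.with(sessionGap).until(maintainMillis))
        .reduce((value1, value2) -> value1 + ":" + value2,
                Materialized.<String, String, SessionStore<Bytes, byte[]>>as(userSessionsStore));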
diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/KStreamsFineGrainedAutoResetIntegrationTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/KStreamsFineGrainedAutoResetIntegrationTest.java
index 5415b58..0f7df6b 100644
--- a/streams/src/test/java/org/apache/kafka/streams/integration/KStreamsFineGrainedAutoResetIntegrationTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/integration/KStreamsFineGrainedAutoResetIntegrationTest.java
@@ -37,6 +37,7 @@ import org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster;
 import org.apache.kafka.streams.integration.utils.IntegrationTestUtils;
 import org.apache.kafka.streams.kstream.KStream;
 import org.apache.kafka.streams.kstream.KStreamBuilder;
+import org.apache.kafka.streams.kstream.Produced;
 import org.apache.kafka.test.IntegrationTest;
 import org.apache.kafka.test.StreamsTestUtils;
 import org.apache.kafka.test.TestCondition;
@@ -193,9 +194,9 @@ public class KStreamsFineGrainedAutoResetIntegrationTest {
         final KStream<String, String> pattern2Stream = builder.stream(Pattern.compile("topic-[A-D]" + topicSuffix), Consumed.<String, String>with(Topology.AutoOffsetReset.LATEST));
         final KStream<String, String> namedTopicsStream = builder.stream(Arrays.asList(topicY, topicZ));
 
-        pattern1Stream.to(stringSerde, stringSerde, outputTopic);
-        pattern2Stream.to(stringSerde, stringSerde, outputTopic);
-        namedTopicsStream.to(stringSerde, stringSerde, outputTopic);
+        pattern1Stream.to(outputTopic, Produced.with(stringSerde, stringSerde));
+        pattern2Stream.to(outputTopic, Produced.with(stringSerde, stringSerde));
+        namedTopicsStream.to(outputTopic, Produced.with(stringSerde, stringSerde));
 
         final Properties producerConfig = TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class);
 
@@ -289,7 +290,7 @@ public class KStreamsFineGrainedAutoResetIntegrationTest {
         final StreamsBuilder builder = new StreamsBuilder();
         final KStream<String, String> exceptionStream = builder.stream(NOOP);
 
-        exceptionStream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
+        exceptionStream.to(DEFAULT_OUTPUT_TOPIC, Produced.with(stringSerde, stringSerde));
 
         KafkaStreams streams = new KafkaStreams(builder.build(), localConfig);
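
The remaining churn in this test is the same one-line rewrite applied everywhere a
stream is written out: the serde-bearing to() overload becomes to(topic,
Produced.with(...)). Condensed from the changes above:

    // previously: exceptionStream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
    exceptionStream.to(DEFAULT_OUTPUT_TOPIC, Produced.with(stringSerde, stringSerde));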
 
diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/RegexSourceIntegrationTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/RegexSourceIntegrationTest.java
index 57ed161..d0361dc 100644
--- a/streams/src/test/java/org/apache/kafka/streams/integration/RegexSourceIntegrationTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/integration/RegexSourceIntegrationTest.java
@@ -33,6 +33,7 @@ import org.apache.kafka.streams.StreamsConfig;
 import org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster;
 import org.apache.kafka.streams.integration.utils.IntegrationTestUtils;
 import org.apache.kafka.streams.kstream.KStream;
+import org.apache.kafka.streams.kstream.Produced;
 import org.apache.kafka.streams.processor.ProcessorSupplier;
 import org.apache.kafka.streams.processor.TopologyBuilder;
 import org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier;
@@ -143,7 +144,7 @@ public class RegexSourceIntegrationTest {
 
         final KStream<String, String> pattern1Stream = builder.stream(Pattern.compile("TEST-TOPIC-\\d"));
 
-        pattern1Stream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
+        pattern1Stream.to(DEFAULT_OUTPUT_TOPIC, Produced.with(stringSerde, stringSerde));
         final List<String> assignedTopics = new ArrayList<>();
         streams = new KafkaStreams(builder.build(), streamsConfiguration, new DefaultKafkaClientSupplier() {
             @Override
@@ -191,7 +192,7 @@ public class RegexSourceIntegrationTest {
 
         final KStream<String, String> pattern1Stream = builder.stream(Pattern.compile("TEST-TOPIC-[A-Z]"));
 
-        pattern1Stream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
+        pattern1Stream.to(DEFAULT_OUTPUT_TOPIC, Produced.with(stringSerde, stringSerde));
 
         final List<String> assignedTopics = new ArrayList<>();
         streams = new KafkaStreams(builder.build(), streamsConfiguration, new DefaultKafkaClientSupplier() {
@@ -280,9 +281,9 @@ public class RegexSourceIntegrationTest {
         final KStream<String, String> pattern2Stream = builder.stream(Pattern.compile("topic-[A-D]"));
         final KStream<String, String> namedTopicsStream = builder.stream(Arrays.asList(TOPIC_Y, TOPIC_Z));
 
-        pattern1Stream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
-        pattern2Stream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
-        namedTopicsStream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
+        pattern1Stream.to(DEFAULT_OUTPUT_TOPIC, Produced.with(stringSerde, stringSerde));
+        pattern2Stream.to(DEFAULT_OUTPUT_TOPIC, Produced.with(stringSerde, stringSerde));
+        namedTopicsStream.to(DEFAULT_OUTPUT_TOPIC, Produced.with(stringSerde, stringSerde));
 
         streams = new KafkaStreams(builder.build(), streamsConfiguration);
         streams.start();
@@ -326,8 +327,8 @@ public class RegexSourceIntegrationTest {
             final KStream<String, String> partitionedStreamFollower = builderFollower.stream(Pattern.compile("partitioned-\\d"));
 
 
-            partitionedStreamLeader.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
-            partitionedStreamFollower.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
+            partitionedStreamLeader.to(DEFAULT_OUTPUT_TOPIC, Produced.with(stringSerde, stringSerde));
+            partitionedStreamFollower.to(DEFAULT_OUTPUT_TOPIC, Produced.with(stringSerde, stringSerde));
 
             final List<String> leaderAssignment = new ArrayList<>();
             final List<String> followerAssignment = new ArrayList<>();
@@ -397,8 +398,8 @@ public class RegexSourceIntegrationTest {
         final KStream<String, String> pattern2Stream = builder.stream(Pattern.compile("f.*"));
 
 
-        pattern1Stream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
-        pattern2Stream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
+        pattern1Stream.to(DEFAULT_OUTPUT_TOPIC, Produced.with(stringSerde, stringSerde));
+        pattern2Stream.to(DEFAULT_OUTPUT_TOPIC, Produced.with(stringSerde, stringSerde));
 
         streams = new KafkaStreams(builder.build(), streamsConfiguration);
         streams.start();
diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/KStreamBuilderTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/KStreamBuilderTest.java
index b63f2de..255c3eb 100644
--- a/streams/src/test/java/org/apache/kafka/streams/kstream/KStreamBuilderTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/kstream/KStreamBuilderTest.java
@@ -18,8 +18,9 @@ package org.apache.kafka.streams.kstream;
 
 import org.apache.kafka.common.serialization.Serde;
 import org.apache.kafka.common.serialization.Serdes;
-import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.utils.Bytes;
 import org.apache.kafka.common.utils.Utils;
+import org.apache.kafka.common.serialization.StringSerializer;
 import org.apache.kafka.streams.StreamsConfig;
 import org.apache.kafka.streams.TopologyTestDriverWrapper;
 import org.apache.kafka.streams.errors.TopologyBuilderException;
@@ -29,6 +30,7 @@ import org.apache.kafka.streams.processor.StateStore;
 import org.apache.kafka.streams.processor.TopologyBuilder;
 import org.apache.kafka.streams.processor.internals.ProcessorTopology;
 import org.apache.kafka.streams.processor.internals.SourceNode;
+import org.apache.kafka.streams.state.KeyValueStore;
 import org.apache.kafka.streams.test.ConsumerRecordFactory;
 import org.apache.kafka.test.MockMapper;
 import org.apache.kafka.test.MockProcessorSupplier;
@@ -201,7 +203,7 @@ public class KStreamBuilderTest {
         });
 
         final KStream<String, String> merged = processedSource1.merge(processedSource2).merge(source3);
-        merged.groupByKey().count("my-table");
+        merged.groupByKey().count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("my-table"));
         final Map<String, List<String>> actual = builder.stateStoreNameToSourceTopics();
         assertEquals(Utils.mkList("topic-1", "topic-2", "topic-3"), actual.get("my-table"));
     }
@@ -316,7 +318,7 @@ public class KStreamBuilderTest {
         assertEquals(Collections.singletonList("table-topic"), builder.stateStoreNameToSourceTopics().get("table-store"));
 
         final KStream<String, String> mapped = playEvents.map(MockMapper.<String, String>selectValueKeyValueMapper());
-        mapped.leftJoin(table, MockValueJoiner.TOSTRING_JOINER).groupByKey().count("count");
+        mapped.leftJoin(table, MockValueJoiner.TOSTRING_JOINER).groupByKey().count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("count"));
         assertEquals(Collections.singletonList("table-topic"), builder.stateStoreNameToSourceTopics().get("table-store"));
         assertEquals(Collections.singletonList(APP_ID + "-KSTREAM-MAP-0000000003-repartition"), builder.stateStoreNameToSourceTopics().get("count"));
     }
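
In this builder test (and the parallel InternalStreamsBuilderTest changes below) the
Materialized store name plays the role the old String argument did: it names the state
store, and it is the key the stateStoreNameToSourceTopics() assertions look up.
Condensed from the changes above:

    merged.groupByKey()
          .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("my-table"));
    // builder.stateStoreNameToSourceTopics().get("my-table") resolves the source topics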
diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilderTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilderTest.java
index 76ca495..acb9dbb 100644
--- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilderTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilderTest.java
@@ -120,7 +120,7 @@ public class InternalStreamsBuilderTest {
         });
 
         final KStream<String, String> merged = processedSource1.merge(processedSource2).merge(source3);
-        merged.groupByKey().count("my-table");
+        merged.groupByKey().count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("my-table"));
         final Map<String, List<String>> actual = builder.internalTopologyBuilder.stateStoreNameToSourceTopics();
         assertEquals(Utils.mkList("topic-1", "topic-2", "topic-3"), actual.get("my-table"));
     }
@@ -266,7 +266,7 @@ public class InternalStreamsBuilderTest {
         assertEquals(Collections.singletonList("table-topic"), builder.internalTopologyBuilder.stateStoreNameToSourceTopics().get("table-store"));
 
         final KStream<String, String> mapped = playEvents.map(MockMapper.<String, String>selectValueKeyValueMapper());
-        mapped.leftJoin(table, MockValueJoiner.TOSTRING_JOINER).groupByKey().count("count");
+        mapped.leftJoin(table, MockValueJoiner.TOSTRING_JOINER).groupByKey().count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("count"));
         assertEquals(Collections.singletonList("table-topic"), builder.internalTopologyBuilder.stateStoreNameToSourceTopics().get("table-store"));
         assertEquals(Collections.singletonList(APP_ID + "-KSTREAM-MAP-0000000003-repartition"), builder.internalTopologyBuilder.stateStoreNameToSourceTopics().get("count"));
     }
diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImplTest.java
index e7a9226..66ed49f 100644
--- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImplTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImplTest.java
@@ -40,7 +40,6 @@ import org.apache.kafka.streams.kstream.SessionWindows;
 import org.apache.kafka.streams.kstream.TimeWindows;
 import org.apache.kafka.streams.kstream.Windowed;
 import org.apache.kafka.streams.kstream.Windows;
-import org.apache.kafka.streams.processor.StateStoreSupplier;
 import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender;
 import org.apache.kafka.streams.state.KeyValueStore;
 import org.apache.kafka.streams.state.SessionStore;
@@ -85,125 +84,64 @@ public class KGroupedStreamImplTest {
 
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotHaveNullReducerOnReduce() {
-        groupedStream.reduce(null, "store");
+        groupedStream.reduce(null);
     }
 
-    @SuppressWarnings("deprecation")
-    @Test
-    public void shouldAllowNullStoreNameOnReduce() {
-        groupedStream.reduce(MockReducer.STRING_ADDER, (String) null);
-    }
-
-    @SuppressWarnings("deprecation")
     @Test(expected = InvalidTopicException.class)
     public void shouldNotHaveInvalidStoreNameOnReduce() {
-        groupedStream.reduce(MockReducer.STRING_ADDER, INVALID_STORE_NAME);
+        groupedStream.reduce(MockReducer.STRING_ADDER, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(INVALID_STORE_NAME));
     }
 
-    @SuppressWarnings("deprecation")
-    @Test(expected = NullPointerException.class)
-    public void shouldNotHaveNullStoreSupplierOnReduce() {
-        groupedStream.reduce(MockReducer.STRING_ADDER, (StateStoreSupplier<KeyValueStore>) null);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Test(expected = NullPointerException.class)
-    public void shouldNotHaveNullStoreSupplierOnCount() {
-        groupedStream.count((StateStoreSupplier<KeyValueStore>) null);
-    }
-
-
-    @SuppressWarnings("deprecation")
-    @Test(expected = NullPointerException.class)
-    public void shouldNotHaveNullStoreSupplierOnWindowedCount() {
-        groupedStream.count(TimeWindows.of(10), (StateStoreSupplier<WindowStore>) null);
-    }
-
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotHaveNullReducerWithWindowedReduce() {
-        groupedStream.reduce(null, TimeWindows.of(10), "store");
+        groupedStream.windowedBy(TimeWindows.of(10)).reduce(null, Materialized.<String, String, WindowStore<Bytes, byte[]>>as("store"));
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotHaveNullWindowsWithWindowedReduce() {
-        groupedStream.reduce(MockReducer.STRING_ADDER, (Windows) null, "store");
+        groupedStream.windowedBy((Windows) null);
     }
 
-    @SuppressWarnings("deprecation")
-    @Test
-    public void shouldAllowNullStoreNameWithWindowedReduce() {
-        groupedStream.reduce(MockReducer.STRING_ADDER, TimeWindows.of(10), (String) null);
-    }
-
-    @SuppressWarnings("deprecation")
     @Test(expected = InvalidTopicException.class)
     public void shouldNotHaveInvalidStoreNameWithWindowedReduce() {
-        groupedStream.reduce(MockReducer.STRING_ADDER, TimeWindows.of(10), INVALID_STORE_NAME);
+        groupedStream.windowedBy(TimeWindows.of(10)).reduce(MockReducer.STRING_ADDER, Materialized.<String, String, WindowStore<Bytes, byte[]>>as(INVALID_STORE_NAME));
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotHaveNullInitializerOnAggregate() {
-        groupedStream.aggregate(null, MockAggregator.TOSTRING_ADDER, Serdes.String(), "store");
+        groupedStream.aggregate(null, MockAggregator.TOSTRING_ADDER, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("store"));
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotHaveNullAdderOnAggregate() {
-        groupedStream.aggregate(MockInitializer.STRING_INIT, null, Serdes.String(), "store");
-    }
-
-    @SuppressWarnings("deprecation")
-    @Test
-    public void shouldAllowNullStoreNameOnAggregate() {
-        groupedStream.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, Serdes.String(), null);
+        groupedStream.aggregate(MockInitializer.STRING_INIT, null, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("store"));
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = InvalidTopicException.class)
     public void shouldNotHaveInvalidStoreNameOnAggregate() {
-        groupedStream.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, Serdes.String(), INVALID_STORE_NAME);
+        groupedStream.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(INVALID_STORE_NAME));
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotHaveNullInitializerOnWindowedAggregate() {
-        groupedStream.aggregate(null, MockAggregator.TOSTRING_ADDER, TimeWindows.of(10), Serdes.String(), "store");
+        groupedStream.windowedBy(TimeWindows.of(10)).aggregate(null, MockAggregator.TOSTRING_ADDER, Materialized.<String, String, WindowStore<Bytes, byte[]>>as("store"));
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotHaveNullAdderOnWindowedAggregate() {
-        groupedStream.aggregate(MockInitializer.STRING_INIT, null, TimeWindows.of(10), Serdes.String(), "store");
+        groupedStream.windowedBy(TimeWindows.of(10)).aggregate(MockInitializer.STRING_INIT, null, Materialized.<String, String, WindowStore<Bytes, byte[]>>as("store"));
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotHaveNullWindowsOnWindowedAggregate() {
-        groupedStream.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, null, Serdes.String(), "store");
-    }
-
-    @SuppressWarnings("deprecation")
-    @Test
-    public void shouldAllowNullStoreNameOnWindowedAggregate() {
-        groupedStream.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, TimeWindows.of(10), Serdes.String(), null);
+        groupedStream.windowedBy((Windows) null);
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = InvalidTopicException.class)
     public void shouldNotHaveInvalidStoreNameOnWindowedAggregate() {
-        groupedStream.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, TimeWindows.of(10), Serdes.String(), INVALID_STORE_NAME);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Test(expected = NullPointerException.class)
-    public void shouldNotHaveNullStoreSupplierOnWindowedAggregate() {
-        groupedStream.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, TimeWindows.of(10), (StateStoreSupplier<WindowStore>) null);
+        groupedStream.windowedBy(TimeWindows.of(10)).aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, Materialized.<String, String, WindowStore<Bytes, byte[]>>as(INVALID_STORE_NAME));
     }
 
     private void doAggregateSessionWindows(final Map<Windowed<String>, Integer> results) {
@@ -220,11 +158,10 @@ public class KGroupedStreamImplTest {
         assertEquals(Integer.valueOf(3), results.get(new Windowed<>("1", new SessionWindow(70, 100))));
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldAggregateSessionWindows() {
         final Map<Windowed<String>, Integer> results = new HashMap<>();
-        final KTable<Windowed<String>, Integer> table = groupedStream.aggregate(new Initializer<Integer>() {
+        final KTable<Windowed<String>, Integer> table = groupedStream.windowedBy(SessionWindows.with(30)).aggregate(new Initializer<Integer>() {
             @Override
             public Integer apply() {
                 return 0;
@@ -239,7 +176,7 @@ public class KGroupedStreamImplTest {
             public Integer apply(final String aggKey, final Integer aggOne, final Integer aggTwo) {
                 return aggOne + aggTwo;
             }
-        }, SessionWindows.with(30), Serdes.Integer(), "session-store");
+        }, Materialized.<String, Integer, SessionStore<Bytes, byte[]>>as("session-store").withValueSerde(Serdes.Integer()));
         table.toStream().foreach(new ForeachAction<Windowed<String>, Integer>() {
             @Override
             public void apply(final Windowed<String> key, final Integer value) {
@@ -251,11 +188,10 @@ public class KGroupedStreamImplTest {
         assertEquals(table.queryableStoreName(), "session-store");
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldAggregateSessionWindowsWithInternalStoreName() {
         final Map<Windowed<String>, Integer> results = new HashMap<>();
-        final KTable<Windowed<String>, Integer> table = groupedStream.aggregate(new Initializer<Integer>() {
+        final KTable<Windowed<String>, Integer> table = groupedStream.windowedBy(SessionWindows.with(30)).aggregate(new Initializer<Integer>() {
             @Override
             public Integer apply() {
                 return 0;
@@ -270,7 +206,7 @@ public class KGroupedStreamImplTest {
             public Integer apply(final String aggKey, final Integer aggOne, final Integer aggTwo) {
                 return aggOne + aggTwo;
             }
-        }, SessionWindows.with(30), Serdes.Integer());
+        }, Materialized.<String, Integer, SessionStore<Bytes, byte[]>>with(null, Serdes.Integer()));
         table.toStream().foreach(new ForeachAction<Windowed<String>, Integer>() {
             @Override
             public void apply(final Windowed<String> key, final Integer value) {
@@ -295,11 +231,11 @@ public class KGroupedStreamImplTest {
         assertEquals(Long.valueOf(3), results.get(new Windowed<>("1", new SessionWindow(70, 100))));
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldCountSessionWindows() {
         final Map<Windowed<String>, Long> results = new HashMap<>();
-        final KTable<Windowed<String>, Long> table = groupedStream.count(SessionWindows.with(30), "session-store");
+        final KTable<Windowed<String>, Long> table = groupedStream.windowedBy(SessionWindows.with(30))
+                .count(Materialized.<String, Long, SessionStore<Bytes, byte[]>>as("session-store"));
         table.toStream().foreach(new ForeachAction<Windowed<String>, Long>() {
             @Override
             public void apply(final Windowed<String> key, final Long value) {
@@ -310,11 +246,10 @@ public class KGroupedStreamImplTest {
         assertEquals(table.queryableStoreName(), "session-store");
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldCountSessionWindowsWithInternalStoreName() {
         final Map<Windowed<String>, Long> results = new HashMap<>();
-        final KTable<Windowed<String>, Long> table = groupedStream.count(SessionWindows.with(30));
+        final KTable<Windowed<String>, Long> table = groupedStream.windowedBy(SessionWindows.with(30)).count();
         table.toStream().foreach(new ForeachAction<Windowed<String>, Long>() {
             @Override
             public void apply(final Windowed<String> key, final Long value) {
@@ -339,20 +274,16 @@ public class KGroupedStreamImplTest {
         assertEquals("A:B:C", results.get(new Windowed<>("1", new SessionWindow(70, 100))));
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldReduceSessionWindows() {
         final Map<Windowed<String>, String> results = new HashMap<>();
-        final KTable<Windowed<String>, String> table = groupedStream.reduce(
-            new Reducer<String>() {
-                @Override
-                public String apply(final String value1, final String value2) {
-                    return value1 + ":" + value2;
-                }
-            },
-            SessionWindows.with(30),
-            "session-store"
-        );
+        final KTable<Windowed<String>, String> table = groupedStream.windowedBy(SessionWindows.with(30))
+                .reduce(new Reducer<String>() {
+                    @Override
+                    public String apply(final String value1, final String value2) {
+                        return value1 + ":" + value2;
+                    }
+                }, Materialized.<String, String, SessionStore<Bytes, byte[]>>as("session-store"));
         table.toStream().foreach(new ForeachAction<Windowed<String>, String>() {
             @Override
             public void apply(final Windowed<String> key, final String value) {
@@ -363,19 +294,16 @@ public class KGroupedStreamImplTest {
         assertEquals(table.queryableStoreName(), "session-store");
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldReduceSessionWindowsWithInternalStoreName() {
         final Map<Windowed<String>, String> results = new HashMap<>();
-        final KTable<Windowed<String>, String> table = groupedStream.reduce(
-            new Reducer<String>() {
-                @Override
-                public String apply(final String value1, final String value2) {
-                    return value1 + ":" + value2;
-                }
-            },
-            SessionWindows.with(30)
-        );
+        final KTable<Windowed<String>, String> table = groupedStream.windowedBy(SessionWindows.with(30))
+                .reduce(new Reducer<String>() {
+                    @Override
+                    public String apply(final String value1, final String value2) {
+                        return value1 + ":" + value2;
+                    }
+                });
         table.toStream().foreach(new ForeachAction<Windowed<String>, String>() {
             @Override
             public void apply(final Windowed<String> key, final String value) {
@@ -386,136 +314,78 @@ public class KGroupedStreamImplTest {
         assertNull(table.queryableStoreName());
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotAcceptNullReducerWhenReducingSessionWindows() {
-        groupedStream.reduce(null, SessionWindows.with(10), "store");
+        groupedStream.windowedBy(SessionWindows.with(30)).reduce(null, Materialized.<String, String, SessionStore<Bytes, byte[]>>as("store"));
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotAcceptNullSessionWindowsReducingSessionWindows() {
-        groupedStream.reduce(MockReducer.STRING_ADDER, (SessionWindows) null, "store");
-    }
-
-    @SuppressWarnings("deprecation")
-    @Test
-    public void shouldAcceptNullStoreNameWhenReducingSessionWindows() {
-        groupedStream.reduce(MockReducer.STRING_ADDER, SessionWindows.with(10), (String) null);
+        groupedStream.windowedBy((SessionWindows) null);
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = InvalidTopicException.class)
     public void shouldNotAcceptInvalidStoreNameWhenReducingSessionWindows() {
-        groupedStream.reduce(MockReducer.STRING_ADDER, SessionWindows.with(10), INVALID_STORE_NAME);
+        groupedStream.windowedBy(SessionWindows.with(30)).reduce(MockReducer.STRING_ADDER, Materialized.<String, String, SessionStore<Bytes, byte[]>>as(INVALID_STORE_NAME));
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotAcceptNullStateStoreSupplierWhenReducingSessionWindows() {
-        groupedStream.reduce(MockReducer.STRING_ADDER, SessionWindows.with(10), (StateStoreSupplier<SessionStore>) null);
+        groupedStream.windowedBy(SessionWindows.with(30)).reduce(null, Materialized.<String, String, SessionStore<Bytes, byte[]>>as(null));
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotAcceptNullInitializerWhenAggregatingSessionWindows() {
-        groupedStream.aggregate(null, MockAggregator.TOSTRING_ADDER, new Merger<String, String>() {
+        groupedStream.windowedBy(SessionWindows.with(30)).aggregate(null, MockAggregator.TOSTRING_ADDER, new Merger<String, String>() {
             @Override
             public String apply(final String aggKey, final String aggOne, final String aggTwo) {
                 return null;
             }
-        }, SessionWindows.with(10), Serdes.String(), "storeName");
+        }, Materialized.<String, String, SessionStore<Bytes, byte[]>>as("storeName"));
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotAcceptNullAggregatorWhenAggregatingSessionWindows() {
-        groupedStream.aggregate(MockInitializer.STRING_INIT, null, new Merger<String, String>() {
+        groupedStream.windowedBy(SessionWindows.with(30)).aggregate(MockInitializer.STRING_INIT, null, new Merger<String, String>() {
             @Override
             public String apply(final String aggKey, final String aggOne, final String aggTwo) {
                 return null;
             }
-        }, SessionWindows.with(10), Serdes.String(), "storeName");
+        }, Materialized.<String, String, SessionStore<Bytes, byte[]>>as("storeName"));
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotAcceptNullSessionMergerWhenAggregatingSessionWindows() {
-        groupedStream.aggregate(
-            MockInitializer.STRING_INIT,
-            MockAggregator.TOSTRING_ADDER,
-            null,
-            SessionWindows.with(10),
-            Serdes.String(),
-            "storeName");
+        groupedStream.windowedBy(SessionWindows.with(30)).aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER,
+                null,
+                Materialized.<String, String, SessionStore<Bytes, byte[]>>as("storeName"));
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = NullPointerException.class)
     public void shouldNotAcceptNullSessionWindowsWhenAggregatingSessionWindows() {
-        groupedStream.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, new Merger<String, String>() {
-            @Override
-            public String apply(final String aggKey, final String aggOne, final String aggTwo) {
-                return null;
-            }
-        }, null, Serdes.String(), "storeName");
+        groupedStream.windowedBy((SessionWindows) null);
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldAcceptNullStoreNameWhenAggregatingSessionWindows() {
-        groupedStream.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, new Merger<String, String>() {
-            @Override
-            public String apply(final String aggKey, final String aggOne, final String aggTwo) {
-                return null;
-            }
-        }, SessionWindows.with(10), Serdes.String(), (String) null);
+        groupedStream.windowedBy(SessionWindows.with(10))
+                .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, new Merger<String, String>() {
+                    @Override
+                    public String apply(final String aggKey, final String aggOne, final String aggTwo) {
+                        return null;
+                    }
+                }, Materialized.<String, String, SessionStore<Bytes, byte[]>>with(Serdes.String(), Serdes.String()));
     }
 
-    @SuppressWarnings("deprecation")
     @Test(expected = InvalidTopicException.class)
     public void shouldNotAcceptInvalidStoreNameWhenAggregatingSessionWindows() {
-        groupedStream.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, new Merger<String, String>() {
-            @Override
-            public String apply(final String aggKey, final String aggOne, final String aggTwo) {
-                return null;
-            }
-        }, SessionWindows.with(10), Serdes.String(), INVALID_STORE_NAME);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Test(expected = NullPointerException.class)
-    public void shouldNotAcceptNullStateStoreSupplierNameWhenAggregatingSessionWindows() {
-        groupedStream.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, new Merger<String, String>() {
-            @Override
-            public String apply(final String aggKey, final String aggOne, final String aggTwo) {
-                return null;
-            }
-        }, SessionWindows.with(10), Serdes.String(), (StateStoreSupplier<SessionStore>) null);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Test(expected = NullPointerException.class)
-    public void shouldNotAcceptNullSessionWindowsWhenCountingSessionWindows() {
-        groupedStream.count((SessionWindows) null, "store");
-    }
-
-    @SuppressWarnings("deprecation")
-    @Test
-    public void shouldAcceptNullStoreNameWhenCountingSessionWindows() {
-        groupedStream.count(SessionWindows.with(90), (String) null);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Test(expected = InvalidTopicException.class)
-    public void shouldNotAcceptInvalidStoreNameWhenCountingSessionWindows() {
-        groupedStream.count(SessionWindows.with(90), INVALID_STORE_NAME);
-    }
-
-    @SuppressWarnings("deprecation")
-    @Test(expected = NullPointerException.class)
-    public void shouldNotAcceptNullStateStoreSupplierWhenCountingSessionWindows() {
-        groupedStream.count(SessionWindows.with(90), (StateStoreSupplier<SessionStore>) null);
+        groupedStream.windowedBy(SessionWindows.with(10))
+                .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, new Merger<String, String>() {
+                    @Override
+                    public String apply(final String aggKey, final String aggOne, final String aggTwo) {
+                        return null;
+                    }
+                }, Materialized.<String, String, SessionStore<Bytes, byte[]>>as(INVALID_STORE_NAME));
     }
 
     @SuppressWarnings("unchecked")
@@ -685,13 +555,10 @@ public class KGroupedStreamImplTest {
         )));
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldCountWindowed() {
         final List<KeyValue<Windowed<String>, Long>> results = new ArrayList<>();
-        groupedStream.count(
-            TimeWindows.of(500L),
-            "aggregate-by-key-windowed")
+        groupedStream.windowedBy(TimeWindows.of(500L)).count(Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("aggregate-by-key-windowed"))
             .toStream()
             .foreach(new ForeachAction<Windowed<String>, Long>() {
                 @Override
@@ -703,12 +570,10 @@ public class KGroupedStreamImplTest {
         doCountWindowed(results);
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void shouldCountWindowedWithInternalStoreName() {
         final List<KeyValue<Windowed<String>, Long>> results = new ArrayList<>();
-        groupedStream.count(
-            TimeWindows.of(500L))
+        groupedStream.windowedBy(TimeWindows.of(500L)).count()
             .toStream()
             .foreach(new ForeachAction<Windowed<String>, Long>() {
                 @Override
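
The hunks above all follow the same KIP-182 migration: the deprecated count/reduce/aggregate overloads that took a Windows plus a store name (or a StateStoreSupplier) become windowedBy(...) followed by an operator that takes a Materialized. A minimal standalone sketch of the pattern, assuming a String-keyed "words" topic; topic and store names here are illustrative, not from the commit:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.Consumed;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.Serialized;
    import org.apache.kafka.streams.kstream.TimeWindows;
    import org.apache.kafka.streams.kstream.Windowed;
    import org.apache.kafka.streams.state.WindowStore;

    public class WindowedCountExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            // Old (removed): groupedStream.count(TimeWindows.of(500L), "counts-store");
            final KTable<Windowed<String>, Long> counts = builder
                .stream("words", Consumed.with(Serdes.String(), Serdes.String()))
                .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
                .windowedBy(TimeWindows.of(500L))
                .count(Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("counts-store"));
        }
    }
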
diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedTableImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedTableImplTest.java
index b614732..247f631 100644
--- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedTableImplTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedTableImplTest.java
@@ -31,7 +31,6 @@ import org.apache.kafka.streams.kstream.KTable;
 import org.apache.kafka.streams.kstream.KeyValueMapper;
 import org.apache.kafka.streams.kstream.Materialized;
 import org.apache.kafka.streams.kstream.Serialized;
-import org.apache.kafka.streams.processor.StateStoreSupplier;
 import org.apache.kafka.streams.state.KeyValueStore;
 import org.apache.kafka.streams.test.ConsumerRecordFactory;
 import org.apache.kafka.test.MockAggregator;
@@ -52,7 +51,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 
 
-@SuppressWarnings("deprecation")
 public class KGroupedTableImplTest {
 
     private final StreamsBuilder builder = new StreamsBuilder();
@@ -67,59 +65,39 @@ public class KGroupedTableImplTest {
                 .groupBy(MockMapper.<String, String>selectValueKeyValueMapper());
     }
 
-    @Test
-    public void shouldAllowNullStoreNameOnCount()  {
-        groupedTable.count((String) null);
-    }
-
-    @Test
-    public void shouldAllowNullStoreNameOnAggregate() {
-        groupedTable.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, MockAggregator.TOSTRING_REMOVER, (String) null);
-    }
-
     @Test(expected = InvalidTopicException.class)
     public void shouldNotAllowInvalidStoreNameOnAggregate() {
-        groupedTable.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, MockAggregator.TOSTRING_REMOVER, INVALID_STORE_NAME);
+        groupedTable.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, MockAggregator.TOSTRING_REMOVER, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(INVALID_STORE_NAME));
     }
 
     @Test(expected = NullPointerException.class)
     public void shouldNotAllowNullInitializerOnAggregate() {
-        groupedTable.aggregate(null, MockAggregator.TOSTRING_ADDER, MockAggregator.TOSTRING_REMOVER, "store");
+        groupedTable.aggregate(null, MockAggregator.TOSTRING_ADDER, MockAggregator.TOSTRING_REMOVER, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("store"));
     }
 
     @Test(expected = NullPointerException.class)
     public void shouldNotAllowNullAdderOnAggregate() {
-        groupedTable.aggregate(MockInitializer.STRING_INIT, null, MockAggregator.TOSTRING_REMOVER, "store");
+        groupedTable.aggregate(MockInitializer.STRING_INIT, null, MockAggregator.TOSTRING_REMOVER, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("store"));
     }
 
     @Test(expected = NullPointerException.class)
     public void shouldNotAllowNullSubtractorOnAggregate() {
-        groupedTable.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, null, "store");
+        groupedTable.aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, null, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("store"));
     }
 
     @Test(expected = NullPointerException.class)
     public void shouldNotAllowNullAdderOnReduce() {
-        groupedTable.reduce(null, MockReducer.STRING_REMOVER, "store");
+        groupedTable.reduce(null, MockReducer.STRING_REMOVER, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("store"));
     }
 
     @Test(expected = NullPointerException.class)
     public void shouldNotAllowNullSubtractorOnReduce() {
-        groupedTable.reduce(MockReducer.STRING_ADDER, null, "store");
-    }
-
-    @Test
-    public void shouldAllowNullStoreNameOnReduce() {
-        groupedTable.reduce(MockReducer.STRING_ADDER, MockReducer.STRING_REMOVER, (String) null);
+        groupedTable.reduce(MockReducer.STRING_ADDER, null, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("store"));
     }
 
     @Test(expected = InvalidTopicException.class)
     public void shouldNotAllowInvalidStoreNameOnReduce() {
-        groupedTable.reduce(MockReducer.STRING_ADDER, MockReducer.STRING_REMOVER, INVALID_STORE_NAME);
-    }
-
-    @Test(expected = NullPointerException.class)
-    public void shouldNotAllowNullStoreSupplierOnReduce() {
-        groupedTable.reduce(MockReducer.STRING_ADDER, MockReducer.STRING_REMOVER, (StateStoreSupplier<KeyValueStore>) null);
+        groupedTable.reduce(MockReducer.STRING_ADDER, MockReducer.STRING_REMOVER, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(INVALID_STORE_NAME));
     }
 
     private Map<String, Integer> getReducedResults(final KTable<String, Integer> inputKTable) {
@@ -165,7 +143,7 @@ public class KGroupedTableImplTest {
                                                                       .withKeySerde(Serdes.String())
                                                                       .withValueSerde(Serdes.Double()))
             .groupBy(intProjection)
-            .reduce(MockReducer.INTEGER_ADDER, MockReducer.INTEGER_SUBTRACTOR, "reduced");
+            .reduce(MockReducer.INTEGER_ADDER, MockReducer.INTEGER_SUBTRACTOR, Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("reduced"));
 
         final Map<String, Integer> results = getReducedResults(reduced);
         try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
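
Likewise for KGroupedTable: the store-name and StateStoreSupplier overloads of reduce/aggregate are gone, leaving only the Materialized variants. A minimal sketch of the new reduce shape, with illustrative topic and store names:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.Consumed;
    import org.apache.kafka.streams.KeyValue;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.Serialized;
    import org.apache.kafka.streams.state.KeyValueStore;

    public class GroupedTableReduceExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            // Old (removed): groupedTable.reduce(adder, subtractor, "sum-store");
            final KTable<String, Long> sums = builder
                .table("input", Consumed.with(Serdes.String(), Serdes.Long()))
                .groupBy((key, value) -> KeyValue.pair(key, value),
                         Serialized.with(Serdes.String(), Serdes.Long()))
                .reduce((v1, v2) -> v1 + v2,   // adder
                        (v1, v2) -> v1 - v2,   // subtractor
                        Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("sum-store"));
        }
    }
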
diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java
index 49e8aaa..c6ee70f 100644
--- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java
@@ -24,7 +24,6 @@ import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.StreamsBuilder;
 import org.apache.kafka.streams.StreamsBuilderTest;
 import org.apache.kafka.streams.TopologyTestDriver;
-import org.apache.kafka.streams.errors.TopologyException;
 import org.apache.kafka.streams.kstream.GlobalKTable;
 import org.apache.kafka.streams.kstream.JoinWindows;
 import org.apache.kafka.streams.kstream.Joined;
@@ -243,7 +242,7 @@ public class KStreamImplTest {
                     Joined.with(Serdes.String(),
                                 Serdes.String(),
                                 Serdes.String()))
-                .to(Serdes.String(), Serdes.String(), "output-topic");
+                .to("output-topic", Produced.with(Serdes.String(), Serdes.String()));
 
         ProcessorTopology processorTopology = builder.setApplicationId("X").build(null);
         SourceNode originalSourceNode = processorTopology.source("topic-1");
@@ -260,8 +259,10 @@ public class KStreamImplTest {
     @Test
     public void testToWithNullValueSerdeDoesntNPE() {
         final StreamsBuilder builder = new StreamsBuilder();
-        final KStream<String, String> inputStream = builder.stream(Collections.singleton("input"), stringConsumed);
-        inputStream.to(Serdes.String(), null, "output");
+        final Consumed<String, String> consumed = Consumed.with(Serdes.String(), Serdes.String());
+        final KStream<String, String> inputStream = builder.stream(Collections.singleton("input"), consumed);
+
+        inputStream.to("output", Produced.with(Serdes.String(), Serdes.String()));
     }
 
     @Test(expected = NullPointerException.class)
@@ -295,16 +296,6 @@ public class KStreamImplTest {
     }
 
     @Test(expected = NullPointerException.class)
-    public void shouldNotAllowNullFilePathOnWriteAsText() {
-        testStream.writeAsText(null);
-    }
-
-    @Test(expected = TopologyException.class)
-    public void shouldNotAllowEmptyFilePathOnWriteAsText() {
-        testStream.writeAsText("\t    \t");
-    }
-
-    @Test(expected = NullPointerException.class)
     public void shouldNotAllowNullMapperOnFlatMap() {
         testStream.flatMap(null);
     }
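
The to() migration above is mechanical: the serde arguments move out of the parameter list and into a Produced. A minimal sketch, assuming String keys and values and illustrative topic names:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.Consumed;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Produced;

    public class ProducedExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            // Old (removed): stream.to(Serdes.String(), Serdes.String(), "output-topic");
            builder.stream("input-topic", Consumed.with(Serdes.String(), Serdes.String()))
                   .to("output-topic", Produced.with(Serdes.String(), Serdes.String()));
        }
    }
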
diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java
index 7a2a8e0..91ffca8 100644
--- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java
@@ -61,7 +61,8 @@ public class KStreamWindowAggregateTest {
         final KTable<Windowed<String>, String> table2 = builder
             .stream(topic1, Consumed.with(Serdes.String(), Serdes.String()))
             .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
-            .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, TimeWindows.of(10).advanceBy(5), Serdes.String(), "topic1-Canonized");
+            .windowedBy(TimeWindows.of(10).advanceBy(5))
+            .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, Materialized.<String, String, WindowStore<Bytes, byte[]>>as("topic1-Canonized").withValueSerde(Serdes.String()));
 
         final MockProcessorSupplier<Windowed<String>, String> supplier = new MockProcessorSupplier<>();
         table2.toStream().process(supplier);
@@ -85,7 +86,6 @@ public class KStreamWindowAggregateTest {
             driver.pipeInput(recordFactory.create(topic1, "C", "3", 14L));
         }
 
-
         assertEquals(
             Utils.mkList(
                 "[A@0/10]:0+1",
@@ -119,14 +119,17 @@ public class KStreamWindowAggregateTest {
         final KTable<Windowed<String>, String> table1 = builder
             .stream(topic1, Consumed.with(Serdes.String(), Serdes.String()))
             .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
-            .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, TimeWindows.of(10).advanceBy(5), Serdes.String(), "topic1-Canonized");
+            .windowedBy(TimeWindows.of(10).advanceBy(5))
+            .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, Materialized.<String, String, WindowStore<Bytes, byte[]>>as("topic1-Canonized").withValueSerde(Serdes.String()));
 
         final MockProcessorSupplier<Windowed<String>, String> supplier = new MockProcessorSupplier<>();
         table1.toStream().process(supplier);
 
         final KTable<Windowed<String>, String> table2 = builder
-            .stream(topic2, Consumed.with(Serdes.String(), Serdes.String())).groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
-            .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, TimeWindows.of(10).advanceBy(5), Serdes.String(), "topic2-Canonized");
+            .stream(topic2, Consumed.with(Serdes.String(), Serdes.String()))
+            .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
+            .windowedBy(TimeWindows.of(10).advanceBy(5))
+            .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, Materialized.<String, String, WindowStore<Bytes, byte[]>>as("topic2-Canonized").withValueSerde(Serdes.String()));
 
         table2.toStream().process(supplier);
 
diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableAggregateTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableAggregateTest.java
index a769b49..121017e 100644
--- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableAggregateTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableAggregateTest.java
@@ -18,6 +18,7 @@ package org.apache.kafka.streams.kstream.internals;
 
 import org.apache.kafka.common.serialization.Serde;
 import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.utils.Bytes;
 import org.apache.kafka.common.utils.Utils;
 import org.apache.kafka.streams.Consumed;
 import org.apache.kafka.streams.KeyValue;
@@ -28,10 +29,12 @@ import org.apache.kafka.streams.kstream.ForeachAction;
 import org.apache.kafka.streams.kstream.Initializer;
 import org.apache.kafka.streams.kstream.KTable;
 import org.apache.kafka.streams.kstream.KeyValueMapper;
+import org.apache.kafka.streams.kstream.Materialized;
 import org.apache.kafka.streams.kstream.Reducer;
 import org.apache.kafka.streams.kstream.Serialized;
 import org.apache.kafka.streams.kstream.ValueJoiner;
 import org.apache.kafka.streams.kstream.ValueMapper;
+import org.apache.kafka.streams.state.KeyValueStore;
 import org.apache.kafka.test.KStreamTestDriver;
 import org.apache.kafka.test.MockAggregator;
 import org.apache.kafka.test.MockInitializer;
@@ -80,8 +83,7 @@ public class KTableAggregateTest {
         ).aggregate(MockInitializer.STRING_INIT,
                 MockAggregator.TOSTRING_ADDER,
                 MockAggregator.TOSTRING_REMOVER,
-                stringSerde,
-                "topic1-Canonized");
+                Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("topic1-Canonized").withValueSerde(stringSerde));
 
         table2.toStream().process(supplier);
 
@@ -127,8 +129,7 @@ public class KTableAggregateTest {
         ).aggregate(MockInitializer.STRING_INIT,
             MockAggregator.TOSTRING_ADDER,
             MockAggregator.TOSTRING_REMOVER,
-            stringSerde,
-            "topic1-Canonized");
+            Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("topic1-Canonized").withValueSerde(stringSerde));
 
         table2.toStream().process(supplier);
 
@@ -167,8 +168,7 @@ public class KTableAggregateTest {
                 .aggregate(MockInitializer.STRING_INIT,
                 MockAggregator.TOSTRING_ADDER,
                 MockAggregator.TOSTRING_REMOVER,
-                stringSerde,
-                "topic1-Canonized");
+                Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("topic1-Canonized").withValueSerde(stringSerde));
 
         table2.toStream().process(supplier);
 
@@ -235,7 +235,7 @@ public class KTableAggregateTest {
 
         builder.table(input, consumed)
                 .groupBy(MockMapper.<String, String>selectValueKeyValueMapper(), stringSerialzied)
-                .count("count")
+                .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("count"))
                 .toStream()
                 .process(supplier);
 
@@ -264,7 +264,7 @@ public class KTableAggregateTest {
 
         builder.table(input, consumed)
             .groupBy(MockMapper.<String, String>selectValueKeyValueMapper(), stringSerialzied)
-            .count("count")
+            .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("count"))
             .toStream()
             .process(supplier);
 
@@ -319,7 +319,7 @@ public class KTableAggregateTest {
                     public String apply(String key, String value, String aggregate) {
                         return aggregate.replaceAll(value, "");
                     }
-                }, Serdes.String(), "someStore")
+                }, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("someStore").withValueSerde(Serdes.String()))
                 .toStream()
                 .process(supplier);
 
@@ -372,7 +372,7 @@ public class KTableAggregateTest {
                     public Long apply(final Long value1, final Long value2) {
                         return value1 - value2;
                     }
-                }, "reducer-store");
+                }, Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("reducer-store"));
 
         reduce.toStream().foreach(new ForeachAction<String, Long>() {
             @Override
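
The KTableAggregateTest changes show how the separate Serde and store-name arguments fold into one Materialized, with only the value serde overridden via withValueSerde() since the key type is already known to the grouped table. A minimal sketch with illustrative topic, store, and class names:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.Consumed;
    import org.apache.kafka.streams.KeyValue;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.Serialized;
    import org.apache.kafka.streams.state.KeyValueStore;

    public class TableAggregateExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            final KTable<String, String> aggregated = builder
                .table("topic1", Consumed.with(Serdes.String(), Serdes.String()))
                .groupBy((key, value) -> KeyValue.pair(value, value),
                         Serialized.with(Serdes.String(), Serdes.String()))
                .aggregate(() -> "",                                      // initializer
                           (key, value, agg) -> agg + value,              // adder
                           (key, value, agg) -> agg.replace(value, ""),   // subtractor
                           Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("canonized")
                                   .withValueSerde(Serdes.String()));
        }
    }
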
diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableFilterTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableFilterTest.java
index 2eecbc1..3cf11bb 100644
--- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableFilterTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableFilterTest.java
@@ -105,7 +105,6 @@ public class KTableFilterTest {
         doTestKTable(builder, table2, table3, topic1);
     }
 
-    @SuppressWarnings("deprecation")
     @Test
     public void testQueryableKTable() {
         final StreamsBuilder builder = new StreamsBuilder();
@@ -119,30 +118,6 @@ public class KTableFilterTest {
             public boolean test(String key, Integer value) {
                 return (value % 2) == 0;
             }
-        }, "anyStoreNameFilter");
-        KTable<String, Integer> table3 = table1.filterNot(new Predicate<String, Integer>() {
-            @Override
-            public boolean test(String key, Integer value) {
-                return (value % 2) == 0;
-            }
-        });
-
-        doTestKTable(builder, table2, table3, topic1);
-    }
-
-    @Test
-    public void shouldAddQueryableStore() {
-        final StreamsBuilder builder = new StreamsBuilder();
-
-        final String topic1 = "topic1";
-
-        KTable<String, Integer> table1 = builder.table(topic1, consumed);
-
-        KTable<String, Integer> table2 = table1.filter(new Predicate<String, Integer>() {
-            @Override
-            public boolean test(String key, Integer value) {
-                return (value % 2) == 0;
-            }
         }, Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("anyStoreNameFilter"));
         KTable<String, Integer> table3 = table1.filterNot(new Predicate<String, Integer>() {
             @Override
@@ -151,6 +126,9 @@ public class KTableFilterTest {
             }
         });
 
+        assertEquals("anyStoreNameFilter", table2.queryableStoreName());
+        assertNull(table3.queryableStoreName());
+
         doTestKTable(builder, table2, table3, topic1);
     }
 
@@ -257,7 +235,7 @@ public class KTableFilterTest {
                 public boolean test(String key, Integer value) {
                     return (value % 2) == 0;
                 }
-            }, "anyStoreNameFilter");
+            }, Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("anyStoreNameFilter"));
         KTableImpl<String, Integer, Integer> table3 = (KTableImpl<String, Integer, Integer>) table1.filterNot(
             new Predicate<String, Integer>() {
                 @Override
@@ -266,6 +244,9 @@ public class KTableFilterTest {
                 }
             });
 
+        assertEquals("anyStoreNameFilter", table2.queryableStoreName());
+        assertNull(table3.queryableStoreName());
+
         doTestValueGetter(builder, table2, table3, topic1);
     }
 
@@ -342,7 +323,7 @@ public class KTableFilterTest {
                 public boolean test(String key, Integer value) {
                     return (value % 2) == 0;
                 }
-            }, "anyStoreNameFilter");
+            }, Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("anyStoreNameFilter"));
 
         doTestNotSendingOldValue(builder, table1, table2, topic1);
     }
@@ -422,7 +403,7 @@ public class KTableFilterTest {
                 public boolean test(String key, Integer value) {
                     return (value % 2) == 0;
                 }
-            }, "anyStoreNameFilter");
+            }, Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("anyStoreNameFilter"));
 
         doTestSendingOldValue(builder, table1, table2, topic1);
     }
@@ -466,7 +447,7 @@ public class KTableFilterTest {
                     return value.equalsIgnoreCase("accept");
                 }
             }).groupBy(MockMapper.<String, String>noOpKeyValueMapper())
-            .reduce(MockReducer.STRING_ADDER, MockReducer.STRING_REMOVER, "mock-result");
+            .reduce(MockReducer.STRING_ADDER, MockReducer.STRING_REMOVER);
 
         doTestSkipNullOnMaterialization(builder, table1, table2, topic1);
     }
@@ -487,8 +468,8 @@ public class KTableFilterTest {
                 public boolean test(String key, String value) {
                     return value.equalsIgnoreCase("accept");
                 }
-            }, "anyStoreNameFilter").groupBy(MockMapper.<String, String>noOpKeyValueMapper())
-            .reduce(MockReducer.STRING_ADDER, MockReducer.STRING_REMOVER, "mock-result");
+            }, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("anyStoreNameFilter")).groupBy(MockMapper.<String, String>noOpKeyValueMapper())
+            .reduce(MockReducer.STRING_ADDER, MockReducer.STRING_REMOVER, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("mock-result"));
 
         doTestSkipNullOnMaterialization(builder, table1, table2, topic1);
     }
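
The reworked KTableFilterTest pins down point 2.a from the commit message: filter() with a Materialized yields a queryable store, while filter()/filterNot() without one is no longer materialized. A minimal sketch of that contract, with illustrative names:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.Consumed;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.state.KeyValueStore;

    public class FilterMaterializationExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            final KTable<String, Integer> table =
                builder.table("topic1", Consumed.with(Serdes.String(), Serdes.Integer()));

            final KTable<String, Integer> evens = table.filter(
                (key, value) -> value % 2 == 0,
                Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("evens-store"));
            final KTable<String, Integer> odds = table.filterNot((key, value) -> value % 2 == 0);

            System.out.println(evens.queryableStoreName()); // "evens-store"
            System.out.println(odds.queryableStoreName());  // null, not materialized
        }
    }
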
diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableImplTest.java
index fcdd0a7..351bc32 100644
--- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableImplTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableImplTest.java
@@ -29,7 +29,6 @@ import org.apache.kafka.streams.kstream.Produced;
 import org.apache.kafka.streams.kstream.ValueJoiner;
 import org.apache.kafka.streams.kstream.ValueMapper;
 import org.apache.kafka.streams.kstream.ValueMapperWithKey;
-import org.apache.kafka.streams.processor.StateStoreSupplier;
 import org.apache.kafka.streams.processor.internals.SinkNode;
 import org.apache.kafka.streams.processor.internals.SourceNode;
 import org.apache.kafka.streams.state.KeyValueStore;
@@ -345,11 +344,11 @@ public class KTableImplTest {
                 );
 
         table1.groupBy(MockMapper.<String, String>noOpKeyValueMapper())
-            .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, MockAggregator.TOSTRING_REMOVER, "mock-result1");
+            .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, MockAggregator.TOSTRING_REMOVER, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("mock-result1"));
 
 
         table1.groupBy(MockMapper.<String, String>noOpKeyValueMapper())
-            .reduce(MockReducer.STRING_ADDER, MockReducer.STRING_REMOVER, "mock-result2");
+            .reduce(MockReducer.STRING_ADDER, MockReducer.STRING_REMOVER, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("mock-result2"));
 
         driver.setUp(builder, stateDir, stringSerde, stringSerde);
         driver.setTime(0L);
@@ -411,24 +410,7 @@ public class KTableImplTest {
 
     @Test
     public void shouldAllowNullStoreInJoin() {
-        table.join(table, MockValueJoiner.TOSTRING_JOINER, null, null);
-    }
-
-    @SuppressWarnings("unchecked")
-    @Test(expected = NullPointerException.class)
-    public void shouldNotAllowNullStoreSupplierInJoin() {
-        table.join(table, MockValueJoiner.TOSTRING_JOINER, (StateStoreSupplier) null);
-    }
-
-    @SuppressWarnings("unchecked")
-    @Test(expected = NullPointerException.class)
-    public void shouldNotAllowNullStoreSupplierInLeftJoin() {
-        table.leftJoin(table, MockValueJoiner.TOSTRING_JOINER, (StateStoreSupplier) null);
-    }
-
-    @Test(expected = NullPointerException.class)
-    public void shouldNotAllowNullStoreSupplierInOuterJoin() {
-        table.outerJoin(table, MockValueJoiner.TOSTRING_JOINER, (StateStoreSupplier) null);
+        table.join(table, MockValueJoiner.TOSTRING_JOINER);
     }
 
     @Test(expected = NullPointerException.class)
@@ -463,7 +445,7 @@ public class KTableImplTest {
             public boolean test(final String key, final String value) {
                 return false;
             }
-        }, (Materialized<String, String, KeyValueStore<Bytes, byte[]>>) null);
+        }, (Materialized) null);
     }
 
     @Test(expected = NullPointerException.class)
@@ -473,7 +455,7 @@ public class KTableImplTest {
             public boolean test(final String key, final String value) {
                 return false;
             }
-        }, (Materialized<String, String, KeyValueStore<Bytes, byte[]>>) null);
+        }, (Materialized) null);
     }
 
     @Test(expected = NullPointerException.class)
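
For KTable joins the StateStoreSupplier overloads are removed outright, so a join is either unmaterialized or takes a Materialized, as the hunk above exercises. A minimal sketch, with illustrative topic and store names:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.Consumed;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.state.KeyValueStore;

    public class TableJoinExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            final KTable<String, String> left = builder.table("left", Consumed.with(Serdes.String(), Serdes.String()));
            final KTable<String, String> right = builder.table("right", Consumed.with(Serdes.String(), Serdes.String()));

            // Unmaterialized join:
            final KTable<String, String> joined = left.join(right, (v1, v2) -> v1 + v2);
            // Materialized, queryable join:
            final KTable<String, String> queryable = left.join(right, (v1, v2) -> v1 + v2,
                Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("join-store"));
        }
    }
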
diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java
index 0ca388f..7ed8b6a 100644
--- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java
@@ -18,11 +18,14 @@ package org.apache.kafka.streams.kstream.internals;
 
 import org.apache.kafka.common.serialization.Serde;
 import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.utils.Bytes;
 import org.apache.kafka.streams.Consumed;
 import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.StreamsBuilder;
 import org.apache.kafka.streams.StreamsBuilderTest;
 import org.apache.kafka.streams.kstream.KTable;
+import org.apache.kafka.streams.kstream.Materialized;
+import org.apache.kafka.streams.state.KeyValueStore;
 import org.apache.kafka.streams.processor.MockProcessorContext;
 import org.apache.kafka.streams.processor.Processor;
 import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender;
@@ -51,12 +54,14 @@ import static org.junit.Assert.assertTrue;
 
 public class KTableKTableInnerJoinTest {
 
-    final private String topic1 = "topic1";
-    final private String topic2 = "topic2";
+    private final String topic1 = "topic1";
+    private final String topic2 = "topic2";
 
-    final private Serde<Integer> intSerde = Serdes.Integer();
-    final private Serde<String> stringSerde = Serdes.String();
+    private final Serde<Integer> intSerde = Serdes.Integer();
+    private final Serde<String> stringSerde = Serdes.String();
     private final Consumed<Integer, String> consumed = Consumed.with(intSerde, stringSerde);
+    private final Materialized<Integer, String, KeyValueStore<Bytes, byte[]>> materialized = Materialized.with(intSerde, stringSerde);
+
     private File stateDir = null;
     @Rule
     public final KStreamTestDriver driver = new KStreamTestDriver();
@@ -188,16 +193,16 @@ public class KTableKTableInnerJoinTest {
 
         final KTable<Integer, String> table1;
         final KTable<Integer, String> table2;
-        final KTable<Integer, String> joined;
+        final KTable<Integer, String> table3;
         final MockProcessorSupplier<Integer, String> processor;
 
         processor = new MockProcessorSupplier<>();
         table1 = builder.table(topic1, consumed);
         table2 = builder.table(topic2, consumed);
-        joined = table1.join(table2, MockValueJoiner.TOSTRING_JOINER, Serdes.String(), "anyQueryableName");
-        joined.toStream().process(processor);
+        table3 = table1.join(table2, MockValueJoiner.TOSTRING_JOINER, materialized);
+        table3.toStream().process(processor);
 
-        doTestJoin(builder, expectedKeys, processor, joined);
+        doTestJoin(builder, expectedKeys, processor, table3);
     }
 
     private void doTestSendingOldValues(final StreamsBuilder builder,
@@ -316,12 +321,11 @@ public class KTableKTableInnerJoinTest {
 
         table1 = builder.table(topic1, consumed);
         table2 = builder.table(topic2, consumed);
-        joined = table1.join(table2, MockValueJoiner.TOSTRING_JOINER, Serdes.String(), "anyQueryableName");
+        joined = table1.join(table2, MockValueJoiner.TOSTRING_JOINER, materialized);
         supplier = new MockProcessorSupplier<>();
         builder.build().addProcessor("proc", supplier, ((KTableImpl<?, ?, ?>) joined).name);
 
         doTestSendingOldValues(builder, expectedKeys, table1, table2, supplier, joined, false);
-
     }
 
     @Test
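
The materialized field introduced above uses Materialized.with(keySerde, valueSerde), which supplies serdes without naming the store; the store name is then generated internally rather than chosen by the caller. A minimal sketch of that variant, with illustrative topic names:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.Consumed;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.state.KeyValueStore;

    public class MaterializedWithExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            final Materialized<Integer, String, KeyValueStore<Bytes, byte[]>> materialized =
                Materialized.with(Serdes.Integer(), Serdes.String());

            final KTable<Integer, String> table1 = builder.table("topic1", Consumed.with(Serdes.Integer(), Serdes.String()));
            final KTable<Integer, String> table2 = builder.table("topic2", Consumed.with(Serdes.Integer(), Serdes.String()));
            // Serdes are pinned, but no caller-chosen store name is exposed:
            final KTable<Integer, String> joined = table1.join(table2, (v1, v2) -> v1 + "+" + v2, materialized);
        }
    }
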
diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java
index 2eef302..51fd839 100644
--- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java
@@ -18,17 +18,20 @@ package org.apache.kafka.streams.kstream.internals;
 
 import org.apache.kafka.common.serialization.Serde;
 import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.utils.Bytes;
 import org.apache.kafka.streams.Consumed;
 import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.StreamsBuilder;
 import org.apache.kafka.streams.StreamsBuilderTest;
 import org.apache.kafka.streams.kstream.KTable;
 import org.apache.kafka.streams.kstream.KeyValueMapper;
+import org.apache.kafka.streams.kstream.Materialized;
 import org.apache.kafka.streams.kstream.Serialized;
 import org.apache.kafka.streams.kstream.ValueMapper;
 import org.apache.kafka.streams.processor.MockProcessorContext;
 import org.apache.kafka.streams.processor.Processor;
 import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender;
+import org.apache.kafka.streams.state.KeyValueStore;
 import org.apache.kafka.test.KStreamTestDriver;
 import org.apache.kafka.test.MockProcessor;
 import org.apache.kafka.test.MockProcessorSupplier;
@@ -364,7 +367,7 @@ public class KTableKTableLeftJoinTest {
                 },
                 Serialized.with(Serdes.Long(), Serdes.String())
             )
-            .reduce(MockReducer.STRING_ADDER, MockReducer.STRING_ADDER, "agg-store");
+            .reduce(MockReducer.STRING_ADDER, MockReducer.STRING_ADDER, Materialized.<Long, String, KeyValueStore<Bytes, byte[]>>as("agg-store"));
 
         final KTable<Long, String> one = builder.table(tableOne, consumed);
         final KTable<Long, String> two = builder.table(tableTwo, consumed);
diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImplTest.java
index 619ee96..6a84c03 100644
--- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImplTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImplTest.java
@@ -112,7 +112,8 @@ public class SessionWindowedKStreamImplTest {
         final Map<Windowed<String>, String> results = new HashMap<>();
         stream.aggregate(MockInitializer.STRING_INIT,
                          MockAggregator.TOSTRING_ADDER,
-                         sessionMerger)
+                         sessionMerger,
+                         Materialized.<String, String, SessionStore<Bytes, byte[]>>with(Serdes.String(), Serdes.String()))
                 .toStream()
                 .foreach(new ForeachAction<Windowed<String>, String>() {
                     @Override
@@ -129,21 +130,6 @@ public class SessionWindowedKStreamImplTest {
     @SuppressWarnings("unchecked")
     @Test
     public void shouldMaterializeCount() {
-        stream.count(Materialized.<String, Long, SessionStore<Bytes, byte[]>>as("count-store")
-                             .withKeySerde(Serdes.String()));
-
-        processData();
-        final SessionStore<String, Long> store = (SessionStore<String, Long>) driver.allStateStores().get("count-store");
-        final List<KeyValue<Windowed<String>, Long>> data = StreamsTestUtils.toList(store.fetch("1", "2"));
-        assertThat(data, equalTo(Arrays.asList(
-                KeyValue.pair(new Windowed<>("1", new SessionWindow(10, 15)), 2L),
-                KeyValue.pair(new Windowed<>("1", new SessionWindow(600, 600)), 1L),
-                KeyValue.pair(new Windowed<>("2", new SessionWindow(600, 600)), 1L))));
-    }
-
-    @SuppressWarnings("unchecked")
-    @Test
-    public void shouldMaterializeWithoutSpecifyingSerdes() {
         stream.count(Materialized.<String, Long, SessionStore<Bytes, byte[]>>as("count-store"));
 
         processData();
@@ -158,10 +144,7 @@ public class SessionWindowedKStreamImplTest {
     @SuppressWarnings("unchecked")
     @Test
     public void shouldMaterializeReduced() {
-        stream.reduce(MockReducer.STRING_ADDER,
-                      Materialized.<String, String, SessionStore<Bytes, byte[]>>as("reduced")
-                              .withKeySerde(Serdes.String())
-                              .withValueSerde(Serdes.String()));
+        stream.reduce(MockReducer.STRING_ADDER, Materialized.<String, String, SessionStore<Bytes, byte[]>>as("reduced"));
 
         processData();
         final SessionStore<String, String> sessionStore = (SessionStore<String, String>) driver.allStateStores().get("reduced");
@@ -179,9 +162,7 @@ public class SessionWindowedKStreamImplTest {
         stream.aggregate(MockInitializer.STRING_INIT,
                          MockAggregator.TOSTRING_ADDER,
                          sessionMerger,
-                         Materialized.<String, String, SessionStore<Bytes, byte[]>>as("aggregated")
-                                 .withKeySerde(Serdes.String())
-                                 .withValueSerde(Serdes.String()));
+                         Materialized.<String, String, SessionStore<Bytes, byte[]>>as("aggregated").withValueSerde(Serdes.String()));
 
         processData();
         final SessionStore<String, String> sessionStore = (SessionStore<String, String>) driver.allStateStores().get("aggregated");
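
These session-window tests also reflect point 2.b of the commit message: for count and reduce the key and value types are already known, so Materialized.as("name") alone suffices and explicit withKeySerde/withValueSerde calls become redundant. A minimal sketch, with illustrative names:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.Consumed;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.Serialized;
    import org.apache.kafka.streams.kstream.SessionWindows;
    import org.apache.kafka.streams.state.SessionStore;

    public class SessionReduceExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            builder.stream("input", Consumed.with(Serdes.String(), Serdes.String()))
                   .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
                   .windowedBy(SessionWindows.with(500L))
                   // Key and value types are known from the grouped stream, so
                   // naming the store is enough; no serde overrides required:
                   .reduce((v1, v2) -> v1 + ":" + v2,
                           Materialized.<String, String, SessionStore<Bytes, byte[]>>as("reduced"));
        }
    }
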
diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImplTest.java
index 286a823..6f9198d 100644
--- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImplTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImplTest.java
@@ -105,8 +105,9 @@ public class TimeWindowedKStreamImplTest {
     public void shouldAggregateWindowed() {
         final Map<Windowed<String>, String> results = new HashMap<>();
         windowedStream.aggregate(MockInitializer.STRING_INIT,
-                                 MockAggregator.TOSTRING_ADDER
-        )
+                MockAggregator.TOSTRING_ADDER,
+                Materialized.<String, String, WindowStore<Bytes, byte[]>>with(
+                        Serdes.String(), Serdes.String()))
                 .toStream()
                 .foreach(new ForeachAction<Windowed<String>, String>() {
                     @Override
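
This hunk reflects point 2.c of the commit message: aggregate() produces a value type the library cannot infer, so instead of force-casting the input serde the caller now supplies one, for example via Materialized.with. A minimal sketch where the aggregate type (String) differs from the input value type (Long); topic and class names are illustrative:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.Consumed;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.Serialized;
    import org.apache.kafka.streams.kstream.TimeWindows;
    import org.apache.kafka.streams.state.WindowStore;

    public class WindowedAggregateExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            builder.stream("input", Consumed.with(Serdes.String(), Serdes.Long()))
                   .groupByKey(Serialized.with(Serdes.String(), Serdes.Long()))
                   .windowedBy(TimeWindows.of(500L))
                   // The aggregate type (String) differs from the input value
                   // type (Long), so its serde is supplied explicitly:
                   .aggregate(() -> "",
                              (key, value, agg) -> agg + "," + value,
                              Materialized.<String, String, WindowStore<Bytes, byte[]>>with(Serdes.String(), Serdes.String()));
        }
    }
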
diff --git a/streams/src/test/java/org/apache/kafka/streams/perf/YahooBenchmark.java b/streams/src/test/java/org/apache/kafka/streams/perf/YahooBenchmark.java
index 9c77680..2f3a006 100644
--- a/streams/src/test/java/org/apache/kafka/streams/perf/YahooBenchmark.java
+++ b/streams/src/test/java/org/apache/kafka/streams/perf/YahooBenchmark.java
@@ -27,19 +27,23 @@ import org.apache.kafka.common.serialization.Deserializer;
 import org.apache.kafka.common.serialization.Serdes;
 import org.apache.kafka.common.serialization.Serializer;
 import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.utils.Bytes;
 import org.apache.kafka.streams.Consumed;
 import org.apache.kafka.streams.KafkaStreams;
 import org.apache.kafka.streams.StreamsBuilder;
 import org.apache.kafka.streams.StreamsConfig;
 import org.apache.kafka.streams.kstream.ForeachAction;
+import org.apache.kafka.streams.kstream.Joined;
 import org.apache.kafka.streams.kstream.KStream;
 import org.apache.kafka.streams.kstream.KTable;
 import org.apache.kafka.streams.kstream.KeyValueMapper;
+import org.apache.kafka.streams.kstream.Materialized;
 import org.apache.kafka.streams.kstream.Predicate;
 import org.apache.kafka.streams.kstream.Serialized;
 import org.apache.kafka.streams.kstream.TimeWindows;
 import org.apache.kafka.streams.kstream.ValueJoiner;
 import org.apache.kafka.streams.kstream.ValueMapper;
+import org.apache.kafka.streams.state.WindowStore;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -321,7 +325,7 @@ public class YahooBenchmark {
                 public String apply(ProjectedEvent value1, CampaignAd value2) {
                     return value2.campaignID;
                 }
-            }, Serdes.String(), Serdes.serdeFrom(projectedEventSerializer, projectedEventDeserializer));
+            }, Joined.<String, ProjectedEvent, CampaignAd>with(Serdes.String(), Serdes.serdeFrom(projectedEventSerializer, projectedEventDeserializer), null));
 
 
         // key by campaign rather than by ad as original
@@ -336,7 +340,8 @@ public class YahooBenchmark {
         // calculate windowed counts
         keyedByCampaign
             .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
-            .count(TimeWindows.of(10 * 1000), "time-windows");
+            .windowedBy(TimeWindows.of(10 * 1000))
+            .count(Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("time-windows"));
 
         return new KafkaStreams(builder.build(), streamConfig);
     }
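
The YahooBenchmark change swaps the serde-argument join overload for Joined.with(keySerde, valueSerde, otherValueSerde), where a null entry falls back to the configured default serde. A minimal stream-table sketch of the same shape, with illustrative topic names:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.Consumed;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Joined;
    import org.apache.kafka.streams.kstream.KStream;
    import org.apache.kafka.streams.kstream.KTable;

    public class JoinedExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            final KStream<String, String> events = builder.stream("events", Consumed.with(Serdes.String(), Serdes.String()));
            final KTable<String, String> campaigns = builder.table("campaigns", Consumed.with(Serdes.String(), Serdes.String()));

            final KStream<String, String> joined = events.join(
                campaigns,
                (event, campaign) -> campaign,
                // null: use the configured default serde for the table side
                Joined.<String, String, String>with(Serdes.String(), Serdes.String(), null));
        }
    }
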
diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsMetadataStateTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsMetadataStateTest.java
index 8e5d90d..e9bb2a3 100644
--- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsMetadataStateTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsMetadataStateTest.java
@@ -71,19 +71,19 @@ public class StreamsMetadataStateTest {
     private StreamPartitioner<String, Object> partitioner;
 
     @Before
-    public void before() throws Exception {
+    public void before() {
         builder = new StreamsBuilder();
         final KStream<Object, Object> one = builder.stream("topic-one");
-        one.groupByKey().count("table-one");
+        one.groupByKey().count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>as("table-one"));
 
         final KStream<Object, Object> two = builder.stream("topic-two");
-        two.groupByKey().count("table-two");
+        two.groupByKey().count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>as("table-two"));
 
         builder.stream("topic-three")
                 .groupByKey()
-                .count("table-three");
+                .count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>as("table-three"));
 
-        one.merge(two).groupByKey().count("merged-table");
+        one.merge(two).groupByKey().count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>as("merged-table"));
 
         builder.stream("topic-four").mapValues(new ValueMapper<Object, Object>() {
             @Override
