kafka-commits mailing list archives

From mj...@apache.org
Subject [kafka] branch 2.0 updated: KAFKA-7284: streams should unwrap fenced exception (#5499)
Date Tue, 14 Aug 2018 00:15:16 GMT
This is an automated email from the ASF dual-hosted git repository.

mjsax pushed a commit to branch 2.0
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/2.0 by this push:
     new 8ba90ed  KAFKA-7284: streams should unwrap fenced exception (#5499)
8ba90ed is described below

commit 8ba90edd8f3b0335f68d3faa3ffccd7a114b5e89
Author: John Roesler <vvcephei@users.noreply.github.com>
AuthorDate: Mon Aug 13 19:13:13 2018 -0500

    KAFKA-7284: streams should unwrap fenced exception (#5499)
    
    Reviewers: Matthias J. Sax <matthias@confluent.io>, Guozhang Wang <guozhang@confluent.io>
---
 .../kafka/clients/producer/MockProducer.java       | 12 ++++++--
 .../kafka/clients/producer/MockProducerTest.java   | 16 +++--------
 .../processor/internals/RecordCollectorImpl.java   | 32 ++++++++++++++--------
 3 files changed, 33 insertions(+), 27 deletions(-)

diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
index dc00b47..60fd186 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
@@ -21,6 +21,7 @@ import org.apache.kafka.clients.producer.internals.DefaultPartitioner;
 import org.apache.kafka.clients.producer.internals.FutureRecordMetadata;
 import org.apache.kafka.clients.producer.internals.ProduceRequestResult;
 import org.apache.kafka.common.Cluster;
+import org.apache.kafka.common.KafkaException;
 import org.apache.kafka.common.Metric;
 import org.apache.kafka.common.MetricName;
 import org.apache.kafka.common.PartitionInfo;
@@ -206,7 +207,7 @@ public class MockProducer<K, V> implements Producer<K, V> {
         this.transactionInFlight = false;
     }
 
-    private void verifyProducerState() {
+    private synchronized void verifyProducerState() {
         if (this.closed) {
             throw new IllegalStateException("MockProducer is already closed.");
         }
@@ -244,7 +245,12 @@ public class MockProducer<K, V> implements Producer<K, V> {
      */
     @Override
     public synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) {
-        verifyProducerState();
+        if (this.closed) {
+            throw new IllegalStateException("MockProducer is already closed.");
+        }
+        if (this.producerFenced) {
+            throw new KafkaException("MockProducer is fenced.", new ProducerFencedException("Fenced"));
+        }
         int partition = 0;
         if (!this.cluster.partitionsForTopic(record.topic()).isEmpty())
             partition = partition(record, this.cluster);
@@ -318,7 +324,7 @@ public class MockProducer<K, V> implements Producer<K, V> {
         return this.closed;
     }
 
-    public void fenceProducer() {
+    public synchronized void fenceProducer() {
         verifyProducerState();
         verifyTransactionsInitialized();
         this.producerFenced = true;
diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
index 7a8c710..eef9d6d 100644
--- a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
@@ -19,6 +19,7 @@ package org.apache.kafka.clients.producer;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.clients.producer.internals.DefaultPartitioner;
 import org.apache.kafka.common.Cluster;
+import org.apache.kafka.common.KafkaException;
 import org.apache.kafka.common.Node;
 import org.apache.kafka.common.PartitionInfo;
 import org.apache.kafka.common.TopicPartition;
@@ -267,18 +268,9 @@ public class MockProducerTest {
         try {
             producer.send(null);
             fail("Should have thrown as producer is fenced off");
-        } catch (ProducerFencedException e) { }
-    }
-
-    @Test
-    public void shouldThrowOnFlushIfProducerGotFenced() {
-        buildMockProducer(true);
-        producer.initTransactions();
-        producer.fenceProducer();
-        try {
-            producer.flush();
-            fail("Should have thrown as producer is fenced off");
-        } catch (ProducerFencedException e) { }
+        } catch (KafkaException e) {
+            assertTrue("The root cause of the exception should be ProducerFenced", e.getCause() instanceof ProducerFencedException);
+        }
     }
 
     @Test
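
A minimal usage sketch of the client-side change above (not from the patch itself), assuming the
three-argument MockProducer constructor and an illustrative topic and class name: after
fenceProducer(), send() now throws a KafkaException wrapping a ProducerFencedException, mirroring
the real producer, instead of throwing ProducerFencedException directly.

    import org.apache.kafka.clients.producer.MockProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.KafkaException;
    import org.apache.kafka.common.errors.ProducerFencedException;
    import org.apache.kafka.common.serialization.StringSerializer;

    // Illustrative example only; class name and topic are placeholders.
    public class FencedMockProducerSketch {
        public static void main(String[] args) {
            // autoComplete = true so sends would normally complete immediately
            MockProducer<String, String> producer =
                new MockProducer<>(true, new StringSerializer(), new StringSerializer());
            producer.initTransactions();
            producer.fenceProducer();   // simulate another instance taking over the transactional id

            try {
                producer.send(new ProducerRecord<>("test-topic", "key", "value"));
            } catch (KafkaException e) {
                // The fencing error is now wrapped; unwrap the cause to inspect it.
                if (e.getCause() instanceof ProducerFencedException) {
                    System.out.println("Producer was fenced: " + e.getCause().getMessage());
                } else {
                    throw e;
                }
            }
        }
    }
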
diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java
index d753648..9030226 100644
--- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java
+++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java
@@ -207,18 +207,26 @@ public class RecordCollectorImpl implements RecordCollector {
                 "You can increase producer parameter `max.block.ms` to increase this timeout.",
topic);
             throw new StreamsException(String.format("%sFailed to send record to topic %s due to timeout.", logPrefix, topic));
         } catch (final Exception uncaughtException) {
-            throw new StreamsException(
-                String.format(
-                    EXCEPTION_MESSAGE,
-                    logPrefix,
-                    "an error caught",
-                    key,
-                    value,
-                    timestamp,
-                    topic,
-                    uncaughtException.toString()
-                ),
-                uncaughtException);
+            if (uncaughtException instanceof KafkaException &&
+                uncaughtException.getCause() instanceof ProducerFencedException) {
+                final KafkaException kafkaException = (KafkaException) uncaughtException;
+                // producer.send() call may throw a KafkaException which wraps a FencedException,
+                // in this case we should throw its wrapped inner cause so that it can be captured and re-wrapped as TaskMigrationException
+                throw (ProducerFencedException) kafkaException.getCause();
+            } else {
+                throw new StreamsException(
+                    String.format(
+                        EXCEPTION_MESSAGE,
+                        logPrefix,
+                        "an error caught",
+                        key,
+                        value,
+                        timestamp,
+                        topic,
+                        uncaughtException.toString()
+                    ),
+                    uncaughtException);
+            }
         }
     }
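
The streams-side change above is an unwrap-and-rethrow pattern: when producer.send() fails with a
KafkaException whose cause is a ProducerFencedException, RecordCollectorImpl rethrows the inner
cause instead of wrapping it in a StreamsException, so callers further up can recognize it as a
fencing signal. A minimal standalone sketch of that pattern (the sendAndUnwrap helper and the
RuntimeException wrapper are illustrative stand-ins, not the actual Streams internals):

    import org.apache.kafka.common.KafkaException;
    import org.apache.kafka.common.errors.ProducerFencedException;

    public class UnwrapFencedSketch {

        // Illustrative stand-in for the body of RecordCollectorImpl.send().
        static void sendAndUnwrap(final Runnable producerSend) {
            try {
                producerSend.run();
            } catch (final Exception uncaughtException) {
                if (uncaughtException instanceof KafkaException &&
                    uncaughtException.getCause() instanceof ProducerFencedException) {
                    // Rethrow the wrapped inner cause so the caller sees the fencing
                    // error directly instead of a generic wrapped exception.
                    throw (ProducerFencedException) uncaughtException.getCause();
                }
                throw new RuntimeException("Failed to send record", uncaughtException);
            }
        }

        public static void main(final String[] args) {
            try {
                sendAndUnwrap(() -> {
                    // Simulate what the (mock) producer now throws when fenced.
                    throw new KafkaException("MockProducer is fenced.", new ProducerFencedException("Fenced"));
                });
            } catch (final ProducerFencedException e) {
                System.out.println("Caught unwrapped fencing error: " + e.getMessage());
            }
        }
    }
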
 

