kafka-commits mailing list archives

From guozh...@apache.org
Subject kafka git commit: KAFKA-2120: cleaning follow-up
Date Fri, 16 Oct 2015 16:40:49 GMT
Repository: kafka
Updated Branches:
  refs/heads/trunk 5338f8432 -> 7efa12705


KAFKA-2120: cleaning follow-up

Trivial fix to get rid of unused variable assignments in KafkaProducer.

Author: Mayuresh Gharat <mgharat@mgharat-ld1.linkedin.biz>

Reviewers: Edward Ribeiro, Guozhang Wang

Closes #320 from MayureshGharat/kafka-2120-followup
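For context, the cleanup drops assignments whose results were never read: the intermediate calls to checkMaybeGetRemainingTime(startTime) now run purely for their timeout check, and the returned remaining time is only bound at the last call, where it is passed on to the accumulator. A minimal, self-contained sketch of that pattern, using simplified hypothetical names rather than the actual producer code:

// Minimal sketch of the call pattern after this cleanup (hypothetical, simplified
// class; not the real KafkaProducer). The timeout check still runs at every step,
// but its return value is only bound where it is actually consumed.
public class RemainingTimeSketch {
    private final long maxBlockTimeMs = 1000L;

    // Throw if the max block time has been exceeded, otherwise return what is left of it.
    private long checkMaybeGetRemainingTime(long startTime) {
        long elapsedTime = System.currentTimeMillis() - startTime;
        if (elapsedTime > maxBlockTimeMs) {
            throw new RuntimeException("Request timed out due to exceeding the maximum threshold of "
                    + maxBlockTimeMs + " ms");
        }
        return maxBlockTimeMs - elapsedTime;
    }

    public void send() {
        long startTime = System.currentTimeMillis();
        serializeKey();
        checkMaybeGetRemainingTime(startTime);                      // side effect only: may throw
        serializeValue();
        checkMaybeGetRemainingTime(startTime);                      // side effect only: may throw
        long remainingTime = checkMaybeGetRemainingTime(startTime); // value is used below
        append(remainingTime);                                      // only consumer of the value
    }

    private void serializeKey() { }
    private void serializeValue() { }
    private void append(long remainingTimeMs) { }

    public static void main(String[] args) {
        new RemainingTimeSketch().send();
    }
}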


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/7efa1270
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/7efa1270
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/7efa1270

Branch: refs/heads/trunk
Commit: 7efa12705d91e21eec527ab49033b01ca5e3e3ce
Parents: 5338f84
Author: Mayuresh Gharat <mgharat@linkedin.com>
Authored: Fri Oct 16 09:45:32 2015 -0700
Committer: Guozhang Wang <wangguoz@gmail.com>
Committed: Fri Oct 16 09:45:32 2015 -0700

----------------------------------------------------------------------
 .../apache/kafka/clients/producer/KafkaProducer.java    | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kafka/blob/7efa1270/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
----------------------------------------------------------------------
diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
index d42fae9..44280e0 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
@@ -418,7 +418,7 @@ public class KafkaProducer<K, V> implements Producer<K, V> {
                         " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() +
                         " specified in key.serializer");
             }
-            long remainingTime = checkMaybeGetRemainingTime(startTime);
+            checkMaybeGetRemainingTime(startTime);
             byte[] serializedValue;
             try {
                 serializedValue = valueSerializer.serialize(record.topic(), record.value());
@@ -427,14 +427,14 @@ public class KafkaProducer<K, V> implements Producer<K, V> {
                         " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() +
                         " specified in value.serializer");
             }
-            remainingTime = checkMaybeGetRemainingTime(startTime);
+            checkMaybeGetRemainingTime(startTime);
             int partition = partition(record, serializedKey, serializedValue, metadata.fetch());
-            remainingTime = checkMaybeGetRemainingTime(startTime);
+            checkMaybeGetRemainingTime(startTime);
             int serializedSize = Records.LOG_OVERHEAD + Record.recordSize(serializedKey, serializedValue);
             ensureValidRecordSize(serializedSize);
             TopicPartition tp = new TopicPartition(record.topic(), partition);
             log.trace("Sending record {} with callback {} to topic {} partition {}", record, callback, record.topic(), partition);
-            remainingTime = checkMaybeGetRemainingTime(startTime);
+            long remainingTime = checkMaybeGetRemainingTime(startTime);
             RecordAccumulator.RecordAppendResult result = accumulator.append(tp, serializedKey, serializedValue, callback, remainingTime);
             if (result.batchIsFull || result.newBatchCreated) {
                 log.trace("Waking up the sender since topic {} partition {} is either full or getting a new batch", record.topic(), partition);
@@ -679,7 +679,7 @@ public class KafkaProducer<K, V> implements Producer<K, V> {
     }
 
     /**
-     * Check and may be get the time elapsed since startTime.
+     * Check and maybe get the time elapsed since startTime.
      * Throws a {@link org.apache.kafka.common.errors.TimeoutException} if the  elapsed time
      * is more than the max time to block (max.block.ms)
      *
@@ -689,7 +689,7 @@ public class KafkaProducer<K, V> implements Producer<K, V> {
     private long checkMaybeGetRemainingTime(long startTime) {
         long elapsedTime = time.milliseconds() - startTime;
         if (elapsedTime > maxBlockTimeMs) {
-            throw new TimeoutException("Request timed out");
+            throw new TimeoutException("Request timed out due to exceeding the maximum threshold of " + maxBlockTimeMs + " ms");
         }
         long remainingTime = maxBlockTimeMs - elapsedTime;
 


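For reference, after the last two hunks the helper reads roughly as below. This is a reconstruction from the diff above; the trailing return statement is assumed from context (the hunk ends right after remainingTime is computed), and time and maxBlockTimeMs are fields of the surrounding KafkaProducer class.

    // Reconstruction of the helper as it reads after this change; the final return
    // is assumed from context rather than shown in the hunk above.
    private long checkMaybeGetRemainingTime(long startTime) {
        long elapsedTime = time.milliseconds() - startTime;
        if (elapsedTime > maxBlockTimeMs) {
            throw new TimeoutException("Request timed out due to exceeding the maximum threshold of "
                    + maxBlockTimeMs + " ms");
        }
        long remainingTime = maxBlockTimeMs - elapsedTime;
        return remainingTime;
    }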