From commits-return-9690-archive-asf-public=cust-asf.ponee.io@kafka.apache.org Tue Jun 12 11:09:15 2018
X-Original-To: archive-asf-public@cust-asf.ponee.io
Delivered-To: archive-asf-public@cust-asf.ponee.io
Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by mx-eu-01.ponee.io (Postfix) with SMTP id 37609180608; Tue, 12 Jun 2018 11:09:14 +0200 (CEST)
Received: (qmail 78938 invoked by uid 500); 12 Jun 2018 09:09:13 -0000
Mailing-List: contact commits-help@kafka.apache.org; run by ezmlm
Precedence: bulk
Reply-To: dev@kafka.apache.org
Delivered-To: mailing list commits@kafka.apache.org
Received: (qmail 78929 invoked by uid 99); 12 Jun 2018 09:09:13 -0000
Received: from ec2-52-202-80-70.compute-1.amazonaws.com (HELO gitbox.apache.org) (52.202.80.70) by apache.org (qpsmtpd/0.29) with ESMTP; Tue, 12 Jun 2018 09:09:13 +0000
Received: by gitbox.apache.org (ASF Mail Server at gitbox.apache.org, from userid 33) id 7332B8299A; Tue, 12 Jun 2018 09:09:12 +0000 (UTC)
Date: Tue, 12 Jun 2018 09:09:11 +0000
To: "commits@kafka.apache.org"
Subject: [kafka] branch trunk updated: MINOR: Clean up imports and unused variables (#5171)
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Message-ID: <152879455038.28394.14944360080607401109@gitbox.apache.org>
From: ijuma@apache.org
X-Git-Host: gitbox.apache.org
X-Git-Repo: kafka
X-Git-Refname: refs/heads/trunk
X-Git-Reftype: branch
X-Git-Oldrev: dbca6b9b88e8e2eaa6a9c091ce5d88b8229c0d39
X-Git-Newrev: fa1d0383902260576132e09bdf9efcc2784b55b4
X-Git-Rev: fa1d0383902260576132e09bdf9efcc2784b55b4
X-Git-NotificationType: ref_changed_plus_diff
X-Git-Multimail-Version: 1.5.dev
Auto-Submitted: auto-generated

This is an automated email from the ASF dual-hosted git repository.

ijuma pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
     new fa1d038  MINOR: Clean up imports and unused variables (#5171)
fa1d038 is described below

commit fa1d0383902260576132e09bdf9efcc2784b55b4
Author:     Jimin Hsieh
AuthorDate: Tue Jun 12 17:08:51 2018 +0800

    MINOR: Clean up imports and unused variables (#5171)

    Reviewers: Dhruvil Shah, Ismael Juma
---
 core/src/main/scala/kafka/admin/AdminClient.scala            | 2 +-
 core/src/main/scala/kafka/log/LogSegment.scala               | 2 +-
 core/src/main/scala/kafka/log/OffsetIndex.scala              | 2 +-
 core/src/main/scala/kafka/network/RequestChannel.scala       | 2 --
 core/src/main/scala/kafka/network/SocketServer.scala         | 5 ++---
 core/src/test/scala/unit/kafka/server/FetchRequestTest.scala | 2 +-
 core/src/test/scala/unit/kafka/server/FetchSessionTest.scala | 2 +-
 7 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/core/src/main/scala/kafka/admin/AdminClient.scala b/core/src/main/scala/kafka/admin/AdminClient.scala
index ea42530..7312f10 100644
--- a/core/src/main/scala/kafka/admin/AdminClient.scala
+++ b/core/src/main/scala/kafka/admin/AdminClient.scala
@@ -22,7 +22,7 @@ import kafka.common.KafkaException
 import kafka.coordinator.group.GroupOverview
 import kafka.utils.Logging
 import org.apache.kafka.clients._
-import org.apache.kafka.clients.consumer.internals.{ConsumerNetworkClient, ConsumerProtocol, RequestFuture, RequestFutureAdapter}
+import org.apache.kafka.clients.consumer.internals.{ConsumerNetworkClient, ConsumerProtocol, RequestFuture}
 import org.apache.kafka.common.config.ConfigDef.{Importance, Type}
 import org.apache.kafka.common.config.{AbstractConfig, ConfigDef}
 import org.apache.kafka.common.errors.{AuthenticationException, TimeoutException}
diff --git a/core/src/main/scala/kafka/log/LogSegment.scala b/core/src/main/scala/kafka/log/LogSegment.scala
index 6d61a41..f066106 100755
--- a/core/src/main/scala/kafka/log/LogSegment.scala
+++ b/core/src/main/scala/kafka/log/LogSegment.scala
@@ -26,7 +26,7 @@ import kafka.metrics.{KafkaMetricsGroup, KafkaTimer}
 import kafka.server.epoch.LeaderEpochCache
 import kafka.server.{FetchDataInfo, LogOffsetMetadata}
 import kafka.utils._
-import org.apache.kafka.common.errors.{CorruptRecordException, InvalidOffsetException}
+import org.apache.kafka.common.errors.CorruptRecordException
 import org.apache.kafka.common.record.FileRecords.LogOffsetPosition
 import org.apache.kafka.common.record._
 import org.apache.kafka.common.utils.Time
diff --git a/core/src/main/scala/kafka/log/OffsetIndex.scala b/core/src/main/scala/kafka/log/OffsetIndex.scala
index d185631..7dae098 100755
--- a/core/src/main/scala/kafka/log/OffsetIndex.scala
+++ b/core/src/main/scala/kafka/log/OffsetIndex.scala
@@ -21,7 +21,7 @@ import java.io.File
 import java.nio.ByteBuffer
 
 import kafka.utils.CoreUtils.inLock
-import kafka.common.{IndexOffsetOverflowException, InvalidOffsetException}
+import kafka.common.InvalidOffsetException
 
 /**
  * An index that maps offsets to physical file locations for a particular log segment. This index may be sparse:
diff --git a/core/src/main/scala/kafka/network/RequestChannel.scala b/core/src/main/scala/kafka/network/RequestChannel.scala
index eecce1d..e5aa5d9 100644
--- a/core/src/main/scala/kafka/network/RequestChannel.scala
+++ b/core/src/main/scala/kafka/network/RequestChannel.scala
@@ -25,11 +25,9 @@ import com.typesafe.scalalogging.Logger
 import com.yammer.metrics.core.{Gauge, Meter}
 import kafka.metrics.KafkaMetricsGroup
 import kafka.utils.{Logging, NotNothing}
-import org.apache.kafka.common.TopicPartition
 import org.apache.kafka.common.memory.MemoryPool
 import org.apache.kafka.common.network.Send
 import org.apache.kafka.common.protocol.{ApiKeys, Errors}
-import org.apache.kafka.common.record.RecordConversionStats
 import org.apache.kafka.common.requests._
 import org.apache.kafka.common.security.auth.KafkaPrincipal
 import org.apache.kafka.common.utils.{Sanitizer, Time}
diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala
index db5eda6..06da8df 100644
--- a/core/src/main/scala/kafka/network/SocketServer.scala
+++ b/core/src/main/scala/kafka/network/SocketServer.scala
@@ -38,7 +38,6 @@ import org.apache.kafka.common.metrics._
 import org.apache.kafka.common.metrics.stats.Meter
 import org.apache.kafka.common.network.KafkaChannel.ChannelMuteEvent
 import org.apache.kafka.common.network.{ChannelBuilder, ChannelBuilders, KafkaChannel, ListenerName, Selectable, Send, Selector => KSelector}
-import org.apache.kafka.common.record.MultiRecordsSend
 import org.apache.kafka.common.requests.{RequestContext, RequestHeader}
 import org.apache.kafka.common.security.auth.SecurityProtocol
 import org.apache.kafka.common.utils.{KafkaThread, LogContext, Time}
@@ -641,9 +640,9 @@ private[kafka] class Processor(val id: Int,
             updateRequestMetrics(response)
             trace("Closing socket connection actively according to the response code.")
             close(channelId)
-          case response: StartThrottlingResponse =>
+          case _: StartThrottlingResponse =>
             handleChannelMuteEvent(channelId, ChannelMuteEvent.THROTTLE_STARTED)
-          case response: EndThrottlingResponse =>
+          case _: EndThrottlingResponse =>
             // Try unmuting the channel. The channel will be unmuted only if the response has already been sent out to
             // the client.
             handleChannelMuteEvent(channelId, ChannelMuteEvent.THROTTLE_ENDED)
diff --git a/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala b/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala
index 424b8c7..63e23b2 100644
--- a/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala
+++ b/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala
@@ -26,7 +26,7 @@ import kafka.utils.TestUtils
 import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
 import org.apache.kafka.common.TopicPartition
 import org.apache.kafka.common.protocol.{ApiKeys, Errors}
-import org.apache.kafka.common.record.{MemoryRecords, Record, RecordBatch, Records}
+import org.apache.kafka.common.record.{MemoryRecords, Record, RecordBatch}
 import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, FetchMetadata => JFetchMetadata}
 import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}
 import org.junit.Assert._
diff --git a/core/src/test/scala/unit/kafka/server/FetchSessionTest.scala b/core/src/test/scala/unit/kafka/server/FetchSessionTest.scala
index b79692d..ae001a3 100755
--- a/core/src/test/scala/unit/kafka/server/FetchSessionTest.scala
+++ b/core/src/test/scala/unit/kafka/server/FetchSessionTest.scala
@@ -22,7 +22,7 @@ import java.util.Collections
 import kafka.utils.MockTime
 import org.apache.kafka.common.TopicPartition
 import org.apache.kafka.common.protocol.Errors
-import org.apache.kafka.common.record.{AbstractRecords, Records}
+import org.apache.kafka.common.record.Records
 import org.apache.kafka.common.requests.FetchMetadata.{FINAL_EPOCH, INVALID_SESSION_ID}
 import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, FetchMetadata => JFetchMetadata}
 import org.junit.Assert._

--
To stop receiving notification emails like this one, please contact ijuma@apache.org.
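
Aside (not part of the commit above): besides dropping unused imports, the SocketServer.scala hunk replaces named case bindings with wildcard type patterns where the matched value is never used, which removes "unused variable" noise without changing behaviour. A minimal, self-contained Scala sketch of the same idiom follows; the Response types and handle method here are hypothetical stand-ins, not Kafka's actual classes.

// Sketch of the wildcard type-pattern cleanup: bind a name only when the value is used.
sealed trait Response
final case class SendResponse(payload: String) extends Response
final case class StartThrottlingResponse(channelId: String) extends Response
final case class EndThrottlingResponse(channelId: String) extends Response

object PatternCleanupSketch {
  def handle(channelId: String, response: Response): Unit = response match {
    case SendResponse(payload) =>
      println(s"sending $payload on $channelId")   // the binding is used, so it keeps a name
    case _: StartThrottlingResponse =>
      println(s"throttle started on $channelId")   // value unused: `_: Type` avoids an unused binding
    case _: EndThrottlingResponse =>
      println(s"throttle ended on $channelId")
  }

  def main(args: Array[String]): Unit =
    handle("channel-1", StartThrottlingResponse("channel-1"))  // prints "throttle started on channel-1"
}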