Date: Thu, 9 Nov 2017 21:15:08 +0000 (UTC)
From: "ASF GitHub Bot (JIRA)"
To: issues@drill.apache.org
Reply-To: dev@drill.apache.org
Subject: [jira] [Commented] (DRILL-4779) Kafka storage plugin support

    [ https://issues.apache.org/jira/browse/DRILL-4779?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16246550#comment-16246550 ]

ASF GitHub Bot commented on DRILL-4779:
---------------------------------------

Github user paul-rogers commented on a diff in the pull request:

    https://github.com/apache/drill/pull/1027#discussion_r150086335

    --- Diff: contrib/storage-kafka/src/main/java/org/apache/drill/exec/store/kafka/KafkaRecordReader.java ---
    @@ -0,0 +1,178 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.
    + * See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.drill.exec.store.kafka;
    +
    +import static org.apache.drill.exec.store.kafka.DrillKafkaConfig.DRILL_KAFKA_POLL_TIMEOUT;
    +
    +import java.util.Collection;
    +import java.util.Iterator;
    +import java.util.List;
    +import java.util.Set;
    +import java.util.concurrent.TimeUnit;
    +
    +import org.apache.drill.common.exceptions.ExecutionSetupException;
    +import org.apache.drill.common.expression.SchemaPath;
    +import org.apache.drill.exec.ExecConstants;
    +import org.apache.drill.exec.ops.FragmentContext;
    +import org.apache.drill.exec.ops.OperatorContext;
    +import org.apache.drill.exec.physical.impl.OutputMutator;
    +import org.apache.drill.exec.store.AbstractRecordReader;
    +import org.apache.drill.exec.store.kafka.KafkaSubScan.KafkaSubScanSpec;
    +import org.apache.drill.exec.store.kafka.decoders.MessageReader;
    +import org.apache.drill.exec.store.kafka.decoders.MessageReaderFactory;
    +import org.apache.drill.exec.util.Utilities;
    +import org.apache.drill.exec.vector.complex.impl.VectorContainerWriter;
    +import org.apache.kafka.clients.consumer.ConsumerRecord;
    +import org.apache.kafka.clients.consumer.ConsumerRecords;
    +import org.apache.kafka.clients.consumer.KafkaConsumer;
    +import org.apache.kafka.common.TopicPartition;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
    +
    +import com.google.common.base.Stopwatch;
    +import com.google.common.collect.Lists;
    +import com.google.common.collect.Sets;
    +
    +public class KafkaRecordReader extends AbstractRecordReader {
    +  private static final Logger logger = LoggerFactory.getLogger(KafkaRecordReader.class);
    +  public static final long DEFAULT_MESSAGES_PER_BATCH = 4000;
    +
    +  private VectorContainerWriter writer;
    +  private MessageReader messageReader;
    +
    +  private boolean unionEnabled;
    +  private KafkaConsumer<byte[], byte[]> kafkaConsumer;
    +  private KafkaStoragePlugin plugin;
    +  private KafkaSubScanSpec subScanSpec;
    +  private long kafkaPollTimeOut;
    +  private long endOffset;
    +
    +  private long currentOffset;
    +  private long totalFetchTime = 0;
    +
    +  private List<TopicPartition> partitions;
    +  private final boolean enableAllTextMode;
    +  private final boolean readNumbersAsDouble;
    +
    +  private Iterator<ConsumerRecord<byte[], byte[]>> messageIter;
    +
    +  public KafkaRecordReader(KafkaSubScan.KafkaSubScanSpec subScanSpec, List<SchemaPath> projectedColumns,
    +      FragmentContext context, KafkaStoragePlugin plugin) {
    +    setColumns(projectedColumns);
    +    this.enableAllTextMode = context.getOptions().getOption(ExecConstants.KAFKA_ALL_TEXT_MODE).bool_val;
    +    this.readNumbersAsDouble = context.getOptions()
    +        .getOption(ExecConstants.KAFKA_READER_READ_NUMBERS_AS_DOUBLE).bool_val;
    +    this.unionEnabled = context.getOptions().getOption(ExecConstants.ENABLE_UNION_TYPE);
    +    this.plugin = plugin;
    +    this.subScanSpec = subScanSpec;
    +    this.endOffset = subScanSpec.getEndOffset();
    +    this.kafkaPollTimeOut =
    +        Long.valueOf(plugin.getConfig().getDrillKafkaProps().getProperty(DRILL_KAFKA_POLL_TIMEOUT));
    +  }
    +
    +  @Override
    +  protected Collection<SchemaPath> transformColumns(Collection<SchemaPath> projectedColumns) {
    +    Set<SchemaPath> transformed = Sets.newLinkedHashSet();
    +    if (!isStarQuery()) {
    +      for (SchemaPath column : projectedColumns) {
    +        transformed.add(column);
    +      }
    +    } else {
    +      transformed.add(Utilities.STAR_COLUMN);
    +    }
    +    return transformed;
    +  }
    +
    +  @Override
    +  public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
    +    this.writer = new VectorContainerWriter(output, unionEnabled);
    +    messageReader = MessageReaderFactory.getMessageReader(plugin.getConfig().getDrillKafkaProps());
    +    messageReader.init(context.getManagedBuffer(), Lists.newArrayList(getColumns()), this.writer,
    +        this.enableAllTextMode, false, this.readNumbersAsDouble);
    +    kafkaConsumer = messageReader.getConsumer(plugin);
    +    partitions = Lists.newArrayListWithCapacity(1);
    +    TopicPartition topicPartition = new TopicPartition(subScanSpec.getTopicName(), subScanSpec.getPartitionId());
    +    partitions.add(topicPartition);
    +    kafkaConsumer.assign(partitions);
    +    currentOffset = subScanSpec.getStartOffset();
    +    kafkaConsumer.seek(topicPartition, currentOffset);
    +  }
    +
    +  private boolean hasNext() {
    +    if (currentOffset > endOffset) {
    +      logger.info("Read all the messages from Kafka. Start offset : {}, End offset : {}", currentOffset, endOffset);
    +      return false;
    +    } else if (messageIter != null && messageIter.hasNext()) {
    +      return true;
    +    } else {
    +      return fetch();
    +    }
    +  }
    +
    +  private boolean fetch() {
    +    long startTime = System.currentTimeMillis();
    +    ConsumerRecords<byte[], byte[]> consumerRecords = kafkaConsumer.poll(kafkaPollTimeOut);
    +
    +    if (consumerRecords.isEmpty()) {
    +      logger.warn("Not able to fetch messages within {} milliseconds. Consider increasing the value of {}",
    +          kafkaPollTimeOut, DRILL_KAFKA_POLL_TIMEOUT);
    +    }
    +
    +    long lastFetchTime = (System.currentTimeMillis() - startTime);
    +    logger.debug("Total number of messages fetched : {}", consumerRecords.count());
    +    logger.debug("Time taken to fetch : {} milliseconds", lastFetchTime);
    +    totalFetchTime += lastFetchTime;
    +
    +    messageIter = consumerRecords.iterator();
    +    return messageIter.hasNext();
    +  }
    +
    +  /**
    +   * KafkaConsumer.poll fetches at most max.poll.records messages (500 by
    +   * default) per call, so hasNext() polls as many times as needed to
    +   * supply the current batch on each next() invocation.
    +   */
    +  @Override
    +  public int next() {
    +    writer.allocate();
    +    writer.reset();
    +    Stopwatch watch = Stopwatch.createStarted();
    +
    +    int messageCount = 0;
    +    while (hasNext()) {
    +      ConsumerRecord<byte[], byte[]> consumerRecord = messageIter.next();
    +      currentOffset = consumerRecord.offset();
    +      writer.setPosition(messageCount);
    +      messageReader.readMessage(consumerRecord);
    --- End diff --
    
    Again, error handling?


> Kafka storage plugin support
> ----------------------------
>
>                 Key: DRILL-4779
>                 URL: https://issues.apache.org/jira/browse/DRILL-4779
>             Project: Apache Drill
>          Issue Type: New Feature
>          Components: Storage - Other
>    Affects Versions: 1.11.0
>            Reporter: B Anil Kumar
>            Assignee: B Anil Kumar
>              Labels: doc-impacting
>             Fix For: 1.12.0
>
>
> Implementing a Kafka storage plugin will enable strong SQL support for Kafka. The initial implementation can target JSON and Avro message types.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)
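
A note for readers following the review thread above: one minimal way the
readMessage() call could surface decode failures, in the spirit of the
reviewer's question, is Drill's usual UserException pattern. This is a
sketch only, not what the pull request ultimately adopted; it assumes an
extra import of org.apache.drill.common.exceptions.UserException, and the
catch granularity and messages are illustrative.

    // Sketch only: fail the query with topic/offset context when a record
    // cannot be decoded, instead of letting an unchecked exception escape
    // from next().
    while (hasNext()) {
      ConsumerRecord<byte[], byte[]> consumerRecord = messageIter.next();
      currentOffset = consumerRecord.offset();
      writer.setPosition(messageCount);
      try {
        messageReader.readMessage(consumerRecord);
      } catch (Exception e) {
        throw UserException.dataReadError(e)
            .message("Failed to decode Kafka message")
            .addContext("topic", consumerRecord.topic())
            .addContext("offset", String.valueOf(consumerRecord.offset()))
            .build(logger);
      }
      messageCount++;
    }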
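
And for readers arriving from the JIRA summary: a hypothetical
storage-plugin registration showing how the consumer properties such a
plugin needs would be supplied through Drill's web UI. The field names
here (kafkaConsumerProps in particular) follow the shape the plugin
documented at release; this PR iteration reads getDrillKafkaProps(), so
treat the exact keys as illustrative.

    {
      "type": "kafka",
      "kafkaConsumerProps": {
        "key.deserializer": "org.apache.kafka.common.serialization.ByteArrayDeserializer",
        "value.deserializer": "org.apache.kafka.common.serialization.ByteArrayDeserializer",
        "bootstrap.servers": "localhost:9092",
        "group.id": "drill-query-consumer"
      },
      "enabled": true
    }

With such a registration in place, a topic could be queried as, for
example, SELECT * FROM kafka.`clickstream` LIMIT 10, assuming JSON
messages and the plugin registered under the name "kafka".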