From: mattyb149
To: issues@nifi.apache.org
Reply-To: issues@nifi.apache.org
Subject: [GitHub] nifi pull request #1471: NIFI-3432 Handle Multiple Result Sets in ExecuteSQL
Date: Tue, 14 Feb 2017 16:43:10 +0000 (UTC)

Github user mattyb149 commented on a diff in the pull request:

    https://github.com/apache/nifi/pull/1471#discussion_r101081936

    --- Diff: nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQL.java ---
    @@ -192,31 +192,57 @@ public void process(InputStream in) throws IOException {
                 try (final Connection con = dbcpService.getConnection();
                     final Statement st = con.createStatement()) {
                     st.setQueryTimeout(queryTimeout); // timeout in seconds
    -                final AtomicLong nrOfRows = new AtomicLong(0L);
    -                if (fileToProcess == null) {
    -                    fileToProcess = session.create();
    -                }
    -                fileToProcess = session.write(fileToProcess, new OutputStreamCallback() {
    -                    @Override
    -                    public void process(final OutputStream out) throws IOException {
    -                        try {
    -                            logger.debug("Executing query {}", new Object[]{selectQuery});
    -                            final ResultSet resultSet = st.executeQuery(selectQuery);
    -                            nrOfRows.set(JdbcCommon.convertToAvroStream(resultSet, out, convertNamesForAvro));
    -                        } catch (final SQLException e) {
    -                            throw new ProcessException(e);
    -                        }
    +
    +                logger.debug("Executing query {}", new Object[]{selectQuery});
    +                boolean results = st.execute(selectQuery);
    +                int resultCount = 0;
    +                while (results) {
    +                    FlowFile resultSetFF;
    +                    if (fileToProcess == null) {
    +                        resultSetFF = session.create();
    +                    } else {
    +                        resultSetFF = session.create(fileToProcess);
    +                        resultSetFF = session.putAllAttributes(resultSetFF, fileToProcess.getAttributes());
                         }
    -                });
    -                // set attribute how many rows were selected
    -                fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));
    +                    final AtomicLong nrOfRows = new AtomicLong(0L);
    +
    +                    resultSetFF = session.write(resultSetFF, new OutputStreamCallback() {
    +                        @Override
    +                        public void process(final OutputStream out) throws IOException {
    +                            try {
    +                                ResultSet resultSet = st.getResultSet();
    +                                nrOfRows.set(JdbcCommon.convertToAvroStream(resultSet, out, convertNamesForAvro));
    +                            } catch (final SQLException e) {
    +                                throw new ProcessException(e);
    +                            }
    +                        }
    +                    });
    +
    +                    // set an attribute recording how many rows were selected
    +                    resultSetFF = session.putAttribute(resultSetFF, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));
    +
    +                    logger.info("{} contains {} Avro records; transferring to 'success'",
    +                            new Object[]{resultSetFF, nrOfRows.get()});
    +                    session.getProvenanceReporter().modifyContent(resultSetFF, "Retrieved " + nrOfRows.get() + " rows",
    +                            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
    +                    session.transfer(resultSetFF, REL_SUCCESS);
    +                    resultCount++;
    +
    +                    // are there any more result sets?
    +                    results = st.getMoreResults();
    +                }
    -                logger.info("{} contains {} Avro records; transferring to 'success'",
    -                        new Object[]{fileToProcess, nrOfRows.get()});
    -                session.getProvenanceReporter().modifyContent(fileToProcess, "Retrieved " + nrOfRows.get() + " rows",
    -                        stopWatch.getElapsed(TimeUnit.MILLISECONDS));
    -                session.transfer(fileToProcess, REL_SUCCESS);
    +                // If we had at least one result set, it's OK to drop the original flow file, but if we
    +                // had none, pass the original flow file downstream to trigger downstream processors
    +                if (fileToProcess != null) {
    +                    if (resultCount > 0) {
    +                        session.remove(fileToProcess);
    +                    } else {
    +                        session.transfer(fileToProcess, REL_SUCCESS);
    --- End diff --

If there was an incoming flow file, and we are now writing and transferring resultSetFF, why do we transfer the original to success? The content of the incoming flow file(s) is SQL, whereas the normal output is Avro. This might affect existing flows that expect only Avro (or nothing). If this is a helpful addition, perhaps an "original" relationship could be added for this case.
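If that route is taken, a rough sketch of what it could look like in the processor, assuming a hypothetical REL_ORIGINAL constant (neither the identifier, the name, nor the description below is part of this PR, and the new relationship would also have to be included in the set returned by getRelationships()):

    // Hypothetical relationship; identifier, name, and description are illustrative only
    public static final Relationship REL_ORIGINAL = new Relationship.Builder()
            .name("original")
            .description("The incoming FlowFile, whose content is the SQL query, is routed here "
                    + "after execution so that flows expecting only Avro on 'success' are unaffected")
            .build();

    // In onTrigger(), instead of transferring the incoming FlowFile to REL_SUCCESS:
    if (fileToProcess != null) {
        session.transfer(fileToProcess, REL_ORIGINAL);
    }

That would keep 'success' Avro-only while still letting the incoming flow file trigger downstream processors.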
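Separately, for readers unfamiliar with the contract the new loop relies on: Statement.execute() returns true when the first result is a ResultSet, Statement.getResultSet() retrieves the current one, and Statement.getMoreResults() closes it and advances to the next. A minimal sketch of that contract in isolation (the helper name and the caller-supplied Connection and SQL are assumptions for illustration; whether a semicolon-separated batch actually yields multiple result sets is driver-specific, with stored procedure calls being the more portable source):

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    class MultiResultSetSketch {

        // Walks every ResultSet a statement produces and returns how many were seen.
        static int processAllResultSets(final Connection con, final String sql) throws SQLException {
            int resultCount = 0;
            try (Statement st = con.createStatement()) {
                // execute() returns true if the first result is a ResultSet
                boolean hasResultSet = st.execute(sql);
                while (hasResultSet) {
                    try (ResultSet rs = st.getResultSet()) {
                        while (rs.next()) {
                            // consume the current row here
                        }
                    }
                    resultCount++;
                    // getMoreResults() closes the current ResultSet and returns true only
                    // when the next result is another ResultSet; it also returns false when
                    // the next result is an update count, so a fully general loop would
                    // additionally check st.getUpdateCount() == -1 before stopping
                    hasResultSet = st.getMoreResults();
                }
            }
            return resultCount;
        }
    }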