From: wujinhu
To: issues@eagle.incubator.apache.org
Reply-To: issues@eagle.incubator.apache.org
Subject: [GitHub] incubator-eagle pull request #705: EAGLE-811 Refactor jdbcMetadataDaoImpl of...
Message-Id: <20161202050056.22556F212F@git1-us-west.apache.org>
Date: Fri, 2 Dec 2016 05:00:56 +0000 (UTC)

Github user wujinhu commented on a diff in the pull request:

    https://github.com/apache/incubator-eagle/pull/705#discussion_r90581912

--- Diff: eagle-core/eagle-alert-parent/eagle-alert/alert-metadata-parent/alert-metadata/src/main/java/org/apache/eagle/alert/metadata/impl/JdbcMetadataHandler.java ---
@@ -0,0 +1,533 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.eagle.alert.metadata.impl;
+
+import org.apache.commons.collections.map.HashedMap;
+import org.apache.commons.dbcp.BasicDataSource;
+import org.apache.eagle.alert.coordination.model.Kafka2TupleMetadata;
+import org.apache.eagle.alert.coordination.model.ScheduleState;
+import org.apache.eagle.alert.coordination.model.internal.PolicyAssignment;
+import org.apache.eagle.alert.coordination.model.internal.Topology;
+import org.apache.eagle.alert.engine.coordinator.*;
+import org.apache.eagle.alert.engine.model.AlertPublishEvent;
+import org.apache.eagle.alert.metadata.MetadataUtils;
+import org.apache.eagle.alert.metadata.resource.OpResult;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.typesafe.config.Config;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.sql.DataSource;
+import java.io.IOException;
+import java.sql.*;
+import java.util.*;
+import java.util.function.Function;
+
+public class JdbcMetadataHandler {
+
+    private static final Logger LOG = LoggerFactory.getLogger(JdbcMetadataHandler.class);
+    // general model
+    private static final String INSERT_STATEMENT = "INSERT INTO %s(content, id) VALUES (?, ?)";
+    private static final String DELETE_STATEMENT = "DELETE FROM %s WHERE id=?";
+    private static final String UPDATE_STATEMENT = "UPDATE %s set content=? WHERE id=?";
+    private static final String QUERY_ALL_STATEMENT = "SELECT content FROM %s";
+    private static final String QUERY_CONDITION_STATEMENT = "SELECT content FROM %s WHERE id=?";
+    private static final String QUERY_ORDERBY_STATEMENT = "SELECT content FROM %s ORDER BY id %s";
+
+    // customized model
+    private static final String CLEAR_SCHEDULESTATES_STATEMENT = "DELETE FROM schedule_state WHERE id NOT IN (SELECT id from (SELECT id FROM schedule_state ORDER BY id DESC limit ?) as states)";
+    private static final String INSERT_ALERT_STATEMENT = "INSERT INTO alert_event(alertId, siteId, appIds, policyId, alertTimestamp, policyValue, alertData) VALUES (?, ?, ?, ?, ?, ?, ?)";
+    private static final String QUERY_ALERT_STATEMENT = "SELECT * FROM alert_event order by alertTimestamp DESC limit ?";
+    private static final String QUERY_ALERT_BY_ID_STATEMENT = "SELECT * FROM alert_event WHERE alertId=? order by alertTimestamp DESC limit ?";
+    private static final String QUERY_ALERT_BY_POLICY_STATEMENT = "SELECT * FROM alert_event WHERE policyId=? order by alertTimestamp DESC limit ?";
+    private static final String INSERT_POLICYPUBLISHMENT_STATEMENT = "INSERT INTO policy_publishment(policyId, publishmentName) VALUES (?, ?)";
+    private static final String DELETE_PUBLISHMENT_STATEMENT = "DELETE FROM policy_publishment WHERE policyId=?";
+    private static final String QUERY_PUBLISHMENT_BY_POLICY_STATEMENT = "SELECT content FROM publishment a INNER JOIN policy_publishment b ON a.id=b.publishmentName and b.policyId=?";
+    private static final String QUERY_PUBLISHMENT_STATEMENT = "SELECT a.content, b.policyId FROM publishment a LEFT JOIN policy_publishment b ON a.id=b.publishmentName";
+
+    public enum SortType { DESC, ASC }
+
+    private static Map<String, String> tblNameMap = new HashMap<>();
+
+    private static final ObjectMapper mapper = new ObjectMapper();
+    private DataSource dataSource;
+
+    static {
+        mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+        registerTableName(StreamingCluster.class.getSimpleName(), "stream_cluster");
+        registerTableName(StreamDefinition.class.getSimpleName(), "stream_definition");
+        registerTableName(Kafka2TupleMetadata.class.getSimpleName(), "kafka_tuple_metadata");
+        registerTableName(PolicyDefinition.class.getSimpleName(), "policy_definition");
+        registerTableName(Publishment.class.getSimpleName(), "publishment");
+        registerTableName(PublishmentType.class.getSimpleName(), "publishment_type");
+        registerTableName(ScheduleState.class.getSimpleName(), "schedule_state");
+        registerTableName(PolicyAssignment.class.getSimpleName(), "policy_assignment");
+        registerTableName(Topology.class.getSimpleName(), "topology");
+        registerTableName(AlertPublishEvent.class.getSimpleName(), "alert_event");
+    }
+
+    private static void registerTableName(String clzName, String tblName) {
+        tblNameMap.put(clzName, tblName);
+    }
+
+    public JdbcMetadataHandler(Config config) {
+        try {
+            //JdbcSchemaManager.getInstance().init(config);
+            BasicDataSource bDatasource = new BasicDataSource();
+            bDatasource.setDriverClassName(config.getString(MetadataUtils.JDBC_DRIVER_PATH));
+            if (config.hasPath(MetadataUtils.JDBC_USERNAME_PATH)) {
+                bDatasource.setUsername(config.getString(MetadataUtils.JDBC_USERNAME_PATH));
+                bDatasource.setPassword(config.getString(MetadataUtils.JDBC_PASSWORD_PATH));
+            }
+            bDatasource.setUrl(config.getString(MetadataUtils.JDBC_CONNECTION_PATH));
+            if (config.hasPath(MetadataUtils.JDBC_CONNECTION_PROPERTIES_PATH)) {
+                bDatasource.setConnectionProperties(config.getString(MetadataUtils.JDBC_CONNECTION_PROPERTIES_PATH));
+            }
+            this.dataSource = bDatasource;
+        } catch (Exception e) {
+            LOG.error(e.getMessage(), e);
+        }
+    }
+
+    private String getTableName(String clzName) {
+        String tbl = tblNameMap.get(clzName);
+        if (tbl != null) {
+            return tbl;
+        } else {
+            return clzName;
+        }
+    }
+
+    private OpResult executeUpdate(Connection connection, String query, String key, String value) throws SQLException {
+        OpResult result = new OpResult();
+        PreparedStatement statement = null;
+        try {
+            statement = connection.prepareStatement(query);
+            Clob clob = connection.createClob();
+            clob.setString(1, value);
+            statement.setClob(1, clob);
+            statement.setString(2, key);
+            int status = statement.executeUpdate();
+            LOG.info("update {} with query={}", status, query);
+        } finally {
+            if (statement != null) {
+                statement.close();
+            }
+        }
+        return result;
+    }
+
+    private OpResult executeUpdate(Connection connection, PreparedStatement statement) {
+        OpResult result = new OpResult();
+        try {
+            int status = statement.executeUpdate();
+            result.code = OpResult.SUCCESS;
+            result.message = String.format("updated %d records successfully", status);
+            LOG.info(result.message);
+        } catch (Exception e) {
+            LOG.error(e.getMessage(), e);
+            result.code = OpResult.FAILURE;
+            result.message = e.getMessage();
+        } finally {
+            if (statement != null) {
+                try {
+                    statement.close();
+                } catch (SQLException e) {
+                    LOG.error("Failed to close statement: {}", e.getMessage(), e.getCause());
+                }
+            }
+            if (connection != null) {
+                try {
+                    connection.close();
+                } catch (SQLException e) {
+                    LOG.error("Failed to close connection: {}", e.getMessage(), e.getCause());
+                }
+            }
+        }
+        return result;
+    }
+
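+    /**
+     * Insert-or-update: try the INSERT first and, when it fails with a
+     * SQLException (typically a duplicate key), roll back to the savepoint
+     * and retry the same key/value pair as an UPDATE. This gives upsert
+     * semantics without vendor-specific UPSERT/MERGE syntax.
+     */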
+    public <T> OpResult addOrReplace(String clzName, T t) {
+        String tb = getTableName(clzName);
+        OpResult result = new OpResult();
+        Savepoint savepoint = null;
+        String key = null;
+        String value = null;
+        Connection connection = null;
+        try {
+            connection = dataSource.getConnection();
+            key = MetadataUtils.getKey(t);
+            value = mapper.writeValueAsString(t);
+            connection.setAutoCommit(false);
+            savepoint = connection.setSavepoint("insertEntity");
+            result = executeUpdate(connection, String.format(INSERT_STATEMENT, tb), key, value);
+            connection.commit();
+        } catch (SQLException e) {
+            LOG.warn("failed to insert entity due to {}, trying update instead", e.getMessage());
+            if (connection != null) {
+                LOG.info("Detected duplicated entity");
+                try {
+                    connection.rollback(savepoint);
+                    executeUpdate(connection, String.format(UPDATE_STATEMENT, tb), key, value);
+                    connection.commit();
+                    connection.setAutoCommit(true);
+                } catch (SQLException e1) {
+                    LOG.warn("Rollback failed", e1);
+                }
+            }
+        } catch (JsonProcessingException e) {
+            LOG.error("Got JsonProcessingException: {}", e.getMessage(), e.getCause());
+            result.code = OpResult.FAILURE;
+            result.message = e.getMessage();
+        } finally {
+            if (connection != null) {
+                try {
+                    connection.close();
+                } catch (SQLException e) {
+                    LOG.error("Failed to close connection: {}", e.getMessage(), e.getCause());
+                }
+            }
+        }
+        return result;
+    }
+
+    public <T> List<T> list(Class<T> clz) {
+        return listOrderBy(clz, null);
+    }
+
+    public <T> List<T> listOrderBy(Class<T> clz, String sortType) {
+        List<T> result = new LinkedList<>();
+        try {
+            String tb = getTableName(clz.getSimpleName());
+            String query = String.format(QUERY_ALL_STATEMENT, tb);
+            if (sortType != null) {
+                query = String.format(QUERY_ORDERBY_STATEMENT, tb, sortType);
+            }
+            Connection connection = dataSource.getConnection();
+            PreparedStatement statement = connection.prepareStatement(query);
+            return executeList(connection, statement, rs -> {
+                try {
+                    String content = rs.getString(1);
+                    return mapper.readValue(content, clz);
+                } catch (Exception e) {
+                    throw new IllegalStateException(e);
+                }
+            });
+        } catch (SQLException ex) {
+            LOG.error(ex.getMessage(), ex);
+            return result;
+        }
+    }
+
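+    /**
+     * Runs the prepared query, maps each row through selectFun, and closes
+     * the ResultSet, the Statement, and the Connection itself; callers hand
+     * over ownership of both resources when invoking this method.
+     */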
+    private <T> List<T> executeList(Connection connection, PreparedStatement statement, Function<ResultSet, T> selectFun) {
+        List<T> result = new LinkedList<>();
+        ResultSet rs = null;
+        try {
+            rs = statement.executeQuery();
+            while (rs.next()) {
+                result.add(selectFun.apply(rs));
+            }
+        } catch (SQLException e) {
+            LOG.error(e.getMessage(), e);
+        } finally {
+            if (rs != null) {
+                try {
+                    rs.close();
+                } catch (SQLException e) {
+                    e.printStackTrace();
+                }
+            }
+            if (statement != null) {
+                try {
+                    statement.close();
+                } catch (SQLException e) {
+                    e.printStackTrace();
+                }
+            }
+            if (connection != null) {
+                try {
+                    connection.close();
+                } catch (SQLException e) {
+                    LOG.error("Failed to close connection: {}", e.getMessage(), e.getCause());
+                }
+            }
+        }
+        return result;
+    }
+
+    public <T> T queryById(Class<T> clz, String id) {
+        List<T> result = new LinkedList<>();
+        try {
+            String tb = getTableName(clz.getSimpleName());
+            Connection connection = dataSource.getConnection();
+            PreparedStatement statement = connection.prepareStatement(String.format(QUERY_CONDITION_STATEMENT, tb));
+            statement.setString(1, id);
+            result = executeList(connection, statement, rs -> {
+                try {
+                    String content = rs.getString(1);
+                    return mapper.readValue(content, clz);
+                } catch (Exception e) {
+                    throw new IllegalStateException(e);
+                }
+            });
+        } catch (SQLException ex) {
+            LOG.error(ex.getMessage(), ex);
+        }
+        if (result.isEmpty()) {
+            return null;
+        } else {
+            return result.get(0);
+        }
+    }
+
+    public AlertPublishEvent getAlertEventById(String alertId, int size) {
+        List<AlertPublishEvent> alerts = listAlertEvents(QUERY_ALERT_BY_ID_STATEMENT, alertId, size);
+        if (alerts.isEmpty()) {
+            return null;
+        } else {
+            return alerts.get(0);
+        }
+    }
+
+    public List<AlertPublishEvent> getAlertEventByPolicyId(String policyId, int size) {
+        return listAlertEvents(QUERY_ALERT_BY_POLICY_STATEMENT, policyId, size);
+    }
+
+    public List<AlertPublishEvent> listAlertEvents(String query, String filter, int size) {
+        List<AlertPublishEvent> alerts = new LinkedList<>();
+        try {
+            Connection connection = dataSource.getConnection();
+            PreparedStatement statement = null;
+            if (query == null) {
+                query = QUERY_ALERT_STATEMENT;
+                statement = connection.prepareStatement(query);
+                statement.setInt(1, size);
+            } else {
+                statement = connection.prepareStatement(query);
+                statement.setString(1, filter);
+                statement.setInt(2, size);
+            }
+            alerts = executeList(connection, statement, rs -> {
+                try {
+                    AlertPublishEvent event = new AlertPublishEvent();
+                    event.setAlertId(rs.getString(1));
+                    event.setSiteId(rs.getString(2));
+                    event.setAppIds(mapper.readValue(rs.getString(3), List.class));
+                    event.setPolicyId(rs.getString(4));
+                    event.setAlertTimestamp(rs.getLong(5));
+                    event.setPolicyValue(rs.getString(6));
+                    event.setAlertData(mapper.readValue(rs.getString(7), Map.class));
+                    return event;
+                } catch (Exception e) {
+                    throw new IllegalStateException(e);
+                }
+            });
+        } catch (SQLException ex) {
+            LOG.error(ex.getMessage(), ex);
+        }
+        return alerts;
+    }
+
+    public List<Publishment> listPublishments() {
+        List<Publishment> result = new LinkedList<>();
+        Connection connection = null;
+        PreparedStatement statement = null;
+        ResultSet rs = null;
+        try {
+            connection = dataSource.getConnection();
+            statement = connection.prepareStatement(QUERY_PUBLISHMENT_STATEMENT);
+            Map<String, List<String>> publishPolicyMap = new HashedMap();
+            rs = statement.executeQuery();
+            while (rs.next()) {
+                String publishment = rs.getString(1);
+                String policyId = rs.getString(2);
+                List<String> policyIds = publishPolicyMap.get(publishment);
+                if (policyIds == null) {
+                    policyIds = new ArrayList<>();
+                    publishPolicyMap.put(publishment, policyIds);
+                }
+                if (policyId != null) {
+                    policyIds.add(policyId);
+                }
+            }
+            for (Map.Entry<String, List<String>> entry : publishPolicyMap.entrySet()) {
+                Publishment publishment = mapper.readValue(entry.getKey(), Publishment.class);
+                publishment.setPolicyIds(entry.getValue());
+                result.add(publishment);
+            }
+            return result;
+        } catch (Exception ex) {
+            LOG.error(ex.getMessage(), ex);
+            return result;
+        } finally {
+            if (rs != null) {
+                try {
+                    rs.close();
+                } catch (SQLException e) {
+                    e.printStackTrace();
--- End diff --

remove this line
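For reference, the flagged e.printStackTrace() writes to stderr instead of going through the class's slf4j logger. One possible shape for the cleanup (a sketch only, not part of the patch; it reuses the existing dataSource, LOG, and QUERY_PUBLISHMENT_STATEMENT members) is try-with-resources, which removes the manual finally block altogether:

    // Sketch: listPublishments() resource handling with try-with-resources.
    // The ResultSet, PreparedStatement, and Connection are closed automatically
    // in reverse order, so no manual close (and no printStackTrace) is needed.
    try (Connection connection = dataSource.getConnection();
         PreparedStatement statement = connection.prepareStatement(QUERY_PUBLISHMENT_STATEMENT);
         ResultSet rs = statement.executeQuery()) {
        while (rs.next()) {
            // ... build publishPolicyMap exactly as in the patch ...
        }
    } catch (Exception ex) {
        LOG.error(ex.getMessage(), ex);  // failures go through the logger, not stderr
    }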
---
If your project is set up for it, you can reply to this email and have your reply appear on GitHub as well. If your project does not have this feature enabled and wishes so, or if the feature is enabled but not working, please contact infrastructure at infrastructure@apache.org or file a JIRA ticket with INFRA.
---