Return-Path: X-Original-To: apmail-cloudstack-commits-archive@www.apache.org Delivered-To: apmail-cloudstack-commits-archive@www.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id 324821054A for ; Wed, 8 May 2013 17:48:41 +0000 (UTC) Received: (qmail 41639 invoked by uid 500); 8 May 2013 17:48:28 -0000 Delivered-To: apmail-cloudstack-commits-archive@cloudstack.apache.org Received: (qmail 41493 invoked by uid 500); 8 May 2013 17:48:28 -0000 Mailing-List: contact commits-help@cloudstack.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: dev@cloudstack.apache.org Delivered-To: mailing list commits@cloudstack.apache.org Received: (qmail 40609 invoked by uid 99); 8 May 2013 17:48:26 -0000 Received: from tyr.zones.apache.org (HELO tyr.zones.apache.org) (140.211.11.114) by apache.org (qpsmtpd/0.29) with ESMTP; Wed, 08 May 2013 17:48:26 +0000 Received: by tyr.zones.apache.org (Postfix, from userid 65534) id 8AB3E889DA1; Wed, 8 May 2013 17:48:26 +0000 (UTC) Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit From: bfederle@apache.org To: commits@cloudstack.apache.org Date: Wed, 08 May 2013 17:48:52 -0000 Message-Id: In-Reply-To: <818559ec26454aa7b9803739214b1b22@git.apache.org> References: <818559ec26454aa7b9803739214b1b22@git.apache.org> X-Mailer: ASF-Git Admin Mailer Subject: [28/90] [abbrv] [partial] Moved most of the VOs and DAOs from server package into engine-schema as well http://git-wip-us.apache.org/repos/asf/cloudstack/blob/572e71e5/engine/schema/src/com/cloud/upgrade/dao/Upgrade222to224.java ---------------------------------------------------------------------- diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade222to224.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade222to224.java new file mode 100644 index 0000000..0841275 --- /dev/null +++ 
b/engine/schema/src/com/cloud/upgrade/dao/Upgrade222to224.java @@ -0,0 +1,610 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.File; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import org.apache.log4j.Logger; + +import com.cloud.capacity.Capacity; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; + +public class Upgrade222to224 implements DbUpgrade { + final static Logger s_logger = Logger.getLogger(Upgrade222to224.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] { "2.2.2", "2.2.3" }; + } + + @Override + public String getUpgradedVersion() { + return "2.2.4"; + } + + @Override + public boolean supportsRollingUpgrade() { + return true; + } + + @Override + public File[] getPrepareScripts() { + String script = Script.findScript("", "db/schema-222to224.sql"); + if (script == null) { + throw new CloudRuntimeException("Unable to find db/schema-222to224.sql"); + } + + return new File[] { new File(script) }; + } + + private 
void fixRelatedFkeyOnNetworksTable(Connection conn) throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`networks` DROP FOREIGN KEY `fk_networks__related`"); + try { + pstmt.executeUpdate(); + } catch (SQLException e) { + s_logger.debug("Ignore if the key is not there."); + } + pstmt.close(); + + pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`networks` ADD CONSTRAINT `fk_networks__related` FOREIGN KEY(`related`) REFERENCES `networks`(`id`) ON DELETE CASCADE"); + pstmt.executeUpdate(); + pstmt.close(); + } + + @Override + public void performDataMigration(Connection conn) { + try { + checkForDuplicatePublicNetworks(conn); + fixRelatedFkeyOnNetworksTable(conn); + updateClusterIdInOpHostCapacity(conn); + updateGuestOsType(conn); + updateNicsWithMode(conn); + updateUserStatsWithNetwork(conn); + dropIndexIfExists(conn); + fixBasicZoneNicCount(conn); + updateTotalCPUInOpHostCapacity(conn); + upgradeGuestOs(conn); + fixRecreatableVolumesProblem(conn); + updateFkeysAndIndexes(conn); + fixIPResouceCount(conn); + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to perform data migration", e); + } + } + + @Override + public File[] getCleanupScripts() { + String file = Script.findScript("", "db/schema-222to224-cleanup.sql"); + if (file == null) { + throw new CloudRuntimeException("Unable to find the upgrade script, schema-222to224-cleanup.sql"); + } + + return new File[] { new File(file) }; + } + + private void checkForDuplicatePublicNetworks(Connection conn) { + try { + // There should be one public network per zone + PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM `cloud`.`data_center`"); + ResultSet zones = pstmt.executeQuery(); + ArrayList zonesWithDuplicateNetworks = new ArrayList(); + String errorMsg = "Found zones with duplicate public networks during 222 to 224 upgrade. 
Zone IDs: "; + long zoneId; + + while (zones.next()) { + zoneId = zones.getLong(1); + pstmt = conn.prepareStatement("SELECT count(*) FROM `cloud`.`networks` WHERE `networks`.`traffic_type`='Public' AND `data_center_id`=?"); + pstmt.setLong(1, zoneId); + ResultSet rs = pstmt.executeQuery(); + + if (rs.next()) { + long numNetworks = rs.getLong(1); + if (numNetworks > 1) { + zonesWithDuplicateNetworks.add(zoneId); + } + } + } + + if (zonesWithDuplicateNetworks.size() > 0) { + s_logger.warn(errorMsg + zonesWithDuplicateNetworks); + } + + } catch (SQLException e) { + s_logger.warn(e); + throw new CloudRuntimeException("Unable to check for duplicate public networks as part of 222 to 224 upgrade."); + } + } + + private void updateGuestOsType(Connection conn) { + try { + PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM `cloud`.`guest_os` WHERE `display_name`='CentOS 5.3 (64-bit)'"); + ResultSet rs = pstmt.executeQuery(); + Long osId = null; + if (rs.next()) { + osId = rs.getLong(1); + } + + if (osId != null) { + pstmt = conn.prepareStatement("UPDATE `cloud`.`vm_template` SET `guest_os_id`=? 
WHERE id=2"); + pstmt.setLong(1, osId); + pstmt.executeUpdate(); + } + + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to update the guest os type for default template as a part of 222 to 224 upgrade", e); + } + } + + // fixes bug 9597 + private void fixRecreatableVolumesProblem(Connection conn) throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("UPDATE volumes as v SET recreatable=(SELECT recreatable FROM disk_offering d WHERE d.id = v.disk_offering_id) WHERE disk_offering_id != 0"); + pstmt.execute(); + pstmt.close(); + + pstmt = conn.prepareStatement("UPDATE volumes SET recreatable=0 WHERE disk_offering_id is NULL or disk_offering_id=0"); + pstmt.execute(); + pstmt.close(); + } + + private void updateClusterIdInOpHostCapacity(Connection conn) { + PreparedStatement pstmt = null; + ResultSet rs = null; + PreparedStatement pstmtUpdate = null; + try { + // Host and Primary storage capacity types + pstmt = conn.prepareStatement("SELECT host_id, capacity_type FROM op_host_capacity WHERE capacity_type IN (0,1,2,3)"); + rs = pstmt.executeQuery(); + while (rs.next()) { + long hostId = rs.getLong(1); + short capacityType = rs.getShort(2); + String updateSQLPrefix = "Update op_host_capacity set cluster_id = (select cluster_id from "; + String updateSQLSuffix = " where id = ? 
) where host_id = ?"; + String tableName = "host"; + switch (capacityType) { + case Capacity.CAPACITY_TYPE_MEMORY: + case Capacity.CAPACITY_TYPE_CPU: + tableName = "host"; + break; + case Capacity.CAPACITY_TYPE_STORAGE: + case Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED: + tableName = "storage_pool"; + break; + } + pstmtUpdate = conn.prepareStatement(updateSQLPrefix + tableName + updateSQLSuffix); + pstmtUpdate.setLong(1, hostId); + pstmtUpdate.setLong(2, hostId); + pstmtUpdate.executeUpdate(); + pstmtUpdate.close(); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to update the cluster Ids in Op_Host_capacity table", e); + } finally { + if (pstmtUpdate != null) { + try { + pstmtUpdate.close(); + } catch (SQLException e) { + } + } + if (rs != null) { + try { + rs.close(); + } catch (SQLException e) { + } + } + if (pstmt != null) { + try { + pstmt.close(); + } catch (SQLException e) { + } + } + + } + } + + private void updateNicsWithMode(Connection conn) { + try { + HashMap nicNetworkMaps = new HashMap(); + PreparedStatement pstmt = conn.prepareStatement("SELECT id, network_id FROM nics WHERE mode IS NULL"); + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + nicNetworkMaps.put(rs.getLong(1), rs.getLong(2)); + } + + for (Long nic : nicNetworkMaps.keySet()) { + pstmt = conn.prepareStatement("SELECT mode FROM networks WHERE id=?"); + pstmt.setLong(1, nicNetworkMaps.get(nic)); + rs = pstmt.executeQuery(); + if (rs.next()) { + String mode = rs.getString(1); + pstmt = conn.prepareStatement("UPDATE nics SET mode=? 
where id=?"); + pstmt.setString(1, mode); + pstmt.setLong(2, nic); + pstmt.executeUpdate(); + } + } + rs.close(); + pstmt.close(); + + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to update the Mode field for nics as a part of 222 to 224 upgrade", e); + } + } + + private void updateUserStatsWithNetwork(Connection conn) { + try { + PreparedStatement pstmt = conn.prepareStatement("SELECT id, device_id FROM user_statistics WHERE network_id=0 or network_id is NULL and public_ip_address is NULL"); + ResultSet rs = pstmt.executeQuery(); + + while (rs.next()) { + Long id = rs.getLong(1); + Long instanceId = rs.getLong(2); + + if (instanceId != null && instanceId.longValue() != 0) { + // Check if domR is already expunged; we shouldn't update user stats in this case as nics are gone too + pstmt = conn.prepareStatement("SELECT * from vm_instance where id=? and removed is not null"); + pstmt.setLong(1, instanceId); + ResultSet rs1 = pstmt.executeQuery(); + + if (rs1.next()) { + s_logger.debug("Not updating user_statistics table for domR id=" + instanceId + " as domR is already expunged"); + continue; + } + + pstmt = conn.prepareStatement("SELECT network_id FROM nics WHERE instance_id=? 
AND mode='Dhcp'"); + pstmt.setLong(1, instanceId); + ResultSet rs2 = pstmt.executeQuery(); + + if (!rs2.next()) { + throw new CloudRuntimeException("Failed to update user_statistics table as a part of 222 to 224 upgrade: couldn't get network_id from nics table"); + } + + Long networkId = rs2.getLong(1); + + if (networkId != null) { + pstmt = conn.prepareStatement("UPDATE user_statistics SET network_id=?, device_type='DomainRouter' where id=?"); + pstmt.setLong(1, networkId); + pstmt.setLong(2, id); + pstmt.executeUpdate(); + } + } + } + + rs.close(); + pstmt.close(); + + s_logger.debug("Upgraded user_statistics with networkId for DomainRouter device type"); + + // update network_id information for ExternalFirewall and ExternalLoadBalancer device types + PreparedStatement pstmt1 = conn.prepareStatement("update user_statistics us, user_ip_address uip set us.network_id = uip.network_id where us.public_ip_address = uip.public_ip_address " + + "and us.device_type in ('ExternalFirewall' , 'ExternalLoadBalancer')"); + pstmt1.executeUpdate(); + pstmt1.close(); + + s_logger.debug("Upgraded user_statistics with networkId for ExternalFirewall and ExternalLoadBalancer device types"); + + s_logger.debug("Successfully update user_statistics table with network_ids as a part of 222 to 224 upgrade"); + + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to update user_statistics table with network_ids as a part of 222 to 224 upgrade", e); + } + } + + private void dropIndexIfExists(Connection conn) { + try { + PreparedStatement pstmt = conn.prepareStatement("SHOW INDEX FROM domain WHERE KEY_NAME = 'path'"); + ResultSet rs = pstmt.executeQuery(); + + if (rs.next()) { + pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`domain` DROP INDEX `path`"); + pstmt.executeUpdate(); + s_logger.debug("Unique key 'path' is removed successfully"); + } + + rs.close(); + pstmt.close(); + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to drop 'path' index for 
'domain' table due to:", e); + } + } + + private void fixBasicZoneNicCount(Connection conn) { + try { + PreparedStatement pstmt = conn.prepareStatement("SELECT id from data_center where networktype='Basic'"); + ResultSet rs = pstmt.executeQuery(); + + while (rs.next()) { + Long zoneId = rs.getLong(1); + Long networkId = null; + Long vmCount = 0L; + s_logger.debug("Updating basic zone id=" + zoneId + " with correct nic count"); + + pstmt = conn.prepareStatement("SELECT id from networks where data_center_id=? AND guest_type='Direct'"); + pstmt.setLong(1, zoneId); + rs = pstmt.executeQuery(); + + if (rs.next()) { + networkId = rs.getLong(1); + } else { + continue; + } + + pstmt = conn.prepareStatement("SELECT count(*) from vm_instance where name like 'i-%' and (state='Running' or state='Starting' or state='Stopping')"); + rs = pstmt.executeQuery(); + + if (rs.next()) { + vmCount = rs.getLong(1); + } + + pstmt = conn.prepareStatement("UPDATE op_networks set nics_count=? where id=?"); + pstmt.setLong(1, vmCount); + pstmt.setLong(2, networkId); + pstmt.executeUpdate(); + + } + + s_logger.debug("Basic zones are updated with correct nic counts successfully"); + rs.close(); + pstmt.close(); + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to drop 'path' index for 'domain' table due to:", e); + } + } + + private void updateTotalCPUInOpHostCapacity(Connection conn) { + PreparedStatement pstmt = null; + ResultSet rs = null; + PreparedStatement pstmtUpdate = null; + try { + // Load all Routing hosts + s_logger.debug("Updating total CPU capacity entries in op_host_capacity"); + pstmt = conn.prepareStatement("SELECT id, cpus, speed FROM host WHERE type = 'Routing'"); + rs = pstmt.executeQuery(); + while (rs.next()) { + long hostId = rs.getLong(1); + int cpus = rs.getInt(2); + long speed = rs.getLong(3); + + long totalCapacity = cpus * speed; + + String updateSQL = "UPDATE op_host_capacity SET total_capacity = ? WHERE host_id = ? 
AND capacity_type = 1"; + pstmtUpdate = conn.prepareStatement(updateSQL); + pstmtUpdate.setLong(1, totalCapacity); + pstmtUpdate.setLong(2, hostId); + pstmtUpdate.executeUpdate(); + pstmtUpdate.close(); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to update the total host CPU capacity in Op_Host_capacity table", e); + } finally { + if (pstmtUpdate != null) { + try { + pstmtUpdate.close(); + } catch (SQLException e) { + } + } + if (rs != null) { + try { + rs.close(); + } catch (SQLException e) { + } + } + if (pstmt != null) { + try { + pstmt.close(); + } catch (SQLException e) { + } + } + + } + } + + private void upgradeGuestOs(Connection conn) { + try { + PreparedStatement pstmt = conn.prepareStatement("SELECT * from guest_os WHERE id=138"); + ResultSet rs = pstmt.executeQuery(); + + if (!rs.next()) { + pstmt = conn.prepareStatement("INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (138, 7, 'None')"); + pstmt.executeUpdate(); + s_logger.debug("Inserted NONE category to guest_os table"); + } + + rs.close(); + pstmt.close(); + } catch (SQLException e) { + throw new CloudRuntimeException("Unalbe to insert NONE guest category to guest_os table due to:", e); + } + } + + private void updateFkeysAndIndexes(Connection conn) throws SQLException { + List keysToAdd = new ArrayList(); + List indexesToAdd = new ArrayList(); + List keysToDrop = new ArrayList(); + List indexesToDrop = new ArrayList(); + + // populate indexes/keys to drop + keysToDrop.add("ALTER TABLE `cloud`.`data_center` DROP FOREIGN KEY `fk_data_center__domain_id`"); + indexesToDrop.add("ALTER TABLE `cloud`.`data_center` DROP KEY `i_data_center__domain_id`"); + + keysToDrop.add("ALTER TABLE `cloud`.`vlan` DROP FOREIGN KEY `fk_vlan__data_center_id`"); + keysToDrop.add("ALTER TABLE `cloud`.`op_dc_ip_address_alloc` DROP FOREIGN KEY `fk_op_dc_ip_address_alloc__data_center_id`"); + + indexesToDrop.add("ALTER TABLE `cloud`.`networks` DROP FOREIGN KEY 
`fk_networks__network_offering_id`"); + indexesToDrop.add("ALTER TABLE `cloud`.`networks` DROP FOREIGN KEY `fk_networks__data_center_id`"); + indexesToDrop.add("ALTER TABLE `cloud`.`networks` DROP FOREIGN KEY `fk_networks__account_id`"); + indexesToDrop.add("ALTER TABLE `cloud`.`networks` DROP FOREIGN KEY `fk_networks__domain_id`"); + keysToDrop.add("ALTER TABLE `cloud`.`networks` DROP KEY `i_networks__removed`"); + + // populate indexes/keys to add + keysToAdd.add("ALTER TABLE `cloud`.`data_center` ADD CONSTRAINT `fk_data_center__domain_id` FOREIGN KEY (`domain_id`) REFERENCES `domain`(`id`)"); + indexesToAdd.add("ALTER TABLE `cloud`.`data_center` ADD INDEX `i_data_center__domain_id`(`domain_id`)"); + + keysToAdd.add("ALTER TABLE `cloud`.`vlan` ADD CONSTRAINT `fk_vlan__data_center_id` FOREIGN KEY `fk_vlan__data_center_id`(`data_center_id`) REFERENCES `data_center`(`id`)"); + keysToAdd + .add("ALTER TABLE `cloud`.`op_dc_ip_address_alloc` ADD CONSTRAINT `fk_op_dc_ip_address_alloc__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE"); + + keysToAdd.add("ALTER TABLE `cloud`.`networks` ADD INDEX `i_networks__removed` (`removed`)"); + + indexesToAdd.add("ALTER TABLE `cloud`.`networks` ADD CONSTRAINT `fk_networks__network_offering_id` FOREIGN KEY (`network_offering_id`) REFERENCES `network_offerings`(`id`)"); + indexesToAdd.add("ALTER TABLE `cloud`.`networks` ADD CONSTRAINT `fk_networks__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center` (`id`)"); + indexesToAdd.add("ALTER TABLE `cloud`.`networks` ADD CONSTRAINT `fk_networks__account_id` FOREIGN KEY (`account_id`) REFERENCES `account` (`id`)"); + indexesToAdd.add("ALTER TABLE `cloud`.`networks` ADD CONSTRAINT `fk_networks__domain_id` FOREIGN KEY (`domain_id`) REFERENCES `domain` (`id`)"); + + // drop keys + for (String key : keysToDrop) { + PreparedStatement pstmt = conn.prepareStatement(key); + try { + pstmt.executeUpdate(); + } catch (SQLException e) { + 
s_logger.debug("Ignore if the key is not there."); + } + pstmt.close(); + } + + // drop indexes + for (String index : indexesToDrop) { + PreparedStatement pstmt = conn.prepareStatement(index); + try { + pstmt.executeUpdate(); + } catch (SQLException e) { + s_logger.debug("Ignore if the index is not there."); + } + pstmt.close(); + } + + // update indexes + for (String index : indexesToAdd) { + PreparedStatement pstmt = conn.prepareStatement(index); + pstmt.executeUpdate(); + pstmt.close(); + } + + // update keys + for (String key : keysToAdd) { + PreparedStatement pstmt = conn.prepareStatement(key); + pstmt.executeUpdate(); + pstmt.close(); + } + } + + // In 2.2.x there was a bug when resource_count was incremented when Direct ip was allocated. Have to fix it during the + // upgrade + private void fixIPResouceCount(Connection conn) throws SQLException { + // First set all public_ip fields to be 0 + PreparedStatement pstmt = conn.prepareStatement("UPDATE resource_count set count=0 where type='public_ip'"); + pstmt.executeUpdate(); + + pstmt = conn.prepareStatement("SELECT id, account_id from resource_count where type='public_ip' and domain_id is NULL"); + ResultSet rs = pstmt.executeQuery(); + + while (rs.next()) { + // upgrade resource count for account + Long countId = rs.getLong(1); + Long accountId = rs.getLong(2); + pstmt = conn.prepareStatement("SELECT count(*) from user_ip_address where network_id is not null and account_id=?"); + pstmt.setLong(1, accountId); + ResultSet rs1 = pstmt.executeQuery(); + if (rs1.next()) { + Long ipCount = rs1.getLong(1); + if (ipCount.longValue() > 0) { + pstmt = conn.prepareStatement("UPDATE resource_count set count=? 
where id=?"); + pstmt.setLong(1, ipCount); + pstmt.setLong(2, countId); + pstmt.executeUpdate(); + } + rs1.close(); + } + } + rs.close(); + pstmt.close(); + + // upgrade resource count for domain + HashMap domainIpsCount = new HashMap(); + pstmt = conn.prepareStatement("SELECT account_id, count from resource_count where type='public_ip' and domain_id is NULL"); + rs = pstmt.executeQuery(); + while (rs.next()) { + Long accountId = rs.getLong(1); + Long count = rs.getLong(2); + pstmt = conn.prepareStatement("SELECT domain_id from account where id=?"); + pstmt.setLong(1, accountId); + ResultSet rs1 = pstmt.executeQuery(); + + if (!rs1.next()) { + throw new CloudRuntimeException("Unable to get domain information from account table as a part of resource_count table cleanup"); + } + + Long domainId = rs1.getLong(1); + + + if (!domainIpsCount.containsKey(domainId)) { + domainIpsCount.put(domainId, count); + } else { + long oldCount = domainIpsCount.get(domainId); + long newCount = oldCount + count; + domainIpsCount.put(domainId, newCount); + } + rs1.close(); + + Long parentId = 0L; + while (parentId != null) { + pstmt = conn.prepareStatement("SELECT parent from domain where id=?"); + pstmt.setLong(1, domainId); + ResultSet parentSet = pstmt.executeQuery(); + + if (parentSet.next()) { + parentId = parentSet.getLong(1); + if (parentId == null || parentId.longValue() == 0) { + parentId = null; + continue; + } + + if (!domainIpsCount.containsKey(parentId)) { + domainIpsCount.put(parentId, count); + } else { + long oldCount = domainIpsCount.get(parentId); + long newCount = oldCount + count; + domainIpsCount.put(parentId, newCount); + } + parentSet.close(); + domainId = parentId; + } + } + } + + rs.close(); + + for (Long domainId : domainIpsCount.keySet()) { + pstmt = conn.prepareStatement("UPDATE resource_count set count=? where domain_id=? 
and type='public_ip'"); + pstmt.setLong(1, domainIpsCount.get(domainId)); + pstmt.setLong(2, domainId); + pstmt.executeUpdate(); + } + + pstmt.close(); + + s_logger.debug("Resource limit is cleaned up successfully as a part of db upgrade"); + + } +} http://git-wip-us.apache.org/repos/asf/cloudstack/blob/572e71e5/engine/schema/src/com/cloud/upgrade/dao/Upgrade222to224Premium.java ---------------------------------------------------------------------- diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade222to224Premium.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade222to224Premium.java new file mode 100644 index 0000000..d78701e --- /dev/null +++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade222to224Premium.java @@ -0,0 +1,87 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.upgrade.dao; + +import java.io.File; +import java.sql.Connection; +import java.sql.PreparedStatement; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; + +public class Upgrade222to224Premium extends Upgrade222to224 { + final static Logger s_logger = Logger.getLogger(Upgrade222to224Premium.class); + + @Override + public File[] getPrepareScripts() { + File[] scripts = super.getPrepareScripts(); + File[] newScripts = new File[2]; + newScripts[0] = scripts[0]; + + String file = Script.findScript("","db/schema-222to224-premium.sql"); + if (file == null) { + throw new CloudRuntimeException("Unable to find the upgrade script, schema-222to224-premium.sql"); + } + + newScripts[1] = new File(file); + + return newScripts; + } + + @Override + public void performDataMigration(Connection conn) { + super.performDataMigration(conn); + updateUserStats(conn); + } + + @Override + public File[] getCleanupScripts() { + File[] scripts = super.getCleanupScripts(); + File[] newScripts = new File[1]; + // Change the array to 2 when you add in the scripts for premium. 
+ newScripts[0] = scripts[0]; + return newScripts; + } + + private void updateUserStats(Connection conn) { + try { + + // update network_id information + PreparedStatement pstmt = conn.prepareStatement("update cloud_usage.user_statistics uus, cloud.user_statistics us set uus.network_id = " + + "us.network_id where uus.id = us.id"); + pstmt.executeUpdate(); + pstmt.close(); + + s_logger.debug("Upgraded cloud_usage user_statistics with networkId"); + + + // update network_id information in usage_network + PreparedStatement pstmt1 = conn.prepareStatement("update cloud_usage.usage_network un, cloud_usage.user_statistics us set un.network_id = " + + "us.network_id where us.account_id = un.account_id and us.data_center_id = un.zone_id and us.device_id = un.host_id"); + pstmt1.executeUpdate(); + pstmt1.close(); + + s_logger.debug("Upgraded cloud_usage usage_network with networkId"); + + + } catch (Exception e) { + throw new CloudRuntimeException("Failed to upgrade user stats: ", e); + } + } +} http://git-wip-us.apache.org/repos/asf/cloudstack/blob/572e71e5/engine/schema/src/com/cloud/upgrade/dao/Upgrade224to225.java ---------------------------------------------------------------------- diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade224to225.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade224to225.java new file mode 100644 index 0000000..b860124 --- /dev/null +++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade224to225.java @@ -0,0 +1,351 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.upgrade.dao;

import java.io.File;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

import org.apache.log4j.Logger;

import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;

/**
 * DB upgrade from CloudStack 2.2.4 to 2.2.5: creates missing default security
 * groups, drops columns/keys that no longer exist in the 2.2.5 schema, re-adds
 * foreign keys that earlier upgrades may have missed, and seeds the ovs tunnel
 * lock account.
 */
public class Upgrade224to225 implements DbUpgrade {
    final static Logger s_logger = Logger.getLogger(Upgrade224to225.class);

    @Override
    public File[] getPrepareScripts() {
        String file = Script.findScript("", "db/schema-224to225.sql");
        if (file == null) {
            throw new CloudRuntimeException("Unable to find the upgrade script, schema-224to225.sql");
        }

        return new File[] { new File(file) };
    }

    @Override
    public void performDataMigration(Connection conn) {
        // create security groups for existing accounts if not present
        createSecurityGroups(conn);
        dropKeysIfExist(conn);
        dropTableColumnsIfExist(conn);
        addMissingKeys(conn);
        addMissingOvsAccount(conn);
    }

    @Override
    public File[] getCleanupScripts() {
        String file = Script.findScript("", "db/schema-224to225-cleanup.sql");
        if (file == null) {
            throw new CloudRuntimeException("Unable to find the cleanup script, schema-224to225-cleanup.sql");
        }

        return new File[] { new File(file) };
    }

    @Override
    public String[] getUpgradableVersionRange() {
        return new String[] { "2.2.4", "2.2.4" };
    }

    @Override
    public String getUpgradedVersion() {
        return "2.2.5";
    }

    @Override
    public boolean supportsRollingUpgrade() {
        return false;
    }

    /**
     * Inserts a 'default' security group for every non-removed, non-system
     * account (id != 1) that does not already have one.
     */
    private void createSecurityGroups(Connection conn) {
        s_logger.debug("Creating missing default security group as a part of 224-225 upgrade");
        try {
            // collect the account ids first so we reuse statements cleanly below
            List<Long> accounts = new ArrayList<Long>();
            PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM account WHERE removed IS NULL and id != 1");
            ResultSet rs = pstmt.executeQuery();
            while (rs.next()) {
                accounts.add(rs.getLong(1));
            }
            rs.close();
            pstmt.close();

            for (Long accountId : accounts) {
                // get default security group
                pstmt = conn.prepareStatement("SELECT * FROM security_group WHERE name='default' and account_id=?");
                pstmt.setLong(1, accountId);
                rs = pstmt.executeQuery();
                boolean defaultGroupExists = rs.next();
                rs.close();
                pstmt.close();

                if (!defaultGroupExists) {
                    s_logger.debug("Default security group is missing for account id=" + accountId + " so adding it");

                    // get accountName/domainId information
                    pstmt = conn.prepareStatement("SELECT account_name, domain_id FROM account WHERE id=?");
                    pstmt.setLong(1, accountId);
                    ResultSet rs1 = pstmt.executeQuery();
                    if (!rs1.next()) {
                        throw new CloudRuntimeException("Unable to create default security group for account id=" + accountId + ": unable to get accountName/domainId info");
                    }
                    String accountName = rs1.getString(1);
                    Long domainId = rs1.getLong(2);
                    rs1.close();
                    pstmt.close();

                    pstmt = conn
                            .prepareStatement("INSERT INTO `cloud`.`security_group` (name, description, account_name, account_id, domain_id) VALUES ('default', 'Default Security Group', ?, ?, ?)");
                    pstmt.setString(1, accountName);
                    pstmt.setLong(2, accountId);
                    pstmt.setLong(3, domainId);
                    pstmt.executeUpdate();
                    pstmt.close();
                }
            }
        } catch (SQLException e) {
            throw new CloudRuntimeException("Unable to create default security groups for existing accounts due to", e);
        }
    }

    /** Drops columns that were removed from the schema in 2.2.5, if present. */
    private void dropTableColumnsIfExist(Connection conn) {
        HashMap<String, List<String>> tablesToModify = new HashMap<String, List<String>>();

        // account table
        List<String> columns = new ArrayList<String>();
        columns.add("network_domain");
        tablesToModify.put("account", columns);

        // console proxy table
        columns = new ArrayList<String>();
        columns.add("gateway");
        columns.add("dns1");
        columns.add("dns2");
        columns.add("domain");
        columns.add("guest_mac_address");
        columns.add("guest_ip_address");
        columns.add("guest_netmask");
        columns.add("vlan_db_id");
        columns.add("vlan_id");
        columns.add("ram_size");
        tablesToModify.put("console_proxy", columns);

        // secondary storage table
        columns = new ArrayList<String>();
        columns.add("gateway");
        columns.add("dns1");
        columns.add("dns2");
        columns.add("domain");
        columns.add("guest_mac_address");
        columns.add("guest_ip_address");
        columns.add("guest_netmask");
        columns.add("vlan_db_id");
        columns.add("vlan_id");
        columns.add("ram_size");
        tablesToModify.put("secondary_storage_vm", columns);

        // disk offering table
        columns = new ArrayList<String>();
        columns.add("mirrored");
        tablesToModify.put("disk_offering", columns);

        // domain router table
        columns = new ArrayList<String>();
        columns.add("gateway");
        columns.add("ram_size");
        columns.add("dns1");
        columns.add("dns2");
        columns.add("domain");
        columns.add("guest_mac_address");
        columns.add("guest_dc_mac_address");
        columns.add("vnet");
        columns.add("dc_vlan");
        columns.add("vlan_db_id");
        columns.add("vlan_id");
        columns.add("dhcp_ip_address");
        tablesToModify.put("domain_router", columns);

        // volumes table
        columns = new ArrayList<String>();
        columns.add("mirror_state");
        columns.add("mirror_vol");
        columns.add("destroyed");
        tablesToModify.put("volumes", columns);

        // vm_instance table
        columns = new ArrayList<String>();
        columns.add("mirrored_vols");
        tablesToModify.put("vm_instance", columns);

        // user_vm table
        columns = new ArrayList<String>();
        columns.add("domain_router_id");
        columns.add("vnet");
        columns.add("dc_vlan");
        columns.add("external_ip_address");
        columns.add("external_mac_address");
        columns.add("external_vlan_db_id");
        tablesToModify.put("user_vm", columns);

        // service_offerings table
        columns = new ArrayList<String>();
        columns.add("guest_ip_type");
        tablesToModify.put("service_offering", columns);

        s_logger.debug("Dropping columns that don't exist in 2.2.5 version of the DB...");
        for (String tableName : tablesToModify.keySet()) {
            DbUpgradeUtils.dropTableColumnsIfExist(conn, tableName, tablesToModify.get(tableName));
        }
    }

    /**
     * Drops foreign keys first (they pin the indexes), then indexes, for keys
     * that no longer exist in the 2.2.5 schema.
     */
    private void dropKeysIfExist(Connection conn) {
        HashMap<String, List<String>> foreignKeys = new HashMap<String, List<String>>();
        HashMap<String, List<String>> indexes = new HashMap<String, List<String>>();

        // console proxy table
        List<String> keys = new ArrayList<String>();
        keys.add("fk_console_proxy__vlan_id");
        foreignKeys.put("console_proxy", keys);

        keys = new ArrayList<String>();
        keys.add("i_console_proxy__vlan_id");
        indexes.put("console_proxy", keys);

        // mshost table
        keys = new ArrayList<String>();
        keys.add("msid_2");
        indexes.put("mshost", keys);

        // domain router table
        keys = new ArrayList<String>();
        keys.add("fk_domain_router__vlan_id");
        keys.add("fk_domain_route__id");
        foreignKeys.put("domain_router", keys);

        keys = new ArrayList<String>();
        keys.add("i_domain_router__public_ip_address");
        keys.add("i_domain_router__vlan_id");
        indexes.put("domain_router", keys);

        // user_vm table
        keys = new ArrayList<String>();
        keys.add("i_user_vm__domain_router_id");
        keys.add("i_user_vm__external_ip_address");
        keys.add("i_user_vm__external_vlan_db_id");
        indexes.put("user_vm", keys);

        keys = new ArrayList<String>();
        keys.add("fk_user_vm__domain_router_id");
        keys.add("fk_user_vm__external_vlan_db_id");
        keys.add("fk_user_vm__external_ip_address");
        foreignKeys.put("user_vm", keys);

        // user_vm_details table: the same key name is dropped both as a
        // foreign key and as an index
        keys = new ArrayList<String>();
        keys.add("fk_user_vm_details__vm_id");
        foreignKeys.put("user_vm_details", keys);
        keys = new ArrayList<String>();
        keys.add("fk_user_vm_details__vm_id");
        indexes.put("user_vm_details", keys);

        // snapshots table
        keys = new ArrayList<String>();
        keys.add("id_2");
        indexes.put("snapshots", keys);

        // remote_access_vpn
        keys = new ArrayList<String>();
        keys.add("fk_remote_access_vpn__server_addr");
        foreignKeys.put("remote_access_vpn", keys);

        keys = new ArrayList<String>();
        keys.add("fk_remote_access_vpn__server_addr_id");
        indexes.put("remote_access_vpn", keys);

        // drop all foreign keys first
        s_logger.debug("Dropping keys that don't exist in 2.2.5 version of the DB...");
        for (String tableName : foreignKeys.keySet()) {
            DbUpgradeUtils.dropKeysIfExist(conn, tableName, foreignKeys.get(tableName), true);
        }

        // drop indexes now
        for (String tableName : indexes.keySet()) {
            DbUpgradeUtils.dropKeysIfExist(conn, tableName, indexes.get(tableName), false);
        }
    }

    /**
     * Re-adds foreign keys expected by the 2.2.5 schema; a key is skipped when
     * information_schema reports it already exists.
     */
    private void addMissingKeys(Connection conn) {
        PreparedStatement pstmt = null;
        try {
            s_logger.debug("Adding missing foreign keys");

            HashMap<String, String> keyToTableMap = new HashMap<String, String>();
            keyToTableMap.put("fk_console_proxy__id", "console_proxy");
            keyToTableMap.put("fk_secondary_storage_vm__id", "secondary_storage_vm");
            keyToTableMap.put("fk_template_spool_ref__template_id", "template_spool_ref");
            keyToTableMap.put("fk_template_spool_ref__pool_id", "template_spool_ref");
            keyToTableMap.put("fk_user_vm_details__vm_id", "user_vm_details");
            keyToTableMap.put("fk_op_ha_work__instance_id", "op_ha_work");
            keyToTableMap.put("fk_op_ha_work__mgmt_server_id", "op_ha_work");
            keyToTableMap.put("fk_op_ha_work__host_id", "op_ha_work");

            HashMap<String, String> keyToStatementMap = new HashMap<String, String>();
            keyToStatementMap.put("fk_console_proxy__id", "(`id`) REFERENCES `vm_instance` (`id`) ON DELETE CASCADE");
            keyToStatementMap.put("fk_secondary_storage_vm__id", "(`id`) REFERENCES `vm_instance` (`id`) ON DELETE CASCADE");
            keyToStatementMap.put("fk_template_spool_ref__template_id", "(`template_id`) REFERENCES `vm_template` (`id`)");
            keyToStatementMap.put("fk_template_spool_ref__pool_id", "(`pool_id`) REFERENCES `storage_pool` (`id`) ON DELETE CASCADE");
            keyToStatementMap.put("fk_user_vm_details__vm_id", "(`vm_id`) REFERENCES `user_vm` (`id`) ON DELETE CASCADE");
            keyToStatementMap.put("fk_op_ha_work__instance_id", "(`instance_id`) REFERENCES `vm_instance` (`id`) ON DELETE CASCADE");
            keyToStatementMap.put("fk_op_ha_work__mgmt_server_id", "(`mgmt_server_id`) REFERENCES `mshost`(`msid`)");
            keyToStatementMap.put("fk_op_ha_work__host_id", "(`host_id`) REFERENCES `host` (`id`)");

            for (String key : keyToTableMap.keySet()) {
                String tableName = keyToTableMap.get(key);
                // skip keys that information_schema says are already present
                pstmt = conn
                        .prepareStatement("SELECT * FROM information_schema.table_constraints a JOIN information_schema.key_column_usage b ON a.table_schema = b.table_schema AND a.constraint_name = b.constraint_name WHERE a.table_schema=database() AND a.constraint_type='FOREIGN KEY' and a.constraint_name=?");
                pstmt.setString(1, key);
                ResultSet rs = pstmt.executeQuery();
                boolean keyExists = rs.next();
                rs.close();
                pstmt.close();
                if (keyExists) {
                    continue;
                }

                pstmt = conn.prepareStatement("ALTER TABLE " + tableName + " ADD CONSTRAINT " + key + " FOREIGN KEY " + keyToStatementMap.get(key));
                pstmt.executeUpdate();
                pstmt.close();
                s_logger.debug("Added missing key " + key + " to table " + tableName);
            }
            s_logger.debug("Missing keys were added successfully as a part of 224 to 225 upgrade");
        } catch (SQLException e) {
            s_logger.error("Unable to add missing foreign key; following statement was executed:" + pstmt);
            throw new CloudRuntimeException("Unable to add missing keys due to exception", e);
        }
    }

    /** Seeds the ovs_tunnel_account lock row when the table is empty. */
    private void addMissingOvsAccount(Connection conn) {
        try {
            PreparedStatement pstmt = conn.prepareStatement("SELECT * from ovs_tunnel_account");
            ResultSet rs = pstmt.executeQuery();
            boolean accountExists = rs.next();
            rs.close();
            pstmt.close();
            if (!accountExists) {
                s_logger.debug("Adding missing ovs tunnel account");
                pstmt = conn.prepareStatement("INSERT INTO `cloud`.`ovs_tunnel_account` (`from`, `to`, `account`, `key`, `port_name`, `state`) VALUES (0, 0, 0, 0, 'lock', 'SUCCESS')");
                pstmt.executeUpdate();
                pstmt.close();
            }
        } catch (SQLException e) {
            s_logger.error("Unable to add missing ovs tunnel account due to ", e);
            throw new CloudRuntimeException("Unable to add missing ovs tunnel account due to ", e);
        }
    }
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.upgrade.dao;

import java.io.File;
import java.sql.Connection;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

import org.apache.log4j.Logger;

import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;

/**
 * DB upgrade from CloudStack 2.2.5 to 2.2.6: drops domain_router keys and
 * columns that no longer exist in the 2.2.6 schema.
 */
public class Upgrade225to226 implements DbUpgrade {
    final static Logger s_logger = Logger.getLogger(Upgrade225to226.class);

    @Override
    public File[] getPrepareScripts() {
        String file = Script.findScript("", "db/schema-225to226.sql");
        if (file == null) {
            throw new CloudRuntimeException("Unable to find the upgrade script, schema-225to226.sql");
        }

        return new File[] { new File(file) };
    }

    @Override
    public void performDataMigration(Connection conn) {
        // keys must go before the columns they reference
        dropKeysIfExist(conn);
        dropTableColumnsIfExist(conn);
    }

    @Override
    public File[] getCleanupScripts() {
        // no cleanup script for this upgrade
        return null;
    }

    @Override
    public String[] getUpgradableVersionRange() {
        return new String[] { "2.2.5", "2.2.5" };
    }

    @Override
    public String getUpgradedVersion() {
        return "2.2.6";
    }

    @Override
    public boolean supportsRollingUpgrade() {
        return false;
    }

    /** Drops domain_router columns removed from the 2.2.6 schema, if present. */
    private void dropTableColumnsIfExist(Connection conn) {
        HashMap<String, List<String>> tablesToModify = new HashMap<String, List<String>>();

        // domain router table
        List<String> columns = new ArrayList<String>();
        columns.add("account_id");
        columns.add("domain_id");
        tablesToModify.put("domain_router", columns);

        s_logger.debug("Dropping columns that don't exist in 2.2.6 version of the DB...");
        for (String tableName : tablesToModify.keySet()) {
            DbUpgradeUtils.dropTableColumnsIfExist(conn, tableName, tablesToModify.get(tableName));
        }
    }

    /** Drops the domain_router account foreign key, then its index. */
    private void dropKeysIfExist(Connection conn) {
        HashMap<String, List<String>> foreignKeys = new HashMap<String, List<String>>();
        HashMap<String, List<String>> indexes = new HashMap<String, List<String>>();

        // domain router table
        List<String> keys = new ArrayList<String>();
        keys.add("fk_domain_router__account_id");
        foreignKeys.put("domain_router", keys);

        keys = new ArrayList<String>();
        keys.add("i_domain_router__account_id");
        indexes.put("domain_router", keys);

        // drop all foreign keys first
        s_logger.debug("Dropping keys that don't exist in 2.2.6 version of the DB...");
        for (String tableName : foreignKeys.keySet()) {
            DbUpgradeUtils.dropKeysIfExist(conn, tableName, foreignKeys.get(tableName), true);
        }

        // drop indexes now
        for (String tableName : indexes.keySet()) {
            DbUpgradeUtils.dropKeysIfExist(conn, tableName, indexes.get(tableName), false);
        }
    }
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.upgrade.dao;

import java.io.File;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

import org.apache.log4j.Logger;

import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;

/**
 * DB upgrade from CloudStack 2.2.6/2.2.7 to 2.2.8: points snapshots at their
 * zone's secondary storage host, rescales disk_offering sizes, flags domain
 * level networks, and backfills missing volume.delete usage events.
 */
public class Upgrade227to228 implements DbUpgrade {
    final static Logger s_logger = Logger.getLogger(Upgrade227to228.class);

    @Override
    public String[] getUpgradableVersionRange() {
        return new String[] { "2.2.6", "2.2.7"};
    }

    @Override
    public String getUpgradedVersion() {
        return "2.2.8";
    }

    @Override
    public boolean supportsRollingUpgrade() {
        return true;
    }

    @Override
    public File[] getPrepareScripts() {
        String script = Script.findScript("", "db/schema-227to228.sql");
        if (script == null) {
            throw new CloudRuntimeException("Unable to find db/schema-227to228.sql");
        }

        return new File[] { new File(script) };
    }

    @Override
    public void performDataMigration(Connection conn) {
        try {
            PreparedStatement pstmt = conn.prepareStatement("select id from data_center");
            ResultSet rs = pstmt.executeQuery();
            while (rs.next()) {
                long dcId = rs.getLong(1);
                // find the zone's secondary storage host, if it has one
                PreparedStatement hostPstmt = conn.prepareStatement("select id from host where data_center_id=? and type='SecondaryStorage'");
                hostPstmt.setLong(1, dcId);
                ResultSet rs1 = hostPstmt.executeQuery();
                if (rs1.next()) {
                    long secHostId = rs1.getLong(1);
                    PreparedStatement updatePstmt = conn.prepareStatement("update snapshots set sechost_id=? where data_center_id=?");
                    updatePstmt.setLong(1, secHostId);
                    updatePstmt.setLong(2, dcId);
                    updatePstmt.executeUpdate();
                    updatePstmt.close();
                }
                rs1.close();
                hostPstmt.close();
            }
            rs.close();
            pstmt.close();

            // rescale small disk_offering sizes; presumably an MB-to-byte
            // conversion guarded so already-converted rows are skipped --
            // TODO confirm against the 2.2.8 schema notes
            PreparedStatement sizePstmt = conn.prepareStatement("update disk_offering set disk_size = disk_size * 1024 * 1024 where disk_size <= 2 * 1024 * 1024 and disk_size != 0");
            sizePstmt.executeUpdate();
            sizePstmt.close();

        } catch (SQLException e) {
            s_logger.error("Failed to DB migration for multiple secondary storages", e);
            throw new CloudRuntimeException("Failed to DB migration for multiple secondary storages", e);
        }

        updateDomainLevelNetworks(conn);
        updateVolumeUsageRecords(conn);
    }

    @Override
    public File[] getCleanupScripts() {
        // no cleanup script for this upgrade
        return null;
    }

    /**
     * Marks shared, non-system-offering networks that appear in
     * domain_network_ref as domain specific.
     */
    private void updateDomainLevelNetworks(Connection conn) {
        s_logger.debug("Updating domain level specific networks...");
        try {
            PreparedStatement pstmt = conn.prepareStatement("SELECT n.id FROM networks n, network_offerings o WHERE n.shared=1 AND o.system_only=0 AND o.id=n.network_offering_id");
            ResultSet rs = pstmt.executeQuery();
            List<Long> networkIds = new ArrayList<Long>();
            while (rs.next()) {
                networkIds.add(rs.getLong(1));
            }
            rs.close();
            pstmt.close();

            for (Long networkId : networkIds) {
                pstmt = conn.prepareStatement("SELECT * from domain_network_ref where network_id=?");
                pstmt.setLong(1, networkId);
                rs = pstmt.executeQuery();
                boolean isDomainSpecific = rs.next();
                rs.close();
                pstmt.close();
                if (isDomainSpecific) {
                    s_logger.debug("Setting network id=" + networkId + " as domain specific shared network");
                    pstmt = conn.prepareStatement("UPDATE networks set is_domain_specific=1 where id=?");
                    pstmt.setLong(1, networkId);
                    pstmt.executeUpdate();
                    pstmt.close();
                }
            }

            s_logger.debug("Successfully updated domain level specific networks");
        } catch (SQLException e) {
            s_logger.error("Failed to set domain specific shared networks due to ", e);
            throw new CloudRuntimeException("Failed to set domain specific shared networks due to ", e);
        }
    }

    //this method inserts missing volume.delete events (events were missing when vm failed to create)
    private void updateVolumeUsageRecords(Connection conn) {
        try {
            s_logger.debug("Inserting missing usage_event records for destroyed volumes...");
            PreparedStatement pstmt = conn.prepareStatement("select id, account_id, data_center_id, name from volumes where state='Destroy' and id in (select resource_id from usage_event where type='volume.create') and id not in (select resource_id from usage_event where type='volume.delete')");
            ResultSet rs = pstmt.executeQuery();
            while (rs.next()) {
                long volumeId = rs.getLong(1);
                long accountId = rs.getLong(2);
                long zoneId = rs.getLong(3);
                String volumeName = rs.getString(4);

                // use a separate statement so the ResultSet we are iterating
                // keeps its owning statement open
                PreparedStatement insertPstmt = conn.prepareStatement("insert into usage_event (type, account_id, created, zone_id, resource_name, resource_id) values ('VOLUME.DELETE', ?, now(), ?, ?, ?)");
                insertPstmt.setLong(1, accountId);
                insertPstmt.setLong(2, zoneId);
                insertPstmt.setString(3, volumeName);
                insertPstmt.setLong(4, volumeId);

                insertPstmt.executeUpdate();
                insertPstmt.close();
            }
            rs.close();
            pstmt.close();
            s_logger.debug("Successfully inserted missing usage_event records for destroyed volumes");
        } catch (SQLException e) {
            s_logger.error("Failed to insert missing delete usage records ", e);
            throw new CloudRuntimeException("Failed to insert missing delete usage records ", e);
        }
    }
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.upgrade.dao;

import java.io.File;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

import org.apache.log4j.Logger;

import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;

/**
 * Premium (usage-enabled) variant of the 2.2.7-to-2.2.8 upgrade: additionally
 * runs the premium schema script, adds usage_storage.source_id, and backfills
 * network IDs into both copies of user_statistics.
 */
public class Upgrade227to228Premium extends Upgrade227to228 {
    final static Logger s_logger = Logger.getLogger(Upgrade227to228Premium.class);

    @Override
    public File[] getPrepareScripts() {
        // base script first, then the premium additions
        File[] scripts = super.getPrepareScripts();
        File[] newScripts = new File[2];
        newScripts[0] = scripts[0];

        String file = Script.findScript("","db/schema-227to228-premium.sql");
        if (file == null) {
            throw new CloudRuntimeException("Unable to find the upgrade script, schema-227to228-premium.sql");
        }

        newScripts[1] = new File(file);

        return newScripts;
    }

    @Override
    public void performDataMigration(Connection conn) {
        // premium-only migrations run before the base class's migrations
        addSourceIdColumn(conn);
        addNetworkIdsToUserStats(conn);
        super.performDataMigration(conn);
    }

    @Override
    public File[] getCleanupScripts() {
        // no cleanup script for this upgrade
        return null;
    }

    /**
     * Adds the source_id column to cloud_usage.usage_storage if it is not
     * already there. Existence is probed by selecting the column and treating
     * a failure as "missing".
     */
    private void addSourceIdColumn(Connection conn) {
        boolean insertField = false;
        try {
            PreparedStatement pstmt;
            try {
                pstmt = conn.prepareStatement("SELECT source_id FROM `cloud_usage`.`usage_storage`");
                ResultSet rs = pstmt.executeQuery();

                if (rs.next()) {
                    s_logger.info("The source id field already exist, not adding it");
                }
                rs.close();
                pstmt.close();

            } catch (Exception e) {
                // if there is an exception, it means that field doesn't exist, and we can create it
                insertField = true;
            }

            if (insertField) {
                s_logger.debug("Adding source_id to usage_storage...");
                pstmt = conn.prepareStatement("ALTER TABLE `cloud_usage`.`usage_storage` ADD COLUMN `source_id` bigint unsigned");
                pstmt.executeUpdate();
                s_logger.debug("Column source_id was added successfully to usage_storage table");
                pstmt.close();
            }

        } catch (SQLException e) {
            s_logger.error("Failed to add source_id to usage_storage due to ", e);
            throw new CloudRuntimeException("Failed to add source_id to usage_storage due to ", e);
        }
    }

    /**
     * For every distinct public IP in user_statistics, looks up the owning
     * network in user_ip_address and stamps network_id on the matching rows in
     * both the cloud and cloud_usage copies of user_statistics.
     */
    private void addNetworkIdsToUserStats(Connection conn) {
        s_logger.debug("Adding network IDs to user stats...");
        try {
            String stmt = "SELECT DISTINCT public_ip_address FROM `cloud`.`user_statistics` WHERE public_ip_address IS NOT null";
            PreparedStatement pstmt = conn.prepareStatement(stmt);
            ResultSet rs = pstmt.executeQuery();

            while (rs.next()) {
                String publicIpAddress = rs.getString(1);
                stmt = "SELECT network_id FROM `cloud`.`user_ip_address` WHERE public_ip_address = ?";
                // separate statement: keeps the outer ResultSet's statement open
                PreparedStatement lookupPstmt = conn.prepareStatement(stmt);
                lookupPstmt.setString(1, publicIpAddress);
                ResultSet rs2 = lookupPstmt.executeQuery();

                if (rs2.next()) {
                    Long networkId = rs2.getLong(1);
                    String[] dbs = {"cloud", "cloud_usage"};
                    for (String db : dbs) {
                        stmt = "UPDATE `" + db + "`.`user_statistics` SET network_id = ? WHERE public_ip_address = ?";
                        PreparedStatement updatePstmt = conn.prepareStatement(stmt);
                        updatePstmt.setLong(1, networkId);
                        updatePstmt.setString(2, publicIpAddress);
                        updatePstmt.executeUpdate();
                        updatePstmt.close();
                    }
                }

                rs2.close();
                lookupPstmt.close();
            }

            rs.close();
            pstmt.close();
            s_logger.debug("Successfully added network IDs to user stats.");
        } catch (SQLException e) {
            String errorMsg = "Failed to add network IDs to user stats.";
            s_logger.error(errorMsg, e);
            throw new CloudRuntimeException(errorMsg, e);
        }
    }

}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.upgrade.dao;

import java.io.File;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

import org.apache.log4j.Logger;

import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;

/**
 * DB upgrade from CloudStack 2.2.8 to 2.2.9: drops stale keys/indexes and
 * re-adds the cluster, snapshot and network_tags constraints expected by the
 * 2.2.9 schema.
 */
public class Upgrade228to229 implements DbUpgrade {
    final static Logger s_logger = Logger.getLogger(Upgrade228to229.class);

    @Override
    public String[] getUpgradableVersionRange() {
        return new String[] { "2.2.8", "2.2.8"};
    }

    @Override
    public String getUpgradedVersion() {
        return "2.2.9";
    }

    @Override
    public boolean supportsRollingUpgrade() {
        return true;
    }

    @Override
    public File[] getPrepareScripts() {
        String script = Script.findScript("", "db/schema-228to229.sql");
        if (script == null) {
            throw new CloudRuntimeException("Unable to find db/schema-228to229.sql");
        }
        return new File[] { new File(script) };
    }

    @Override
    public void performDataMigration(Connection conn) {
        dropKeysIfExist(conn);
        PreparedStatement pstmt;
        try {
            /* fk_cluster__data_center_id has been wrongly added in a previous
             * upgrade (not sure which one); the 228-to-229 upgrade drops it
             * above and re-adds it here */
            pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`cluster` ADD CONSTRAINT `fk_cluster__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `cloud`.`data_center`(`id`) ON DELETE CASCADE");
            pstmt.executeUpdate();
            pstmt.close();

            pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`snapshots` ADD INDEX `i_snapshots__removed`(`removed`)");
            pstmt.executeUpdate();
            pstmt.close();

            pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`network_tags` ADD CONSTRAINT `fk_network_tags__network_id` FOREIGN KEY (`network_id`) REFERENCES `networks`(`id`) ON DELETE CASCADE");
            pstmt.executeUpdate();
            pstmt.close();

        } catch (SQLException e) {
            throw new CloudRuntimeException("Unable to execute cluster update", e);
        }
    }

    @Override
    public File[] getCleanupScripts() {
        // no cleanup script for this upgrade
        return null;
    }

    /**
     * Drops foreign keys first (they pin the indexes), then indexes; the
     * cluster/snapshot/network_tags keys are re-added by performDataMigration.
     */
    private void dropKeysIfExist(Connection conn) {
        HashMap<String, List<String>> indexes = new HashMap<String, List<String>>();
        HashMap<String, List<String>> foreignKeys = new HashMap<String, List<String>>();

        //indexes to drop
        //for network_offering
        List<String> keys = new ArrayList<String>();
        keys.add("name");
        indexes.put("network_offerings", keys);

        //for snapshot
        keys = new ArrayList<String>();
        keys.add("i_snapshots__removed");
        indexes.put("snapshots", keys);

        //for domain router
        keys = new ArrayList<String>();
        keys.add("i_domain_router__public_ip_address");
        indexes.put("domain_router", keys);

        //for user_ip_address
        keys = new ArrayList<String>();
        keys.add("i_user_ip_address__public_ip_address");
        indexes.put("user_ip_address", keys);

        //foreign keys to drop - this key would be re-added later
        keys = new ArrayList<String>();
        keys.add("fk_cluster__data_center_id");
        foreignKeys.put("cluster", keys);

        keys = new ArrayList<String>();
        keys.add("fk_domain_router__public_ip_address");
        foreignKeys.put("domain_router", keys);

        //drop foreign key from network tags table - it would be re-added later
        keys = new ArrayList<String>();
        keys.add("fk_network_tags__network_id");
        foreignKeys.put("network_tags", keys);

        // drop all foreign keys first
        s_logger.debug("Dropping keys that don't exist in 2.2.9 version of the DB...");
        for (String tableName : foreignKeys.keySet()) {
            DbUpgradeUtils.dropKeysIfExist(conn, tableName, foreignKeys.get(tableName), true);
        }

        // drop indexes now
        for (String tableName : indexes.keySet()) {
            DbUpgradeUtils.dropKeysIfExist(conn, tableName, indexes.get(tableName), false);
        }
    }

}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.upgrade.dao;

import java.io.File;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.UUID;

import org.apache.log4j.Logger;

import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;

/**
 * DB upgrade from CloudStack 2.2.9 to 2.2.10: creates companion
 * purpose='Firewall' rules (plus cidrs) for existing firewall rules, and
 * propagates sechost_id along snapshot chains.
 */
public class Upgrade229to2210 implements DbUpgrade {
    final static Logger s_logger = Logger.getLogger(Upgrade229to2210.class);

    @Override
    public String[] getUpgradableVersionRange() {
        return new String[] { "2.2.9", "2.2.9"};
    }

    @Override
    public String getUpgradedVersion() {
        return "2.2.10";
    }

    @Override
    public boolean supportsRollingUpgrade() {
        return true;
    }

    @Override
    public File[] getPrepareScripts() {
        String script = Script.findScript("", "db/schema-229to2210.sql");
        if (script == null) {
            throw new CloudRuntimeException("Unable to find db/schema-229to2210.sql");
        }

        return new File[] { new File(script) };
    }

    @Override
    public void performDataMigration(Connection conn) {
        updateFirewallRules(conn);
        updateSnapshots(conn);
    }

    @Override
    public File[] getCleanupScripts() {
        // no cleanup script for this upgrade
        return null;
    }

    /**
     * Backfills sechost_id on backed-up snapshots from their previous
     * snapshot in the chain, where the previous one has it set.
     */
    private void updateSnapshots(Connection conn) {
        PreparedStatement pstmt = null;
        ResultSet rs = null;
        long currentSnapshotId = 0;  // remembered for the error message
        try {
            pstmt = conn.prepareStatement("select id, prev_snap_id from snapshots where sechost_id is NULL and prev_snap_id is not NULL and status=\"BackedUp\" and removed is NULL order by id");
            rs = pstmt.executeQuery();
            while (rs.next()) {
                long id = rs.getLong(1);
                long preSnapId = rs.getLong(2);
                currentSnapshotId = id;
                // separate statement so the ResultSet we iterate stays valid
                PreparedStatement lookupPstmt = conn.prepareStatement("select sechost_id from snapshots where id=? and sechost_id is not NULL");
                lookupPstmt.setLong(1, preSnapId);
                ResultSet sechost = lookupPstmt.executeQuery();
                if (sechost.next()) {
                    long secHostId = sechost.getLong(1);
                    PreparedStatement updatePstmt = conn.prepareStatement("update snapshots set sechost_id=? where id=?");
                    updatePstmt.setLong(1, secHostId);
                    updatePstmt.setLong(2, id);
                    updatePstmt.executeUpdate();
                    updatePstmt.close();
                }
                sechost.close();
                lookupPstmt.close();
            }
        } catch (SQLException e) {
            throw new CloudRuntimeException("Unable to update snapshots id=" + currentSnapshotId, e);
        } finally {
            try {
                if (rs != null) {
                    rs.close();
                }

                if (pstmt != null) {
                    pstmt.close();
                }
            } catch (SQLException e) {
                // best-effort cleanup only
            }
        }
    }

    /**
     * For every non-revoked firewall rule, inserts a companion rule with
     * purpose='Firewall' related to the original, then either repoints the
     * original rule's cidrs at the new rule or inserts a default 0.0.0.0/0
     * cidr for it.
     */
    private void updateFirewallRules(Connection conn) {
        PreparedStatement pstmt = null;
        ResultSet rs = null;
        long currentRuleId = 0;  // remembered for the error message
        try {
            pstmt = conn.prepareStatement("select id, ip_address_id, start_port, end_port, protocol, account_id, domain_id, network_id from firewall_rules where state != 'Revoke'");
            rs = pstmt.executeQuery();
            while (rs.next()) {
                long id = rs.getLong(1);
                long ipId = rs.getLong(2);
                int startPort = rs.getInt(3);
                int endPort = rs.getInt(4);
                String protocol = rs.getString(5);
                long accountId = rs.getLong(6);
                long domainId = rs.getLong(7);
                long networkId = rs.getLong(8);
                currentRuleId = id;
                Long firewallRuleId = null;

                // inner statements are kept separate from pstmt so the
                // ResultSet we are iterating keeps its owning statement open
                PreparedStatement insertPstmt = conn.prepareStatement("INSERT INTO firewall_rules (ip_address_id, start_port, end_port, protocol, account_id, domain_id, network_id, purpose, state, xid, created, related) VALUES (?, ?, ?, ?, ?, ?, ?, 'Firewall', 'Active', ?, now(), ?)");

                insertPstmt.setLong(1, ipId);
                insertPstmt.setInt(2, startPort);
                insertPstmt.setInt(3, endPort);
                insertPstmt.setString(4, protocol);
                insertPstmt.setLong(5, accountId);
                insertPstmt.setLong(6, domainId);
                insertPstmt.setLong(7, networkId);
                insertPstmt.setString(8, UUID.randomUUID().toString());
                insertPstmt.setLong(9, id);

                s_logger.debug("Updating firewall rule with the statement " + insertPstmt);
                insertPstmt.executeUpdate();
                insertPstmt.close();

                //get new FirewallRule update
                PreparedStatement selectPstmt = conn.prepareStatement("SELECT id from firewall_rules where purpose='Firewall' and start_port=? and end_port=? and protocol=? and ip_address_id=? and network_id=? and related=?");
                selectPstmt.setInt(1, startPort);
                selectPstmt.setInt(2, endPort);
                selectPstmt.setString(3, protocol);
                selectPstmt.setLong(4, ipId);
                selectPstmt.setLong(5, networkId);
                selectPstmt.setLong(6, id);

                ResultSet rs1 = selectPstmt.executeQuery();

                if (rs1.next()) {
                    firewallRuleId = rs1.getLong(1);
                } else {
                    throw new CloudRuntimeException("Unable to find just inserted firewall rule for protocol " + protocol + ", start_port " + startPort + " and end_port " + endPort + " and ip address id=" + ipId);
                }
                rs1.close();
                selectPstmt.close();

                PreparedStatement cidrPstmt = conn.prepareStatement("select id from firewall_rules_cidrs where firewall_rule_id=?");
                cidrPstmt.setLong(1, id);

                ResultSet rs2 = cidrPstmt.executeQuery();
                boolean hasCidrs = rs2.next();
                rs2.close();
                cidrPstmt.close();

                if (hasCidrs) {
                    PreparedStatement updatePstmt = conn.prepareStatement("update firewall_rules_cidrs set firewall_rule_id=? where firewall_rule_id=?");
                    updatePstmt.setLong(1, firewallRuleId);
                    updatePstmt.setLong(2, id);
                    s_logger.debug("Updating existing cidrs for the rule id=" + id + " with the new Firewall rule id=" + firewallRuleId + " with statement" + updatePstmt);
                    updatePstmt.executeUpdate();
                    updatePstmt.close();
                } else {
                    PreparedStatement defaultCidrPstmt = conn.prepareStatement("insert into firewall_rules_cidrs (firewall_rule_id,source_cidr) values (?, '0.0.0.0/0')");
                    defaultCidrPstmt.setLong(1, firewallRuleId);
                    s_logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id=" + firewallRuleId + " with statement " + defaultCidrPstmt);
                    defaultCidrPstmt.executeUpdate();
                    defaultCidrPstmt.close();
                }
            }
        } catch (SQLException e) {
            throw new CloudRuntimeException("Unable to update firewall rule id=" + currentRuleId, e);
        } finally {
            try {
                if (rs != null) {
                    rs.close();
                }

                if (pstmt != null) {
                    pstmt.close();
                }
            } catch (SQLException e) {
                // best-effort cleanup only
            }
        }
    }

}
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import java.io.File; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; + +public class Upgrade301to302 implements DbUpgrade { + final static Logger s_logger = Logger.getLogger(Upgrade301to302.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] { "3.0.1", "3.0.2" }; + } + + @Override + public String getUpgradedVersion() { + return "3.0.2"; + } + + @Override + public boolean supportsRollingUpgrade() { + return true; + } + + @Override + public File[] getPrepareScripts() { + String script = Script.findScript("", "db/schema-301to302.sql"); + if (script == null) { + throw new CloudRuntimeException("Unable to find db/schema-301to302.sql"); + } + + return new File[] { new File(script) }; + } + + private void dropKeysIfExists(Connection conn) { + HashMap> uniqueKeys = new HashMap>(); + List keys = new ArrayList(); + + keys.add("i_host__allocation_state"); + uniqueKeys.put("host", keys); + + s_logger.debug("Droping i_host__allocation_state key in host table"); + for (String tableName : uniqueKeys.keySet()) { + DbUpgradeUtils.dropKeysIfExist(conn, tableName, uniqueKeys.get(tableName), false); + } + } + + @Override + public void performDataMigration(Connection conn) { + dropKeysIfExists(conn); + 
updateSharedNetworks(conn); + fixLastHostIdKey(conn); + changeEngine(conn); + } + + @Override + public File[] getCleanupScripts() { + String script = Script.findScript("", "db/schema-301to302-cleanup.sql"); + if (script == null) { + throw new CloudRuntimeException("Unable to find db/schema-301to302-cleanup.sql"); + } + + return new File[] { new File(script) }; + } + + + protected void updateSharedNetworks(Connection conn) { + PreparedStatement pstmt = null; + ResultSet rs = null; + ResultSet rs1 = null; + + try { + pstmt = conn.prepareStatement("select n.id, map.id from `cloud`.`network_offerings` n, `cloud`.`ntwk_offering_service_map` map " + + "where n.id=map.network_offering_id and map.service='Lb' and map.provider='VirtualRouter';"); + rs = pstmt.executeQuery(); + while (rs.next()) { + long ntwkOffId = rs.getLong(1); + long mapId = rs.getLong(2); + + //check if the network offering has source nat service enabled + pstmt = conn.prepareStatement("select n.id from `cloud`.`network_offerings` n, `cloud`.`ntwk_offering_service_map`" + + " map where n.id=map.network_offering_id and map.service='SourceNat' AND n.id=?"); + pstmt.setLong(1, ntwkOffId); + rs1 = pstmt.executeQuery(); + if (rs1.next()) { + continue; + } + + //delete the service only when there are no lb rules for the network(s) using this network offering + pstmt = conn.prepareStatement("select * from `cloud`.`firewall_rules` f, `cloud`.`networks` n, `cloud`.`network_offerings`" + + " off where f.purpose='LB' and f.network_id=n.id and n.network_offering_id=off.id and off.id=?"); + pstmt.setLong(1, ntwkOffId); + rs1 = pstmt.executeQuery(); + if (rs1.next()) { + continue; + } + + //delete lb service for the network offering + pstmt = conn.prepareStatement("DELETE FROM `cloud`.`ntwk_offering_service_map` WHERE id=?"); + pstmt.setLong(1, mapId); + pstmt.executeUpdate(); + s_logger.debug("Deleted lb service for network offering id=" + ntwkOffId + " as it doesn't have source nat service enabled"); + + //delete 
lb service for the network + pstmt = conn.prepareStatement("SELECT map.id, n.id FROM `cloud`.`ntwk_service_map` map, networks n WHERE n.network_offering_id=? " + + "AND map.network_id=n.id AND map.service='Lb'"); + pstmt.setLong(1, ntwkOffId); + rs1 = pstmt.executeQuery(); + while (rs1.next()) { + mapId = rs1.getLong(1); + long ntwkId=rs1.getLong(2); + + pstmt = conn.prepareStatement("DELETE FROM `cloud`.`ntwk_service_map` WHERE id=?"); + pstmt.setLong(1, mapId); + pstmt.executeUpdate(); + s_logger.debug("Deleted lb service for network id=" + ntwkId + " as it doesn't have source nat service enabled"); + } + + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to update shared networks due to exception while executing query " + pstmt, e); + } finally { + try { + if (rs != null) { + rs.close(); + } + if (rs1 != null) { + rs1.close(); + } + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + } + + + private void fixLastHostIdKey(Connection conn) { + //Drop i_usage_event__created key (if exists) and re-add it again + List keys = new ArrayList(); + + //Drop vmInstance keys (if exists) and insert one with correct name + keys = new ArrayList(); + + keys.add("fk_vm_instance__last_host_id"); + keys.add("i_vm_instance__last_host_id"); + + DbUpgradeUtils.dropKeysIfExist(conn, "cloud.vm_instance", keys, true); + DbUpgradeUtils.dropKeysIfExist(conn, "cloud.vm_instance", keys, false); + PreparedStatement pstmt = null; + try { + pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`vm_instance` ADD CONSTRAINT `fk_vm_instance__last_host_id` FOREIGN KEY (`last_host_id`) REFERENCES `host` (`id`)"); + pstmt.executeUpdate(); + pstmt.close(); + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to insert foreign key in vm_instance table ", e); + }finally { + try { + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + } + + private void changeEngine(Connection conn) { + s_logger.debug("Fixing 
engine and row_format for op_lock and op_nwgrp_work tables"); + PreparedStatement pstmt = null; + try { + pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`op_lock` ENGINE=MEMORY, ROW_FORMAT = FIXED"); + pstmt.executeUpdate(); + pstmt.close(); + } catch (Exception e) { + s_logger.debug("Failed do execute the statement " + pstmt + ", moving on as it's not critical fix"); + } finally { + try { + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + + try { + pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`op_nwgrp_work` ENGINE=MEMORY, ROW_FORMAT = FIXED"); + pstmt.executeUpdate(); + pstmt.close(); + } catch (Exception e) { + s_logger.debug("Failed do execute the statement " + pstmt + ", moving on as it's not critical fix"); + } finally { + try { + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + } + +}