cloudstack-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From d...@apache.org
Subject git commit: updated refs/heads/master to 55a6df4
Date Wed, 18 Dec 2013 09:55:23 GMT
Updated Branches:
  refs/heads/master 961834661 -> 55a6df450


Resource leaks, refactoring and testing

Removing resource leaks from UsageSanityChecker and
refactoring it (encapsulation, removal of copy and paste, constants...)

Modularize static method for closing Statements in TransactionLegacy
and reusing this new method from other classes (Upgrade2214to30)

Create Unit and Integration Tests for UsageSanityChecker

Add DBUnit cases and integration profile for integration tests as
a base for future DB tests


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/55a6df45
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/55a6df45
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/55a6df45

Branch: refs/heads/master
Commit: 55a6df450153063a25f05d24f79587371f2082b1
Parents: 9618346
Author: Antonio Fornie <afornie@schubergphilis.com>
Authored: Mon Dec 16 16:43:34 2013 +0100
Committer: Daan Hoogland <dhoogland@schubergphilis.com>
Committed: Wed Dec 18 10:55:04 2013 +0100

----------------------------------------------------------------------
 .../com/cloud/upgrade/dao/Upgrade2214to30.java  |  40 +--
 .../com/cloud/utils/db/TransactionLegacy.java   |  24 +-
 usage/pom.xml                                   |  26 ++
 .../src/com/cloud/usage/UsageSanityChecker.java | 356 +++++++++++--------
 .../com/cloud/usage/UsageSanityCheckerIT.java   | 144 ++++++++
 .../com/cloud/usage/UsageSanityCheckerTest.java |  52 +++
 usage/test/resources/cloud1.xml                 |  15 +
 usage/test/resources/cloud2.xml                 |  15 +
 usage/test/resources/cloud3.xml                 |   4 +
 usage/test/resources/cloud_usage1.xml           |  17 +
 usage/test/resources/cloud_usage2.xml           |  34 ++
 usage/test/resources/cloud_usage3.xml           |   4 +
 12 files changed, 555 insertions(+), 176 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/55a6df45/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java
index 62fdcf1..58dd916 100644
--- a/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java
+++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java
@@ -34,6 +34,7 @@ import org.apache.log4j.Logger;
 import com.cloud.offering.NetworkOffering;
 import com.cloud.utils.crypt.DBEncryptionUtil;
 import com.cloud.utils.crypt.EncryptionSecretKeyChecker;
+import com.cloud.utils.db.TransactionLegacy;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.script.Script;
 
@@ -110,19 +111,6 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade
{
         return new File[] {new File(script)};
     }
 
-    protected void closePstmts(List<PreparedStatement> pstmt2Close){
-        for(PreparedStatement pstmt : pstmt2Close) {
-            try {
-                if (pstmt != null && !pstmt.isClosed()) {
-                    pstmt.close();
-                }
-            } catch (SQLException e) {
-                // It's not possible to recover from this and we need to continue closing
-                e.printStackTrace();
-            }
-        }
-    }
-
     private void setupPhysicalNetworks(Connection conn) {
         /**
          * for each zone:
@@ -374,7 +362,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade
{
         } catch (SQLException e) {
             throw new CloudRuntimeException("Exception while adding PhysicalNetworks", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
 
     }
@@ -454,7 +442,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade
{
         } catch (UnsupportedEncodingException e) {
             throw new CloudRuntimeException("Unable encrypt host_details values ", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
         s_logger.debug("Done encrypting host details");
     }
@@ -500,7 +488,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade
{
         } catch (UnsupportedEncodingException e) {
             throw new CloudRuntimeException("Unable encrypt vm_instance vnc_password ", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
         s_logger.debug("Done encrypting vm_instance vnc_password");
     }
@@ -533,7 +521,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade
{
         } catch (UnsupportedEncodingException e) {
             throw new CloudRuntimeException("Unable encrypt user secret key ", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
         s_logger.debug("Done encrypting user keys");
     }
@@ -566,7 +554,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade
{
         } catch (UnsupportedEncodingException e) {
             throw new CloudRuntimeException("Unable encrypt vpn_users password ", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
         s_logger.debug("Done encrypting vpn_users password");
     }
@@ -687,7 +675,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade
{
         } catch (SQLException e) {
             throw new CloudRuntimeException("Unable to create service/provider map for network
offerings", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
     }
 
@@ -725,7 +713,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade
{
         } catch (SQLException e) {
             throw new CloudRuntimeException("Unable to update domain network ref", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
     }
 
@@ -759,7 +747,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade
{
         } catch (SQLException e) {
             throw new CloudRuntimeException("Unable to create service/provider map for networks",
e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
     }
 
@@ -873,7 +861,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade
{
                 pstmt.close();
             } catch (SQLException e) {
             }
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
     }
 
@@ -904,7 +892,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade
{
         } catch (SQLException e) {
             throw new CloudRuntimeException("Unable to update op_host_capacity table. ",
e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
     }
 
@@ -1011,7 +999,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade
{
                 pstmt.close();
             } catch (SQLException e) {
             }
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
     }
 
@@ -1084,7 +1072,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade
{
                 zoneIds.add(rs.getLong(1));
             }
         } catch (SQLException e) {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
             throw new CloudRuntimeException("Unable to switch networks to the new network
offering", e);
         }
 
@@ -1167,7 +1155,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade
{
                     pstmt.close();
                 } catch (SQLException e) {
                 }
-                closePstmts(pstmt2Close);
+                TransactionLegacy.closePstmts(pstmt2Close);
             }
         }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/55a6df45/framework/db/src/com/cloud/utils/db/TransactionLegacy.java
----------------------------------------------------------------------
diff --git a/framework/db/src/com/cloud/utils/db/TransactionLegacy.java b/framework/db/src/com/cloud/utils/db/TransactionLegacy.java
index b36c027..ac0ea21 100755
--- a/framework/db/src/com/cloud/utils/db/TransactionLegacy.java
+++ b/framework/db/src/com/cloud/utils/db/TransactionLegacy.java
@@ -769,7 +769,7 @@ public class TransactionLegacy {
 
         try {
             // we should only close db connection when it is not user managed
-            if (this._dbId != CONNECTED_DB) {
+            if (_dbId != CONNECTED_DB) {
                 if (s_connLogger.isTraceEnabled()) {
                     s_connLogger.trace("Closing DB connection: dbconn" + System.identityHashCode(_conn));
                 }
@@ -1212,6 +1212,26 @@ public class TransactionLegacy {
      * @param conn
      */
     protected void setConnection(Connection conn) {
-        this._conn = conn;
+        _conn = conn;
     }
+
+    /**
+     * Receives a list of {@link PreparedStatement} and quietly closes all of them, which
+     * triggers also closing their dependent objects, like a {@link ResultSet}
+     *
+     * @param pstmt2Close
+     */
+    public static void closePstmts(List<PreparedStatement> pstmt2Close) {
+        for (PreparedStatement pstmt : pstmt2Close) {
+            try {
+                if (pstmt != null && !pstmt.isClosed()) {
+                    pstmt.close();
+                }
+            } catch (SQLException e) {
+                // It's not possible to recover from this and we need to continue closing
+                e.printStackTrace();
+            }
+        }
+    }
+
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/55a6df45/usage/pom.xml
----------------------------------------------------------------------
diff --git a/usage/pom.xml b/usage/pom.xml
index a0055cc..ecd765e 100644
--- a/usage/pom.xml
+++ b/usage/pom.xml
@@ -44,6 +44,13 @@
       <groupId>commons-daemon</groupId>
       <artifactId>commons-daemon</artifactId>
     </dependency>
+
+    <dependency>
+      <groupId>org.dbunit</groupId>
+      <artifactId>dbunit</artifactId>
+      <version>2.4.9</version>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
   <build>
     <resources>
@@ -213,5 +220,24 @@
         </plugins>
       </build>
     </profile>
+    <profile>
+      <id>integration</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-failsafe-plugin</artifactId>
+            <executions>
+              <execution>
+                <goals>
+                  <goal>integration-test</goal>
+                  <goal>verify</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
   </profiles>
 </project>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/55a6df45/usage/src/com/cloud/usage/UsageSanityChecker.java
----------------------------------------------------------------------
diff --git a/usage/src/com/cloud/usage/UsageSanityChecker.java b/usage/src/com/cloud/usage/UsageSanityChecker.java
index 34032bc..5e6123b 100644
--- a/usage/src/com/cloud/usage/UsageSanityChecker.java
+++ b/usage/src/com/cloud/usage/UsageSanityChecker.java
@@ -25,191 +25,182 @@ import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.log4j.Logger;
 
 import com.cloud.utils.db.TransactionLegacy;
 
+/**
+ * This class must not be used concurrently because its state changes often during
+ * execution in a non synchronized way
+ */
 public class UsageSanityChecker {
 
-    private StringBuffer errors;
-    private String lastCheckId = "";
-    private final String lastCheckFile = "/usr/local/libexec/sanity-check-last-id";
+    protected static final Logger s_logger = Logger.getLogger(UsageSanityChecker.class);
+    protected static final int DEFAULT_AGGREGATION_RANGE = 1440;
+    protected StringBuilder errors;
+    protected List<CheckCase> checkCases;
+    protected String lastCheckFile = "/usr/local/libexec/sanity-check-last-id";
+    protected String lastCheckId = "";
+    protected int lastId = -1;
+    protected int maxId = -1;
+    protected Connection conn;
 
-    private boolean checkMaxUsage(Connection conn) throws SQLException {
+    protected void reset() {
+        errors = new StringBuilder();
+        checkCases = new ArrayList<CheckCase>();
+    }
 
-        PreparedStatement pstmt = conn.prepareStatement("SELECT value FROM `cloud`.`configuration`
where name = 'usage.stats.job.aggregation.range'");
-        ResultSet rs = pstmt.executeQuery();
+    protected boolean checkItemCountByPstmt() throws SQLException {
+        boolean checkOk = true;
 
-        int aggregationRange = 1440;
-        if (rs.next()) {
-            aggregationRange = rs.getInt(1);
-        } else {
-            System.out.println("Failed to retrieve aggregation range. Using default : " +
aggregationRange);
+        for(CheckCase check : checkCases) {
+            checkOk &= checkItemCountByPstmt(check);
         }
 
-        int aggregationHours = aggregationRange / 60;
+        return checkOk;
+    }
+
+    protected boolean checkItemCountByPstmt(CheckCase checkCase) throws SQLException {
+        List<PreparedStatement> pstmt2Close = new ArrayList<PreparedStatement>();
+        boolean checkOk = true;
 
         /*
-         * Check for usage records with raw_usage > aggregationHours
+         * Check for item usage records which are created after it is removed
          */
-        pstmt =
-            conn.prepareStatement("SELECT count(*) FROM `cloud_usage`.`cloud_usage` cu where
usage_type not in (4,5) and raw_usage > " + aggregationHours + lastCheckId);
-        rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " usage records with raw_usage
> " + aggregationHours);
-            errors.append("\n");
-            return false;
+        PreparedStatement pstmt;
+        try {
+            pstmt = conn.prepareStatement(checkCase.sqlTemplate);
+            if(checkCase.checkId) {
+                pstmt.setInt(1, lastId);
+                pstmt.setInt(2, maxId);
+            }
+
+            pstmt2Close.add(pstmt);
+            ResultSet rs = pstmt.executeQuery();
+            if (rs.next() && (rs.getInt(1) > 0)) {
+                errors.append(String.format("Error: Found %s %s\n", rs.getInt(1), checkCase.itemName));
+                checkOk = false;
+            }
+        } catch (SQLException e) {
+            throw e;
+        } finally {
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
-        return true;
+        return checkOk;
     }
 
-    private boolean checkVmUsage(Connection conn) throws SQLException {
-        boolean success = true;
-        /*
-         * Check for Vm usage records which are created after the vm is destroyed
-         */
-        PreparedStatement pstmt =
-            conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner
join cloud.vm_instance vm where vm.type = 'User' " +
-                "and cu.usage_type in (1 , 2) and cu.usage_id = vm.id and cu.start_date >
vm.removed" + lastCheckId);
-        ResultSet rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " Vm usage records which are created
after Vm is destroyed");
-            errors.append("\n");
-            success = false;
-        }
+    protected void checkMaxUsage() throws SQLException {
+        int aggregationRange = DEFAULT_AGGREGATION_RANGE;
+        List<PreparedStatement> pstmt2Close = new ArrayList<PreparedStatement>();
+        try {
+            PreparedStatement pstmt = conn.prepareStatement(
+                    "SELECT value FROM `cloud`.`configuration` where name = 'usage.stats.job.aggregation.range'");
+            pstmt2Close.add(pstmt);
+            ResultSet rs = pstmt.executeQuery();
 
-        /*
-         * Check for Vms which have multiple running vm records in helper table
-         */
-        pstmt =
-            conn.prepareStatement("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance
where usage_type =1 "
-                + "and end_date is null group by vm_instance_id having count(vm_instance_id)
> 1) c ;");
-        rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " duplicate running Vm entries
in vm usage helper table");
-            errors.append("\n");
-            success = false;
+            if (rs.next()) {
+                aggregationRange = rs.getInt(1);
+            } else {
+                s_logger.debug("Failed to retrieve aggregation range. Using default : " +
aggregationRange);
+            }
+        } catch (SQLException e) {
+            throw e;
+        } finally {
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
 
-        /*
-         * Check for Vms which have multiple allocated vm records in helper table
-         */
-        pstmt =
-            conn.prepareStatement("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance
where usage_type =2 "
-                + "and end_date is null group by vm_instance_id having count(vm_instance_id)
> 1) c ;");
-        rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " duplicate allocated Vm entries
in vm usage helper table");
-            errors.append("\n");
-            success = false;
-        }
+        int aggregationHours = aggregationRange / 60;
 
-        /*
-         * Check for Vms which have running vm entry without allocated vm  entry in helper
table
-         */
-        pstmt =
-            conn.prepareStatement("select count(vm_instance_id) from cloud_usage.usage_vm_instance
o where o.end_date is null and o.usage_type=1 and not exists "
-                + "(select 1 from cloud_usage.usage_vm_instance i where i.vm_instance_id=o.vm_instance_id
and usage_type=2 and i.end_date is null)");
-        rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " running Vm entries without corresponding
allocated entries in vm usage helper table");
-            errors.append("\n");
-            success = false;
-        }
-        return success;
+        addCheckCase("SELECT count(*) FROM `cloud_usage`.`cloud_usage` cu where usage_type
not in (4,5) and raw_usage > "
+                + aggregationHours,
+                "usage records with raw_usage > " + aggregationHours,
+                lastCheckId);
     }
 
-    private boolean checkVolumeUsage(Connection conn) throws SQLException {
-        boolean success = true;
-        /*
-         * Check for Volume usage records which are created after the volume is removed
-         */
-        PreparedStatement pstmt =
-            conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner
join cloud.volumes v " +
-                "where cu.usage_type = 6 and cu.usage_id = v.id and cu.start_date > v.removed"
+ lastCheckId);
-        ResultSet rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " volume usage records which are
created after volume is removed");
-            errors.append("\n");
-            success = false;
-        }
+    protected void checkVmUsage() {
+        addCheckCase("select count(*) from cloud_usage.cloud_usage cu inner join cloud.vm_instance
vm "
+                + "where vm.type = 'User' and cu.usage_type in (1 , 2) "
+                + "and cu.usage_id = vm.id and cu.start_date > vm.removed ",
+                "Vm usage records which are created after Vm is destroyed",
+                lastCheckId);
 
-        /*
-         * Check for duplicate records in volume usage helper table
-         */
-        pstmt =
-            conn.prepareStatement("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_volume
"
-                + "where deleted is null group by id having count(id) > 1) c;");
-        rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " duplicate records is volume
usage helper table");
-            errors.append("\n");
-            success = false;
-        }
-        return success;
+        addCheckCase("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance
"
+                + "where usage_type =1 and end_date is null group by vm_instance_id "
+                + "having count(vm_instance_id) > 1) c ;",
+                "duplicate running Vm entries in vm usage helper table");
+
+        addCheckCase("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance
"
+                + "where usage_type =2 and end_date is null group by vm_instance_id "
+                + "having count(vm_instance_id) > 1) c ;",
+                "duplicate allocated Vm entries in vm usage helper table");
+
+        addCheckCase("select count(vm_instance_id) from cloud_usage.usage_vm_instance o "
+                + "where o.end_date is null and o.usage_type=1 and not exists "
+                + "(select 1 from cloud_usage.usage_vm_instance i where "
+                + "i.vm_instance_id=o.vm_instance_id and usage_type=2 and i.end_date is null)",
+                "running Vm entries without corresponding allocated entries in vm usage helper
table");
     }
 
-    private boolean checkTemplateISOUsage(Connection conn) throws SQLException {
-        /*
-         * Check for Template/ISO usage records which are created after it is removed
-         */
-        PreparedStatement pstmt =
-            conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner
join cloud.template_zone_ref tzr " +
-                "where cu.usage_id = tzr.template_id and cu.zone_id = tzr.zone_id and cu.usage_type
in (7,8) and cu.start_date > tzr.removed" + lastCheckId);
-        ResultSet rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " template/ISO usage records which
are created after it is removed");
-            errors.append("\n");
-            return false;
-        }
-        return true;
+    protected void checkVolumeUsage() {
+        addCheckCase("select count(*) from cloud_usage.cloud_usage cu inner join cloud.volumes
v where "
+                + "cu.usage_type = 6 and cu.usage_id = v.id and cu.start_date > v.removed
",
+                "volume usage records which are created after volume is removed",
+                lastCheckId);
+
+        addCheckCase("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_volume
"
+                + "where deleted is null group by id having count(id) > 1) c;",
+                "duplicate records in volume usage helper table");
     }
 
-    private boolean checkSnapshotUsage(Connection conn) throws SQLException {
-        /*
-         * Check for snapshot usage records which are created after snapshot is removed
-         */
-        PreparedStatement pstmt =
-            conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner
join cloud.snapshots s " +
-                "where cu.usage_id = s.id and cu.usage_type = 9 and cu.start_date > s.removed"
+ lastCheckId);
-        ResultSet rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " snapshot usage records which
are created after snapshot is removed");
-            errors.append("\n");
-            return false;
-        }
-        return true;
+    protected void checkTemplateISOUsage() {
+        addCheckCase("select count(*) from cloud_usage.cloud_usage cu inner join cloud.template_zone_ref
tzr where "
+                + "cu.usage_id = tzr.template_id and cu.zone_id = tzr.zone_id and cu.usage_type
in (7,8) and cu.start_date > tzr.removed ",
+                "template/ISO usage records which are created after it is removed",
+                lastCheckId);
     }
 
-    public String runSanityCheck() throws SQLException {
+    protected void checkSnapshotUsage() {
+        addCheckCase("select count(*) from cloud_usage.cloud_usage cu inner join cloud.snapshots
s where "
+                + "cu.usage_id = s.id and cu.usage_type = 9 and cu.start_date > s.removed
",
+                "snapshot usage records which are created after it is removed",
+                lastCheckId);
+    }
+
+    protected void readLastCheckId(){
+        BufferedReader reader = null;
         try {
-            BufferedReader reader = new BufferedReader(new FileReader(lastCheckFile));
-            String last_id = null;
-            if ((reader != null) && (last_id = reader.readLine()) != null) {
-                int lastId = Integer.parseInt(last_id);
-                if (lastId > 0) {
-                    lastCheckId = " and cu.id > " + last_id;
-                }
+            reader = new BufferedReader(new FileReader(lastCheckFile));
+            String lastIdText = null;
+            lastId = -1;
+            if ((reader != null) && (lastIdText = reader.readLine()) != null) {
+                lastId = Integer.parseInt(lastIdText);
+            }
+        } catch (IOException e) {
+            s_logger.error(e);
+        } finally {
+            try {
+                reader.close();
+            } catch (IOException e) {
+                s_logger.error(e);
             }
-            reader.close();
-        } catch (Exception e) {
-            // Error while reading last check id
         }
+    }
 
-        Connection conn = TransactionLegacy.getStandaloneConnection();
-        int maxId = 0;
+    protected void readMaxId() throws SQLException {
         PreparedStatement pstmt = conn.prepareStatement("select max(id) from cloud_usage.cloud_usage");
         ResultSet rs = pstmt.executeQuery();
+        maxId = -1;
         if (rs.next() && (rs.getInt(1) > 0)) {
             maxId = rs.getInt(1);
-            lastCheckId += " and cu.id <= " + maxId;
+            lastCheckId += " and cu.id <= ?";
         }
-        errors = new StringBuffer();
-        checkMaxUsage(conn);
-        checkVmUsage(conn);
-        checkVolumeUsage(conn);
-        checkTemplateISOUsage(conn);
-        checkSnapshotUsage(conn);
-        FileWriter fstream;
+    }
+
+    protected void updateNewMaxId() {
+        FileWriter fstream = null;
         try {
             fstream = new FileWriter(lastCheckFile);
             BufferedWriter out = new BufferedWriter(fstream);
@@ -217,20 +208,89 @@ public class UsageSanityChecker {
             out.close();
         } catch (IOException e) {
             // Error while writing last check id
+        } finally {
+            if (fstream != null) {
+                try {
+                    fstream.close();
+                } catch (IOException e) {
+                    s_logger.error(e);
+                }
+            }
         }
+    }
+
+    public String runSanityCheck() throws SQLException {
+
+        readLastCheckId();
+        if (lastId > 0) {
+            lastCheckId = " and cu.id > ?";
+        }
+
+        conn = getConnection();
+        readMaxId();
+
+        reset();
+
+        checkMaxUsage();
+        checkVmUsage();
+        checkVolumeUsage();
+        checkTemplateISOUsage();
+        checkSnapshotUsage();
+
+        checkItemCountByPstmt();
+
         return errors.toString();
     }
 
+    /**
+     * Local acquisition of {@link Connection} to remove static cling
+     * @return
+     */
+    protected Connection getConnection() {
+        return TransactionLegacy.getStandaloneConnection();
+    }
+
     public static void main(String args[]) {
         UsageSanityChecker usc = new UsageSanityChecker();
         String sanityErrors;
         try {
             sanityErrors = usc.runSanityCheck();
             if (sanityErrors.length() > 0) {
-                System.out.println(sanityErrors.toString());
+                s_logger.error(sanityErrors.toString());
             }
         } catch (SQLException e) {
             e.printStackTrace();
         }
     }
+
+    protected void addCheckCase(String sqlTemplate, String itemName, String lastCheckId)
{
+        checkCases.add(new CheckCase(sqlTemplate, itemName, lastCheckId));
+    }
+
+    protected void addCheckCase(String sqlTemplate, String itemName) {
+        checkCases.add(new CheckCase(sqlTemplate, itemName));
+    }
 }
+
+
+/**
+ * Just an abstraction of the kind of check to repeat across these cases
+ * encapsulating what change for each specific case
+ */
+class CheckCase {
+    public String sqlTemplate;
+    public String itemName;
+    public boolean checkId = false;
+
+    public CheckCase(String sqlTemplate, String itemName, String lastCheckId) {
+        checkId = true;
+        this.sqlTemplate = sqlTemplate + lastCheckId;
+        this.itemName = itemName;
+    }
+
+    public CheckCase(String sqlTemplate, String itemName) {
+        checkId = false;
+        this.sqlTemplate = sqlTemplate;
+        this.itemName = itemName;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/55a6df45/usage/test/com/cloud/usage/UsageSanityCheckerIT.java
----------------------------------------------------------------------
diff --git a/usage/test/com/cloud/usage/UsageSanityCheckerIT.java b/usage/test/com/cloud/usage/UsageSanityCheckerIT.java
new file mode 100644
index 0000000..412bebc
--- /dev/null
+++ b/usage/test/com/cloud/usage/UsageSanityCheckerIT.java
@@ -0,0 +1,144 @@
+package com.cloud.usage;
+
+import org.dbunit.DatabaseUnitException;
+import org.dbunit.dataset.DataSetException;
+import org.dbunit.dataset.IDataSet;
+import org.dbunit.dataset.xml.FlatXmlDataSetBuilder;
+import org.dbunit.ext.mysql.MySqlConnection;
+import org.dbunit.operation.DatabaseOperation;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.mockito.Mockito;
+
+import com.cloud.utils.PropertiesUtil;
+
+import java.io.FileNotFoundException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Properties;
+
+@RunWith(Parameterized.class)
+public class UsageSanityCheckerIT{
+
+    protected Connection cloudConn;
+
+    protected Connection usageConn;
+
+    protected MySqlConnection dbuUsageConn;
+
+    protected MySqlConnection dbuCloudConn;
+
+    protected Properties properties = new Properties();
+
+    protected IDataSet cloudDataSet;
+
+    protected IDataSet usageDataSet;
+
+    protected String cloudDbuFileName;
+
+    protected String usageDbuFileName;
+
+    protected String expectedErrors;
+
+    protected static final String EXPECTED_ERRORS_1 = "Error: Found 2 usage records with
raw_usage > 10\n" +
+            "Error: Found 1 Vm usage records which are created after Vm is destroyed\n" +
+            "Error: Found 2 duplicate allocated Vm entries in vm usage helper table\n" +
+            "Error: Found 1 running Vm entries without corresponding allocated entries in
vm usage helper table\n" +
+            "Error: Found 1 volume usage records which are created after volume is removed\n"
+
+            "Error: Found 1 template/ISO usage records which are created after it is removed\n"
+
+            "Error: Found 1 snapshot usage records which are created after it is removed\n";
+
+    protected static final String EXPECTED_ERRORS_2 = "Error: Found 3 usage records with
raw_usage > 10\n" +
+            "Error: Found 1 Vm usage records which are created after Vm is destroyed\n" +
+            "Error: Found 8 duplicate running Vm entries in vm usage helper table\n" +
+            "Error: Found 4 duplicate allocated Vm entries in vm usage helper table\n" +
+            "Error: Found 4 running Vm entries without corresponding allocated entries in
vm usage helper table\n" +
+            "Error: Found 2 volume usage records which are created after volume is removed\n"
+
+            "Error: Found 6 duplicate records in volume usage helper table\n" +
+            "Error: Found 2 template/ISO usage records which are created after it is removed\n"
+
+            "Error: Found 1 snapshot usage records which are created after it is removed\n";
+
+    protected static final String EXPECTED_ERRORS_3 = "";
+
+
+    public UsageSanityCheckerIT(String cloudDbuFileName, String usageDbuFileName,
+            String expectedErrors) {
+        this.cloudDbuFileName = cloudDbuFileName;
+        this.usageDbuFileName = usageDbuFileName;
+        this.expectedErrors = expectedErrors;
+    }
+
+    @Parameters
+    public static Collection<Object[]> data() {
+        Object [][] data = new Object[][] {
+                {"cloud1.xml", "cloud_usage1.xml", EXPECTED_ERRORS_1},
+                {"cloud2.xml", "cloud_usage2.xml", EXPECTED_ERRORS_2},
+                {"cloud3.xml", "cloud_usage3.xml", EXPECTED_ERRORS_3}
+        };
+        return Arrays.asList(data);
+    }
+
+    protected Connection createConnection(String dbSchema) throws SQLException {
+        String cloudDbUrl = "jdbc:mysql://"+properties.getProperty("db."+dbSchema+".host")
+
+                ":" + properties.getProperty("db."+dbSchema+".port") + "/" +
+                properties.getProperty("db."+dbSchema+".name");
+        return DriverManager.getConnection(cloudDbUrl, properties.getProperty("db."+dbSchema+".username"),
+                properties.getProperty("db."+dbSchema+".password"));
+    }
+
+    @Before
+    public void setUp() throws Exception {
+        PropertiesUtil.loadFromFile(properties, PropertiesUtil.findConfigFile("db.properties"));
+
+        Class.forName("com.mysql.jdbc.Driver");
+        cloudConn = createConnection("cloud");
+        usageConn = createConnection("usage");
+
+        dbuCloudConn = new MySqlConnection(cloudConn, properties.getProperty("db.cloud.name"));
+        dbuUsageConn = new MySqlConnection(usageConn, properties.getProperty("db.usage.name"));
+        cloudDataSet = getCloudDataSet();
+        usageDataSet = getUsageDataSet();
+        DatabaseOperation.CLEAN_INSERT.execute(dbuCloudConn, cloudDataSet);
+        DatabaseOperation.CLEAN_INSERT.execute(dbuUsageConn, usageDataSet);
+    }
+
+    @After
+    public void tearDown() throws DataSetException, FileNotFoundException, DatabaseUnitException,
SQLException {
+        DatabaseOperation.DELETE_ALL.execute(dbuCloudConn, getCloudDataSet());
+        DatabaseOperation.DELETE_ALL.execute(dbuUsageConn, getUsageDataSet());
+    }
+
+    @Test
+    public void testRunSanityCheck() throws SQLException, ClassNotFoundException, FileNotFoundException,
DatabaseUnitException {
+        // Prepare
+        UsageSanityChecker checker = Mockito.spy(new UsageSanityChecker());
+        Mockito.doReturn(cloudConn).when(checker).getConnection();
+        Mockito.doNothing().when(checker).readLastCheckId();
+        Mockito.doNothing().when(checker).updateNewMaxId();
+        checker.lastId = 2;
+
+        // Execute
+        String actualErrors = checker.runSanityCheck();
+
+        // Assert
+        assertEquals("Expected errors not found", expectedErrors, actualErrors);
+    }
+
+    protected IDataSet getCloudDataSet() throws DataSetException, FileNotFoundException {
+        return new FlatXmlDataSetBuilder().build(PropertiesUtil.openStreamFromURL(cloudDbuFileName));
+    }
+
+    protected IDataSet getUsageDataSet() throws DataSetException, FileNotFoundException {
+        return new FlatXmlDataSetBuilder().build(PropertiesUtil.openStreamFromURL(usageDbuFileName));
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/55a6df45/usage/test/com/cloud/usage/UsageSanityCheckerTest.java
----------------------------------------------------------------------
diff --git a/usage/test/com/cloud/usage/UsageSanityCheckerTest.java b/usage/test/com/cloud/usage/UsageSanityCheckerTest.java
new file mode 100644
index 0000000..346a8ad
--- /dev/null
+++ b/usage/test/com/cloud/usage/UsageSanityCheckerTest.java
@@ -0,0 +1,52 @@
+package com.cloud.usage;
+
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import junit.framework.TestCase;
+
+public class UsageSanityCheckerTest extends TestCase {
+
+    @Test
+    public void testCheckItemCountByPstmt() throws SQLException {
+        // Prepare
+        // Mock dependencies to exclude from the test
+        String sqlTemplate1 = "SELECT * FROM mytable1";
+        String sqlTemplate2 = "SELECT * FROM mytable2";
+
+        Connection conn = Mockito.mock(Connection.class);
+        PreparedStatement pstmt = Mockito.mock(PreparedStatement.class);
+        ResultSet rs = Mockito.mock(ResultSet.class);
+
+        Mockito.when(conn.prepareStatement(sqlTemplate1)).thenReturn(pstmt);
+        Mockito.when(conn.prepareStatement(sqlTemplate2)).thenReturn(pstmt);
+        Mockito.when(pstmt.executeQuery()).thenReturn(rs, rs);
+
+        // First if: true -> 8
+        // Second loop: true -> 16
+        Mockito.when(rs.next()).thenReturn(true, true);
+        Mockito.when(rs.getInt(1)).thenReturn(8, 8, 16, 16);
+
+        // Prepare class under test
+        UsageSanityChecker checker = new UsageSanityChecker();
+        checker.conn = conn;
+        checker.reset();
+        checker.addCheckCase(sqlTemplate1, "item1");
+        checker.addCheckCase(sqlTemplate2, "item2");
+
+        // Execute
+        checker.checkItemCountByPstmt();
+
+        // Verify
+        Pattern pattern = Pattern.compile(".*8.*item1.*\n.*16.*item2.*");
+        Matcher matcher = pattern.matcher(checker.errors);
+        assertTrue("Didn't create complete errors. It should create 2 errors: 8 item1 and
16 item2", matcher.find());
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/55a6df45/usage/test/resources/cloud1.xml
----------------------------------------------------------------------
diff --git a/usage/test/resources/cloud1.xml b/usage/test/resources/cloud1.xml
new file mode 100644
index 0000000..e56ed07
--- /dev/null
+++ b/usage/test/resources/cloud1.xml
@@ -0,0 +1,15 @@
<!-- cloud -->
<!-- DBUnit flat-XML fixture for the "cloud" schema, dataset 1. Each element
     is one table row; removed="0" marks the entity as still present. Paired
     with cloud_usage1.xml in UsageSanityCheckerIT's @Parameters data. -->
<dataset>
<configuration name="usage.stats.job.aggregation.range" value="600"/>

<vm_instance type="User" id="8" removed="0" />

<volumes id="16" removed="0"/>
<volumes id="17" removed="0"/>

<template_zone_ref template_id="14" zone_id="1" removed="0" />
<template_zone_ref template_id="15" zone_id="1" removed="0" />
<template_zone_ref template_id="16" zone_id="1" removed="0" />

<snapshots id="18" removed="0" />
</dataset>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/55a6df45/usage/test/resources/cloud2.xml
----------------------------------------------------------------------
diff --git a/usage/test/resources/cloud2.xml b/usage/test/resources/cloud2.xml
new file mode 100644
index 0000000..e56ed07
--- /dev/null
+++ b/usage/test/resources/cloud2.xml
@@ -0,0 +1,15 @@
<!-- cloud -->
<!-- DBUnit flat-XML fixture for the "cloud" schema, dataset 2 (same rows as
     cloud1.xml). Paired with cloud_usage2.xml in UsageSanityCheckerIT's
     @Parameters data, which contains more offending usage rows. -->
<dataset>
<configuration name="usage.stats.job.aggregation.range" value="600"/>

<vm_instance type="User" id="8" removed="0" />

<volumes id="16" removed="0"/>
<volumes id="17" removed="0"/>

<template_zone_ref template_id="14" zone_id="1" removed="0" />
<template_zone_ref template_id="15" zone_id="1" removed="0" />
<template_zone_ref template_id="16" zone_id="1" removed="0" />

<snapshots id="18" removed="0" />
</dataset>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/55a6df45/usage/test/resources/cloud3.xml
----------------------------------------------------------------------
diff --git a/usage/test/resources/cloud3.xml b/usage/test/resources/cloud3.xml
new file mode 100644
index 0000000..6e7a745
--- /dev/null
+++ b/usage/test/resources/cloud3.xml
@@ -0,0 +1,4 @@
<!-- cloud -->
<!-- Minimal "cloud" schema fixture, dataset 3. Paired with cloud_usage3.xml
     and EXPECTED_ERRORS_3 (empty) in UsageSanityCheckerIT: the sanity check
     is expected to find nothing wrong with this data. -->
<dataset>
<volumes id="17" removed="0"/>
</dataset>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/55a6df45/usage/test/resources/cloud_usage1.xml
----------------------------------------------------------------------
diff --git a/usage/test/resources/cloud_usage1.xml b/usage/test/resources/cloud_usage1.xml
new file mode 100644
index 0000000..aaad17d
--- /dev/null
+++ b/usage/test/resources/cloud_usage1.xml
@@ -0,0 +1,17 @@
<!-- cloud_usage -->
<!-- DBUnit fixture for the "cloud_usage" schema, dataset 1; paired with
     cloud1.xml and EXPECTED_ERRORS_1 in UsageSanityCheckerIT.
     NOTE(review): rows appear crafted to trigger specific errors (raw_usage
     above 10, usage rows for removed entities, duplicate helper-table
     entries) - confirm against the checker's queries. -->
<dataset>
<cloud_usage usage_type="1" raw_usage="11" usage_id="8" id="8" start_date="1" zone_id="1"/>
<cloud_usage usage_type="1" raw_usage="13" usage_id="6" id="6" start_date="1" zone_id="1"/>

<cloud_usage usage_type="6" usage_id="16" id="7" start_date="1" zone_id="1"/>
<cloud_usage usage_type="8" usage_id="14" id="7" start_date="1" zone_id="1" />

<cloud_usage usage_type="9" usage_id="18" id="7" start_date="1" zone_id="1" />

<usage_volume id="2" />
<usage_volume id="4" />

<usage_vm_instance usage_type="1" vm_instance_id="1" />
<usage_vm_instance usage_type="2" vm_instance_id="2" />
<usage_vm_instance usage_type="2" vm_instance_id="2" />
</dataset>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/55a6df45/usage/test/resources/cloud_usage2.xml
----------------------------------------------------------------------
diff --git a/usage/test/resources/cloud_usage2.xml b/usage/test/resources/cloud_usage2.xml
new file mode 100644
index 0000000..d87d05a
--- /dev/null
+++ b/usage/test/resources/cloud_usage2.xml
@@ -0,0 +1,34 @@
<!-- cloud_usage -->
<!-- DBUnit fixture for the "cloud_usage" schema, dataset 2; paired with
     cloud2.xml and EXPECTED_ERRORS_2 in UsageSanityCheckerIT. It repeats
     rows to produce duplicate helper-table entries on top of the dataset-1
     style problems. NOTE(review): row/error mapping inferred from the
     expected-error strings - confirm against the checker's queries. -->
<dataset>
<cloud_usage usage_type="1" raw_usage="11" usage_id="8" id="8" start_date="1" zone_id="1"/>
<cloud_usage usage_type="1" raw_usage="12" usage_id="7" id="7" start_date="1" zone_id="1"/>
<cloud_usage usage_type="1" raw_usage="13" usage_id="6" id="6" start_date="1" zone_id="1"/>

<cloud_usage usage_type="6" usage_id="16" id="7" start_date="1" zone_id="1"/>
<cloud_usage usage_type="6" usage_id="16" id="7" start_date="1" zone_id="1" />
<cloud_usage usage_type="8" usage_id="14" id="7" start_date="1" zone_id="1" />
<cloud_usage usage_type="8" usage_id="14" id="7" start_date="1" zone_id="1" />

<cloud_usage usage_type="9" usage_id="18" id="7" start_date="1" zone_id="1" />

<usage_volume id="2" />
<usage_volume id="3" />
<usage_volume id="4" />
<usage_volume id="2" />
<usage_volume id="3" />
<usage_volume id="4" />

<usage_vm_instance usage_type="1" vm_instance_id="1" />
<usage_vm_instance usage_type="1" vm_instance_id="1" />
<usage_vm_instance usage_type="1" vm_instance_id="2" />
<usage_vm_instance usage_type="1" vm_instance_id="2" />
<usage_vm_instance usage_type="1" vm_instance_id="3" />
<usage_vm_instance usage_type="1" vm_instance_id="3" />
<usage_vm_instance usage_type="1" vm_instance_id="3" />
<usage_vm_instance usage_type="1" vm_instance_id="3" />

<usage_vm_instance usage_type="2" vm_instance_id="1" />
<usage_vm_instance usage_type="2" vm_instance_id="1" />
<usage_vm_instance usage_type="2" vm_instance_id="2" />
<usage_vm_instance usage_type="2" vm_instance_id="2" />
</dataset>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/55a6df45/usage/test/resources/cloud_usage3.xml
----------------------------------------------------------------------
diff --git a/usage/test/resources/cloud_usage3.xml b/usage/test/resources/cloud_usage3.xml
new file mode 100644
index 0000000..332ddfc
--- /dev/null
+++ b/usage/test/resources/cloud_usage3.xml
@@ -0,0 +1,4 @@
<!-- cloud_usage -->
<!-- Minimal "cloud_usage" fixture, dataset 3; paired with cloud3.xml and the
     empty EXPECTED_ERRORS_3 in UsageSanityCheckerIT (sanity check should
     report no errors for this data). -->
<dataset>
<cloud_usage usage_type="1" raw_usage="11" usage_id="8" id="8" start_date="1" zone_id="1"/>
</dataset>


Mime
View raw message